# HG changeset patch # User Gilles Duboscq # Date 1381480683 -7200 # Node ID cefad50507d8750c975c3682bd4f12d2a237d4ad # Parent ccb4f2af2319b3bb19a19b79522af445c6d42782 # Parent f6962730bbde82f279a0ae3a1c14bc5e58096c6e Merge with hs25-b53 diff -r ccb4f2af2319 -r cefad50507d8 .hgtags --- a/.hgtags Thu Oct 10 18:26:22 2013 +0200 +++ b/.hgtags Fri Oct 11 10:38:03 2013 +0200 @@ -369,3 +369,17 @@ 7f55137d6aa81efc6eb0035813709f2cb6a26b8b hs25-b45 6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103 580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46 +104743074675359cfbf7f4dcd9ab2a5974a16627 jdk8-b104 +c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47 +acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105 +18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48 +aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106 +50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49 +5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107 +a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50 +85072013aad46050a362d10ab78e963121c8014c jdk8-b108 +566db1b0e6efca31f181456e54c8911d0192410d hs25-b51 +c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109 +58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52 +6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110 +562a3d356de67670b4172b82aca2d30743449e04 hs25-b53 diff -r ccb4f2af2319 -r cefad50507d8 agent/src/os/linux/LinuxDebuggerLocal.c --- a/agent/src/os/linux/LinuxDebuggerLocal.c Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/os/linux/LinuxDebuggerLocal.c Fri Oct 11 10:38:03 2013 +0200 @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -80,7 +81,7 @@ (JNIEnv *env, jclass cls) { jclass listClass; - if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) { + if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) { THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc"); } diff -r ccb4f2af2319 -r cefad50507d8 agent/src/os/linux/ps_core.c --- a/agent/src/os/linux/ps_core.c Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/os/linux/ps_core.c Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -698,29 +698,58 @@ // read segments of a shared object static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) { - int i = 0; - ELF_PHDR* phbuf; - ELF_PHDR* lib_php = NULL; + int i = 0; + ELF_PHDR* phbuf; + ELF_PHDR* lib_php = NULL; + + int page_size=sysconf(_SC_PAGE_SIZE); - if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) - return false; + if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) { + return false; + } + + // we want to process only PT_LOAD segments that are not writable. + // i.e., text segments. The read/write/exec (data) segments would + // have been already added from core file segments. + for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) { + if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) { + + uintptr_t target_vaddr = lib_php->p_vaddr + lib_base; + map_info *existing_map = core_lookup(ph, target_vaddr); - // we want to process only PT_LOAD segments that are not writable. - // i.e., text segments. The read/write/exec (data) segments would - // have been already added from core file segments.
- for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) { - if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) { - if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL) - goto err; + if (existing_map == NULL){ + if (add_map_info(ph, lib_fd, lib_php->p_offset, + target_vaddr, lib_php->p_filesz) == NULL) { + goto err; + } + } else { + if ((existing_map->memsz != page_size) && + (existing_map->fd != lib_fd) && + (existing_map->memsz != lib_php->p_filesz)){ + + print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)", + target_vaddr, lib_php->p_filesz, lib_php->p_flags); + goto err; + } + + /* replace PT_LOAD segment with library segment */ + print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n", + existing_map->memsz, lib_php->p_filesz); + + existing_map->fd = lib_fd; + existing_map->offset = lib_php->p_offset; + existing_map->memsz = lib_php->p_filesz; } - lib_php++; - } + } + + lib_php++; + } - free(phbuf); - return true; + free(phbuf); + return true; err: - free(phbuf); - return false; + free(phbuf); + return false; } // process segments from interpreter (ld.so or ld-linux.so) diff -r ccb4f2af2319 -r cefad50507d8 agent/src/os/linux/ps_proc.c --- a/agent/src/os/linux/ps_proc.c Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/os/linux/ps_proc.c Fri Oct 11 10:38:03 2013 +0200 @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include "libproc_impl.h" diff -r ccb4f2af2319 -r cefad50507d8 agent/src/os/linux/salibelf.c --- a/agent/src/os/linux/salibelf.c Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/os/linux/salibelf.c Fri Oct 11 10:38:03 2013 +0200 @@ -25,6 +25,7 @@ #include "salibelf.h" #include #include +#include extern void print_debug(const char*,...); diff -r ccb4f2af2319 -r cefad50507d8 agent/src/os/linux/symtab.c --- a/agent/src/os/linux/symtab.c Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/os/linux/symtab.c Fri Oct 11 10:38:03 2013 +0200 @@ -305,7 +305,7 @@ unsigned char *bytes = (unsigned char*)(note+1) + note->n_namesz; - unsigned char *filename + char *filename = (build_id_to_debug_filename (note->n_descsz, bytes)); fd = pathmap_open(filename); diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java --- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Fri Oct 11 10:38:03 2013 +0200 @@ -1213,6 +1213,7 @@ } HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase(); if (t.countTokens() == 1) { + String name = t.nextToken(); out.println("intConstant " + name + " " + db.lookupIntConstant(name)); } else if (t.countTokens() == 0) { Iterator i = db.getIntConstants(); @@ -1235,6 +1236,7 @@ } HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase(); if (t.countTokens() == 1) { + String name = t.nextToken(); out.println("longConstant " + name + " " + db.lookupLongConstant(name)); } else if (t.countTokens() == 0) { Iterator i = db.getLongConstants(); diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Fri Oct 11 10:38:03 2013 +0200 @@ -81,7 +81,7 @@ public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, 
UnmappedAddressException { - return debugger.readCompOopAddress(addr + offset); + return debugger.readCompKlassAddress(addr + offset); } // diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java --- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Oct 11 10:38:03 2013 +0200 @@ -75,19 +75,19 @@ javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), 0); constants = new MetadataField(type.getAddressField("_constants"), 0); classLoaderData = type.getAddressField("_class_loader_data"); - sourceFileName = type.getAddressField("_source_file_name"); sourceDebugExtension = type.getAddressField("_source_debug_extension"); innerClasses = type.getAddressField("_inner_classes"); + sourceFileNameIndex = new CIntField(type.getCIntegerField("_source_file_name_index"), 0); nonstaticFieldSize = new CIntField(type.getCIntegerField("_nonstatic_field_size"), 0); staticFieldSize = new CIntField(type.getCIntegerField("_static_field_size"), 0); - staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0); + staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0); nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0); isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0); initState = new CIntField(type.getCIntegerField("_init_state"), 0); vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0); itableLen = new CIntField(type.getCIntegerField("_itable_len"), 0); breakpoints = type.getAddressField("_breakpoints"); - genericSignature = type.getAddressField("_generic_signature"); + genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0); majorVersion = new CIntField(type.getCIntegerField("_major_version"), 0); minorVersion = new CIntField(type.getCIntegerField("_minor_version"), 0); headerSize = Oop.alignObjectOffset(type.getSize()); @@ -134,9 +134,9 @@ private static CIntField javaFieldsCount; private static MetadataField constants; private static AddressField classLoaderData; - private static AddressField sourceFileName; private static AddressField sourceDebugExtension; private static AddressField innerClasses; + private static CIntField sourceFileNameIndex; private static CIntField nonstaticFieldSize; private static CIntField staticFieldSize; private static CIntField staticOopFieldCount; @@ -146,7 +146,7 @@ private static CIntField vtableLen; private static CIntField itableLen; private static AddressField breakpoints; - private static AddressField genericSignature; + private static CIntField genericSignatureIndex; private static CIntField majorVersion; private static CIntField minorVersion; @@ -346,7 +346,7 @@ public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); } public ClassLoaderData getClassLoaderData() { return ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); } public Oop getClassLoader() { return getClassLoaderData().getClassLoader(); } - public Symbol getSourceFileName() { return getSymbol(sourceFileName); } + public Symbol getSourceFileName() { return getConstants().getSymbolAt(sourceFileNameIndex.getValue(this)); } public String getSourceDebugExtension(){ return CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); } public long getNonstaticFieldSize() { return 
nonstaticFieldSize.getValue(this); } public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); } @@ -354,9 +354,16 @@ public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; } public long getVtableLen() { return vtableLen.getValue(this); } public long getItableLen() { return itableLen.getValue(this); } - public Symbol getGenericSignature() { return getSymbol(genericSignature); } public long majorVersion() { return majorVersion.getValue(this); } public long minorVersion() { return minorVersion.getValue(this); } + public Symbol getGenericSignature() { + long index = genericSignatureIndex.getValue(this); + if (index != 0) { + return getConstants().getSymbolAt(index); + } else { + return null; + } + } // "size helper" == instance size in words public long getSizeHelper() { diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Oct 11 10:38:03 2013 +0200 @@ -134,15 +134,13 @@ private String type; private String name; private Address addr; - private String kind; - private int origin; + private int flags; - private Flag(String type, String name, Address addr, String kind, int origin) { + private Flag(String type, String name, Address addr, int flags) { this.type = type; this.name = name; this.addr = addr; - this.kind = kind; - this.origin = origin; + this.flags = flags; } public String getType() { @@ -157,12 +155,8 @@ return addr; } - public String getKind() { - return kind; - } - public int getOrigin() { - return origin; + return flags & 0xF; // XXX can we get the mask bits from somewhere? } public boolean isBool() { @@ -173,8 +167,7 @@ if (Assert.ASSERTS_ENABLED) { Assert.that(isBool(), "not a bool flag!"); } - return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) - != 0; + return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0; } public boolean isIntx() { @@ -792,7 +785,7 @@ public boolean isCompressedKlassPointersEnabled() { if (compressedKlassPointersEnabled == null) { - Flag flag = getCommandLineFlag("UseCompressedKlassPointers"); + Flag flag = getCommandLineFlag("UseCompressedClassPointers"); compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE: (flag.getBool()? 
Boolean.TRUE: Boolean.FALSE); } @@ -843,11 +836,10 @@ Address flagAddr = flagType.getAddressField("flags").getValue(); - AddressField typeFld = flagType.getAddressField("type"); - AddressField nameFld = flagType.getAddressField("name"); - AddressField addrFld = flagType.getAddressField("addr"); - AddressField kindFld = flagType.getAddressField("kind"); - CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0); + AddressField typeFld = flagType.getAddressField("_type"); + AddressField nameFld = flagType.getAddressField("_name"); + AddressField addrFld = flagType.getAddressField("_addr"); + CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0); long flagSize = flagType.getSize(); // sizeof(Flag) @@ -856,9 +848,8 @@ String type = CStringUtilities.getString(typeFld.getValue(flagAddr)); String name = CStringUtilities.getString(nameFld.getValue(flagAddr)); Address addr = addrFld.getValue(flagAddr); - String kind = CStringUtilities.getString(kindFld.getValue(flagAddr)); - int origin = (int)originFld.getValue(flagAddr); - commandLineFlags[f] = new Flag(type, name, addr, kind, origin); + int flags = (int)flagsFld.getValue(flagAddr); + commandLineFlags[f] = new Flag(type, name, addr, flags); flagAddr = flagAddr.addOffsetTo(flagSize); } diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Fri Oct 11 10:38:03 2013 +0200 @@ -66,18 +66,18 @@ printGCAlgorithm(flagMap); System.out.println(); System.out.println("Heap Configuration:"); - printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap)); - printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap)); - printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap)); - printValMB("NewSize = ", getFlagValue("NewSize", flagMap)); - printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap)); - printValMB("OldSize = ", getFlagValue("OldSize", flagMap)); - printValue("NewRatio = ", getFlagValue("NewRatio", flagMap)); - printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap)); - printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap)); - printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap)); - printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap)); - printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); + printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap)); + printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap)); + printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap)); + printValMB("NewSize = ", getFlagValue("NewSize", flagMap)); + printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap)); + printValMB("OldSize = ", getFlagValue("OldSize", flagMap)); + printValue("NewRatio = ", getFlagValue("NewRatio", flagMap)); + printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap)); + printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap)); + printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap)); + printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap)); + printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); System.out.println(); System.out.println("Heap Usage:"); diff -r ccb4f2af2319 -r cefad50507d8 
agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Fri Oct 11 10:38:03 2013 +0200 @@ -92,8 +92,13 @@ System.err.println("Warning: Can not create class filter!"); } } - String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", "."); - setOutputDirectory(outputDirectory); + + // outputDirectory and jarStream are alternatives: setting one closes the other. + // If neither is set, use outputDirectory from the System property: + if (outputDirectory == null && jarStream == null) { + String dirName = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", "."); + setOutputDirectory(dirName); + } // walk through the system dictionary SystemDictionary dict = VM.getVM().getSystemDictionary(); diff -r ccb4f2af2319 -r cefad50507d8 agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Thu Oct 10 18:26:22 2013 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Fri Oct 11 10:38:03 2013 +0200 @@ -35,8 +35,9 @@ sapkg.code = sapkg.hotspot.code; sapkg.compiler = sapkg.hotspot.compiler; -// 'debugger' is a JavaScript keyword :-( -// sapkg.debugger = sapkg.hotspot.debugger; +// 'debugger' is a JavaScript keyword, but ES5 relaxes the +// restriction of using keywords as property name +sapkg.debugger = sapkg.hotspot.debugger; sapkg.interpreter = sapkg.hotspot.interpreter; sapkg.jdi = sapkg.hotspot.jdi; @@ -116,27 +117,36 @@ return args; } + // Handle __has__ specially to avoid metacircularity problems + // when called from __get__. + // Calling + // this.__has__(name) + // will in turn call + // this.__call__('__has__', name) + // which is not handled below + function __has__(name) { + if (typeof(name) == 'number') { + return so["has(int)"](name); + } else { + if (name == '__wrapped__') { + return true; + } else if (so["has(java.lang.String)"](name)) { + return true; + } else if (name.equals('toString')) { + return true; + } else { + return false; + } + } + } + if (so instanceof sapkg.utilities.soql.ScriptObject) { return new JSAdapter() { - __getIds__: function() { - return so.getIds(); + __getIds__: function() { + return so.getIds(); }, - __has__ : function(name) { - if (typeof(name) == 'number') { - return so["has(int)"](name); - } else { - if (name == '__wrapped__') { - return true; - } else if (so["has(java.lang.String)"](name)) { - return true; - } else if (name.equals('toString')) { - return true; - } else { - return false; - } - } - }, + __has__ : __has__, __delete__ : function(name) { if (typeof(name) == 'number') { @@ -147,7 +157,8 @@ }, __get__ : function(name) { - if (! this.__has__(name)) { + // don't call this.__has__(name); see comments above function __has__ + if (! 
__has__.call(this, name)) { return undefined; } if (typeof(name) == 'number') { @@ -162,7 +173,7 @@ var args = prepareArgsArray(arguments); var r; try { - r = value.call(args); + r = value.call(Java.to(args, 'java.lang.Object[]')); } catch (e) { println("call to " + name + " failed!"); throw e; @@ -204,6 +215,18 @@ } // define "writeln" and "write" if not defined + if (typeof(println) == 'undefined') { + println = function (str) { + java.lang.System.out.println(String(str)); + } + } + + if (typeof(print) == 'undefined') { + print = function (str) { + java.lang.System.out.print(String(str)); + } + } + if (typeof(writeln) == 'undefined') { writeln = println; } @@ -235,7 +258,7 @@ this.jclasses = function() { forEachKlass(function (clazz) { - writeln(clazz.getName().asString() + " @" + clazz.getHandle().toString()); + writeln(clazz.getName().asString() + " @" + clazz.getAddress().toString()); }); } registerCommand("classes", "classes", "jclasses"); @@ -490,14 +513,14 @@ function forEachKlass(callback) { var VisitorClass = sapkg.memory.SystemDictionary.ClassVisitor; var visitor = new VisitorClass() { visit: callback }; - sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassVisitor)"](visitor); + sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassVisitor)"](visitor); } // iterate system dictionary for each 'Klass' and initiating loader function forEachKlassAndLoader(callback) { var VisitorClass = sapkg.memory.SystemDictionary.ClassAndLoaderVisitor; var visitor = new VisitorClass() { visit: callback }; - sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassAndLoaderVisitor)"](visitor); + sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassAndLoaderVisitor)"](visitor); } // iterate system dictionary for each primitive array klass @@ -522,7 +545,12 @@ // iterates Java heap for each Oop function forEachOop(callback) { - sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { doObj: callback }); + function empty() { } + sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { + prologue: empty, + doObj: callback, + epilogue: empty + }); } // iterates Java heap for each Oop of given 'klass'. @@ -536,8 +564,14 @@ if (includeSubtypes == undefined) { includeSubtypes = true; } + + function empty() { } sa.objHeap.iterateObjectsOfKlass( - new sapkg.oops.HeapVisitor() { doObj: callback }, + new sapkg.oops.HeapVisitor() { + prologue: empty, + doObj: callback, + epilogue: empty + }, klass, includeSubtypes); } @@ -746,9 +780,9 @@ // ignore; continue; } else { - // some type names have ':'. replace to make it as a + // some type names have ':', '<', '>', '*', ' '. replace to make it as a // JavaScript identifier - tmp.name = tmp.name.replace(':', '_').replace('<', '_').replace('>', '_').replace('*', '_').replace(' ', '_'); + tmp.name = ("" + tmp.name).replace(/[:<>* ]/g, '_'); eval("function read" + tmp.name + "(addr) {" + " return readVMType('" + tmp.name + "', addr);}"); eval("function print" + tmp.name + "(addr) {" + diff -r ccb4f2af2319 -r cefad50507d8 make/bsd/makefiles/fastdebug.make --- a/make/bsd/makefiles/fastdebug.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/bsd/makefiles/fastdebug.make Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -59,5 +59,5 @@ MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug VERSION = fastdebug -SYSDEFS += -DASSERT +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS PICFLAGS = DEFAULT diff -r ccb4f2af2319 -r cefad50507d8 make/bsd/makefiles/gcc.make --- a/make/bsd/makefiles/gcc.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/bsd/makefiles/gcc.make Fri Oct 11 10:38:03 2013 +0200 @@ -80,7 +80,7 @@ HOSTCC = $(CC) endif - AS = $(CC) -c -x assembler-with-cpp + AS = $(CC) -c endif @@ -129,16 +129,21 @@ # We only use precompiled headers for the JVM build CFLAGS += $(VM_PCH_FLAG) - - # There are some files which don't like precompiled headers - # The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build. - # But Clang doesn't support a precompiled header which was compiled with -O3 - # to be used in a compilation unit which uses '-O0'. We could also prepare an - # extra '-O0' PCH file for the opt build and use it here, but it's probably - # not worth the effort as long as only two files need this special handling. + + # The following files are compiled at various optimization + # levels due to optimization issues encountered at the + # 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a compile + # time error if there is an optimization level specification + # skew between the PCH file and the C++ file. Especially if the + # PCH file is compiled at a higher optimization level than + # the C++ file. One solution might be to prepare extra optimization + # level specific PCH files for the opt build and use them here, but + # it's probably not worth the effort as long as only a few files + # need this special handling. PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH) + PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH) endif else # ($(USE_CLANG), true) @@ -242,12 +247,12 @@ ifeq ($(USE_CLANG), true) # However we need to clean the code up before we can unrestrictedly enable this option with Clang - WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses + WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare # Not yet supported by clang in Xcode 4.6.2 # WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess - WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body + WARNINGS_ARE_ERRORS += -Wno-empty-body endif WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef @@ -257,7 +262,7 @@ # conversions which might affect the values. Only enable it in earlier versions. WARNING_FLAGS = -Wunused-function ifeq ($(USE_CLANG),) - WARNINGS_FLAGS += -Wconversion + WARNING_FLAGS += -Wconversion endif endif @@ -306,6 +311,7 @@ ifeq ($(USE_CLANG), true) ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + OPT_CFLAGS/unsafe.o += -O1 endif else # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. 
@@ -341,6 +347,13 @@ LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN) endif + +#------------------------------------------------------------------------ +# Assembler flags + +# Enforce preprocessing of .s files +ASFLAGS += -x assembler-with-cpp + #------------------------------------------------------------------------ # Linker flags diff -r ccb4f2af2319 -r cefad50507d8 make/excludeSrc.make --- a/make/excludeSrc.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/excludeSrc.make Fri Oct 11 10:38:03 2013 +0200 @@ -88,7 +88,7 @@ g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \ g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \ g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \ - heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \ + g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \ ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \ adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \ cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \ @@ -99,7 +99,7 @@ psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \ parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \ gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \ - mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp + mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp endif ifeq ($(INCLUDE_NMT), false) diff -r ccb4f2af2319 -r cefad50507d8 make/hotspot_version --- a/make/hotspot_version Thu Oct 10 18:26:22 2013 +0200 +++ b/make/hotspot_version Fri Oct 11 10:38:03 2013 +0200 @@ -35,7 +35,7 @@ HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=46 +HS_BUILD_NUMBER=53 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff -r ccb4f2af2319 -r cefad50507d8 make/jprt.properties --- a/make/jprt.properties Thu Oct 10 18:26:22 2013 +0200 +++ b/make/jprt.properties Fri Oct 11 10:38:03 2013 +0200 @@ -120,13 +120,13 @@ jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7} jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}} -jprt.my.windows.i586.jdk8=windows_i586_5.1 -jprt.my.windows.i586.jdk7=windows_i586_5.1 +jprt.my.windows.i586.jdk8=windows_i586_6.1 +jprt.my.windows.i586.jdk7=windows_i586_6.1 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7} jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}} -jprt.my.windows.x64.jdk8=windows_x64_5.2 -jprt.my.windows.x64.jdk7=windows_x64_5.2 +jprt.my.windows.x64.jdk8=windows_x64_6.1 +jprt.my.windows.x64.jdk7=windows_x64_6.1 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7} jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}} diff -r ccb4f2af2319 -r cefad50507d8 make/linux/makefiles/amd64.make --- a/make/linux/makefiles/amd64.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/linux/makefiles/amd64.make Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# # This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,4 @@ CFLAGS += -D_LP64=1 -# The serviceability agent relies on frame pointer (%rbp) to walk thread stack -ifndef USE_SUNCC - CFLAGS += -fno-omit-frame-pointer -endif - OPT_CFLAGS/compactingPermGenGen.o = -O1 diff -r ccb4f2af2319 -r cefad50507d8 make/linux/makefiles/fastdebug.make --- a/make/linux/makefiles/fastdebug.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/linux/makefiles/fastdebug.make Fri Oct 11 10:38:03 2013 +0200 @@ -59,5 +59,5 @@ MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug VERSION = optimized -SYSDEFS += -DASSERT +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS PICFLAGS = DEFAULT diff -r ccb4f2af2319 -r cefad50507d8 make/linux/makefiles/gcc.make --- a/make/linux/makefiles/gcc.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/linux/makefiles/gcc.make Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -208,7 +208,7 @@ ifeq ($(USE_CLANG), true) # However we need to clean the code up before we can unrestrictedly enable this option with Clang - WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses + WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body @@ -398,3 +398,10 @@ ifdef MINIMIZE_RAM_USAGE CFLAGS += -DMINIMIZE_RAM_USAGE endif + +# Stack walking in the JVM relies on frame pointer (%rbp) to walk thread stack. +# Explicitly specify -fno-omit-frame-pointer because it is off by default +# starting with gcc 4.6. 
+ifndef USE_SUNCC + CFLAGS += -fno-omit-frame-pointer +endif diff -r ccb4f2af2319 -r cefad50507d8 make/linux/makefiles/jsig.make --- a/make/linux/makefiles/jsig.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/linux/makefiles/jsig.make Fri Oct 11 10:38:03 2013 +0200 @@ -74,9 +74,9 @@ install_jsig: $(LIBJSIG) @echo "Copying $(LIBJSIG) to $(DEST_JSIG)" - -$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \ + $(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \ cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO) - -$(QUIETLY) test -f $(LIBJSIG_DIZ) && \ + $(QUIETLY) test -f $(LIBJSIG_DIZ) && \ cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ) $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done" diff -r ccb4f2af2319 -r cefad50507d8 make/linux/makefiles/vm.make --- a/make/linux/makefiles/vm.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/linux/makefiles/vm.make Fri Oct 11 10:38:03 2013 +0200 @@ -381,9 +381,9 @@ install_jvm: $(LIBJVM) @echo "Copying $(LIBJVM) to $(DEST_JVM)" - -$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \ + $(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \ cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO) - -$(QUIETLY) test -f $(LIBJVM_DIZ) && \ + $(QUIETLY) test -f $(LIBJVM_DIZ) && \ cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ) $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done" diff -r ccb4f2af2319 -r cefad50507d8 make/solaris/makefiles/jsig.make --- a/make/solaris/makefiles/jsig.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/solaris/makefiles/jsig.make Fri Oct 11 10:38:03 2013 +0200 @@ -79,9 +79,9 @@ install_jsig: $(LIBJSIG) @echo "Copying $(LIBJSIG) to $(DEST_JSIG)" - -$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \ + $(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \ cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO) - -$(QUIETLY) test -f $(LIBJSIG_DIZ) && \ + $(QUIETLY) test -f $(LIBJSIG_DIZ) && \ cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ) $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done" diff -r ccb4f2af2319 -r cefad50507d8 make/solaris/makefiles/vm.make --- a/make/solaris/makefiles/vm.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/solaris/makefiles/vm.make Fri Oct 11 10:38:03 2013 +0200 @@ -341,9 +341,9 @@ install_jvm: $(LIBJVM) @echo "Copying $(LIBJVM) to $(DEST_JVM)" - -$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \ + $(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \ cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO) - -$(QUIETLY) test -f $(LIBJVM_DIZ) && \ + $(QUIETLY) test -f $(LIBJVM_DIZ) && \ cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ) $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done" diff -r ccb4f2af2319 -r cefad50507d8 make/windows/build_vm_def.sh --- a/make/windows/build_vm_def.sh Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/build_vm_def.sh Fri Oct 11 10:38:03 2013 +0200 @@ -42,8 +42,6 @@ MKS_HOME=`dirname "$SH"` fi -echo "EXPORTS" > vm1.def - AWK="$MKS_HOME/awk.exe" if [ ! -e $AWK ]; then AWK="$MKS_HOME/gawk.exe" @@ -56,6 +54,22 @@ DUMPBIN="link.exe /dump" export VS_UNICODE_OUTPUT= +if [ "$1" = "-nosa" ]; then + echo EXPORTS > vm.def + echo "" + echo "***" + echo "*** Not building SA: BUILD_WIN_SA != 1" + echo "*** C++ Vtables NOT included in vm.def" + echo "*** This jvm.dll will NOT work properly with SA." + echo "***" + echo "*** When in doubt, set BUILD_WIN_SA=1, clean and rebuild." 
+ echo "***" + echo "" + exit +fi + +echo "EXPORTS" > vm1.def + # When called from IDE the first param should contain the link version, otherwise may be nill if [ "x$1" != "x" ]; then LD_VER="$1" diff -r ccb4f2af2319 -r cefad50507d8 make/windows/create.bat --- a/make/windows/create.bat Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/create.bat Fri Oct 11 10:38:03 2013 +0200 @@ -86,6 +86,7 @@ echo ************************************************************** set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj +echo MSC_VER = "%MSC_VER%" if "%MSC_VER%" == "1200" ( set ProjectFile=%HotSpotBuildSpace%\jvm.dsp echo Will generate VC6 project {unsupported} @@ -100,11 +101,17 @@ echo Will generate VC10 {Visual Studio 2010} set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj ) else ( +if "%MSC_VER%" == "1700" ( +echo Will generate VC10 {compatible with Visual Studio 2012} +echo After opening in VS 2012, click "Update" when prompted. +set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj +) else ( echo Will generate VC7 project {Visual Studio 2003 .NET} ) ) ) ) +) echo %ProjectFile% echo ************************************************************** diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/debug.make --- a/make/windows/makefiles/debug.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/debug.make Fri Oct 11 10:38:03 2013 +0200 @@ -49,9 +49,6 @@ # Force resources to be rebuilt every time $(Res_Files): FORCE -vm.def: $(Obj_Files) - sh $(WorkSpace)/make/windows/build_vm_def.sh - $(AOUT): $(Res_Files) $(Obj_Files) vm.def $(LD) @<< $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/fastdebug.make --- a/make/windows/makefiles/fastdebug.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/fastdebug.make Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ !include ../local.make !include compile.make -CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) +CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS" !include $(WorkSpace)/make/windows/makefiles/vm.make !include local.make @@ -48,9 +48,6 @@ # Force resources to be rebuilt every time $(Res_Files): FORCE -vm.def: $(Obj_Files) - sh $(WorkSpace)/make/windows/build_vm_def.sh - $(AOUT): $(Res_Files) $(Obj_Files) vm.def $(LD) @<< $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/product.make --- a/make/windows/makefiles/product.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/product.make Fri Oct 11 10:38:03 2013 +0200 @@ -51,9 +51,6 @@ # Force resources to be rebuilt every time $(Res_Files): FORCE -vm.def: $(Obj_Files) - sh $(WorkSpace)/make/windows/build_vm_def.sh - $(AOUT): $(Res_Files) $(Obj_Files) vm.def $(LD) @<< $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/projectcreator.make --- a/make/windows/makefiles/projectcreator.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/projectcreator.make Fri Oct 11 10:38:03 2013 +0200 @@ -93,6 +93,10 @@ -disablePch getThread_windows_$(Platform_arch).cpp \ -disablePch_compiler2 opcodes.cpp +!if "$(BUILD_WIN_SA)" != "1" +BUILD_VM_DEF_FLAG=-nosa +!endif + # Common options for the IDE builds for c1, and c2 ProjectCreatorIDEOptions=\ $(ProjectCreatorIDEOptions) \ @@ -105,7 +109,7 @@ -jdkTargetRoot $(HOTSPOTJDKDIST) \ -define ALIGN_STACK_FRAMES \ -define VM_LITTLE_ENDIAN \ - -prelink "" "Generating vm.def..." "cd %o set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \ + -prelink "" "Generating vm.def..." "cd %o set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(BUILD_VM_DEF_FLAG) $(LD_VER)" \ -postbuild "" "Building hotspot.exe..." "cd %o set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \ -ignoreFile jsig.c \ -ignoreFile jvmtiEnvRecommended.cpp \ @@ -189,7 +193,6 @@ ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \ -define_compiler2 COMPILER2 \ -define_compiler2 GRAAL \ - -define_compiler2 TIERED \ -ignorePath_compiler2 graal/generated \ -additionalFile_compiler2 $(Platform_arch_model).ad \ -additionalFile_compiler2 ad_$(Platform_arch_model).cpp \ diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/rules.make --- a/make/windows/makefiles/rules.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/rules.make Fri Oct 11 10:38:03 2013 +0200 @@ -69,6 +69,13 @@ VcVersion=VC10 ProjectFile=jvm.vcxproj +!elseif "$(MSC_VER)" == "1700" +# This is VS2012, but it loads VS10 projects just fine (and will +# upgrade them automatically to VS2012 format). 
+ +VcVersion=VC10 +ProjectFile=jvm.vcxproj + !else VcVersion=VC7 diff -r ccb4f2af2319 -r cefad50507d8 make/windows/makefiles/vm.make --- a/make/windows/makefiles/vm.make Thu Oct 10 18:26:22 2013 +0200 +++ b/make/windows/makefiles/vm.make Fri Oct 11 10:38:03 2013 +0200 @@ -398,3 +398,11 @@ _build_pch_file.obj: @echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp $(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp + +!if "$(BUILD_WIN_SA)" != "1" +BUILD_VM_DEF_FLAG=-nosa +!endif + +vm.def: $(Obj_Files) + sh $(WorkSpace)/make/windows/build_vm_def.sh $(BUILD_VM_DEF_FLAG) + diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -307,7 +307,7 @@ assert(a_byte == *start++, "should be the same code"); } #endif - } else if (_id == load_mirror_id) { + } else if (_id == load_mirror_id || _id == load_appendix_id) { // produce a copy of the load mirror instruction for use by the being initialized case #ifdef ASSERT address start = __ pc(); @@ -384,6 +384,7 @@ case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break; case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; + case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } __ bind(call_patch); @@ -397,7 +398,7 @@ ce->add_call_info_here(_info); __ br(Assembler::always, false, Assembler::pt, _patch_site_entry); __ delayed()->nop(); - if (_id == load_klass_id || _id == load_mirror_id) { + if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { CodeSection* cs = __ code_section(); address pc = (address)_pc_start; RelocIterator iter(cs, pc, pc + 1); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -105,7 +105,7 @@ if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false; } - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS && src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false; } @@ -520,7 +520,7 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { // Allocate a new index in table to hold the object once it's been patched int oop_index = __ oop_recorder()->allocate_oop_index(NULL); - PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index); + PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index); AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index)); assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); @@ -963,7 +963,7 @@ case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break; case T_ADDRESS: #ifdef _LP64 - if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) { + if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) { __ 
lduw(base, offset, to_reg->as_register()); __ decode_klass_not_null(to_reg->as_register()); } else @@ -2208,7 +2208,7 @@ // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { // We don't need decode because we just need to compare __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); @@ -2342,7 +2342,7 @@ // but not necessarily exactly of type default_type. Label known_ok, halt; metadata2reg(op->expected_type()->constant_encoding(), tmp); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { // tmp holds the default type. It currently comes uncompressed after the // load of a constant, so encode it. __ encode_klass_not_null(tmp); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -186,7 +186,7 @@ set((intx)markOopDesc::prototype(), t1); } st_ptr(t1, obj, oopDesc::mark_offset_in_bytes()); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { // Save klass mov(klass, t1); encode_klass_not_null(t1); @@ -196,7 +196,7 @@ } if (len->is_valid()) { st(len, obj, arrayOopDesc::length_offset_in_bytes()); - } else if (UseCompressedKlassPointers) { + } else if (UseCompressedClassPointers) { // otherwise length is in the class gap store_klass_gap(G0, obj); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/c1_Runtime1_sparc.cpp --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -804,6 +804,12 @@ } break; + case load_appendix_patching_id: + { __ set_info("load_appendix_patching", dont_gc_arguments); + oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); + } + break; + case dtrace_object_alloc_id: { // O0: object __ set_info("dtrace_object_alloc", dont_gc_arguments); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/c2_globals_sparc.hpp --- a/src/cpu/sparc/vm/c2_globals_sparc.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -63,6 +63,7 @@ define_pd_global(bool, UseTLAB, true); define_pd_global(bool, ResizeTLAB, true); define_pd_global(intx, LoopUnrollLimit, 60); // Design center runs on 1.3.1 +define_pd_global(intx, MinJumpTableSize, 5); // Peephole and CISC spilling both break the graph, and so makes the // scheduler sick. 
diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/frame_sparc.cpp --- a/src/cpu/sparc/vm/frame_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -764,7 +764,7 @@ #ifdef CC_INTERP *oop_result = istate->_oop_temp; #else - oop obj = (oop) at(interpreter_frame_oop_temp_offset); + oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check"); *oop_result = obj; #endif // CC_INTERP @@ -788,7 +788,7 @@ switch(type) { case T_OBJECT: case T_ARRAY: { - oop obj = (oop)*tos_addr; + oop obj = cast_to_oop(*tos_addr); assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check"); *oop_result = obj; break; diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/macroAssembler_sparc.cpp --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -3911,7 +3911,7 @@ // The number of bytes in this code is used by // MachCallDynamicJavaNode::ret_addr_offset() // if this changes, change that. - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); decode_klass_not_null(klass); } else { @@ -3920,7 +3920,7 @@ } void MacroAssembler::store_klass(Register klass, Register dst_oop) { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { assert(dst_oop != klass, "not enough registers"); encode_klass_not_null(klass); st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); @@ -3930,7 +3930,7 @@ } void MacroAssembler::store_klass_gap(Register s, Register d) { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { assert(s != d, "not enough registers"); st(s, d, oopDesc::klass_gap_offset_in_bytes()); } @@ -4089,7 +4089,7 @@ } void MacroAssembler::encode_klass_not_null(Register r) { - assert (UseCompressedKlassPointers, "must be compressed"); + assert (UseCompressedClassPointers, "must be compressed"); assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); assert(r != G6_heapbase, "bad register choice"); set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); @@ -4105,7 +4105,7 @@ if (src == dst) { encode_klass_not_null(src); } else { - assert (UseCompressedKlassPointers, "must be compressed"); + assert (UseCompressedClassPointers, "must be compressed"); assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); set((intptr_t)Universe::narrow_klass_base(), dst); sub(src, dst, dst); @@ -4119,7 +4119,7 @@ // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if // the instructions they generate change, then this method needs to be updated. int MacroAssembler::instr_size_for_decode_klass_not_null() { - assert (UseCompressedKlassPointers, "only for compressed klass ptrs"); + assert (UseCompressedClassPointers, "only for compressed klass ptrs"); // set + add + set int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 + insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); @@ -4135,7 +4135,7 @@ void MacroAssembler::decode_klass_not_null(Register r) { // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. 
- assert (UseCompressedKlassPointers, "must be compressed"); + assert (UseCompressedClassPointers, "must be compressed"); assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); assert(r != G6_heapbase, "bad register choice"); set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); @@ -4151,7 +4151,7 @@ } else { // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. - assert (UseCompressedKlassPointers, "must be compressed"); + assert (UseCompressedClassPointers, "must be compressed"); assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); if (Universe::narrow_klass_shift() != 0) { assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); @@ -4167,7 +4167,7 @@ } void MacroAssembler::reinit_heapbase() { - if (UseCompressedOops || UseCompressedKlassPointers) { + if (UseCompressedOops || UseCompressedClassPointers) { if (Universe::heap() != NULL) { set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); } else { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/methodHandles_sparc.cpp --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -121,6 +121,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp, bool for_compiler_entry) { + Label L_no_such_method; assert(method == G5_method, "interpreter calling convention"); assert_different_registers(method, target, temp); @@ -133,6 +134,9 @@ const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); __ ld(interp_only, temp); __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code); + // Null method test is replicated below in compiled case, + // it might be able to address across the verify_thread() + __ br_null_short(G5_method, Assembler::pn, L_no_such_method); __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target); __ jmp(target, 0); __ delayed()->nop(); @@ -141,11 +145,19 @@ // it doesn't matter, since this is interpreter code. } + // Compiled case, either static or fall-through from runtime conditional + __ br_null_short(G5_method, Assembler::pn, L_no_such_method); + const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() : Method::from_interpreted_offset(); __ ld_ptr(G5_method, in_bytes(entry_offset), target); __ jmp(target, 0); __ delayed()->nop(); + + __ bind(L_no_such_method); + AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry()); + __ jump_to(ame, temp); + __ delayed()->nop(); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/nativeInst_sparc.cpp --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -360,7 +360,7 @@ oop_Relocation *r = iter.oop_reloc(); if (oop_addr == NULL) { oop_addr = r->oop_addr(); - *oop_addr = (oop)x; + *oop_addr = cast_to_oop(x); } else { assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); } @@ -480,7 +480,7 @@ oop_Relocation *r = iter.oop_reloc(); if (oop_addr == NULL) { oop_addr = r->oop_addr(); - *oop_addr = (oop)x; + *oop_addr = cast_to_oop(x); } else { assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/sparc.ad Fri Oct 11 10:38:03 2013 +0200 @@ -557,7 +557,7 @@ int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); int klass_load_size; - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { assert(Universe::heap() != NULL, "java heap should be initialized"); klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; } else { @@ -1657,7 +1657,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { st->print_cr("\nUEP:"); #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { assert(Universe::heap() != NULL, "java heap should be initialized"); st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); @@ -1897,7 +1897,7 @@ bool Matcher::narrow_klass_use_complex_address() { NOT_LP64(ShouldNotCallThis()); - assert(UseCompressedKlassPointers, "only for compressed klass code"); + assert(UseCompressedClassPointers, "only for compressed klass code"); return false; } @@ -2018,6 +2018,15 @@ return L7_REGP_mask(); } +const RegMask Matcher::mathExactI_result_proj_mask() { + return G1_REGI_mask(); +} + +const RegMask Matcher::mathExactI_flags_proj_mask() { + return INT_FLAGS_mask(); +} + + %} @@ -2561,7 +2570,7 @@ int off = __ offset(); __ load_klass(O0, G3_scratch); int klass_load_size; - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { assert(Universe::heap() != NULL, "java heap should be initialized"); klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; } else { @@ -4245,12 +4254,16 @@ greater_equal(0xB); less_equal(0x2); greater(0xA); + overflow(0x7); + no_overflow(0xF); %} %} // Comparison Op, unsigned operand cmpOpU() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "u" %} interface(COND_INTER) %{ @@ -4260,12 +4273,16 @@ greater_equal(0xD); less_equal(0x4); greater(0xC); + overflow(0x7); + no_overflow(0xF); %} %} // Comparison Op, pointer (same as unsigned) operand cmpOpP() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "p" %} interface(COND_INTER) %{ @@ -4275,12 +4292,16 @@ greater_equal(0xD); less_equal(0x4); greater(0xC); + overflow(0x7); + no_overflow(0xF); %} %} // Comparison Op, branch-register encoding operand cmpOp_reg() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "" %} interface(COND_INTER) %{ @@ -4290,12 +4311,16 @@ 
greater_equal(0x7); less_equal (0x2); greater (0x6); + overflow(0x7); // not supported + no_overflow(0xF); // not supported %} %} // Comparison Code, floating, unordered same as less operand cmpOpF() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "fl" %} interface(COND_INTER) %{ @@ -4305,12 +4330,17 @@ greater_equal(0xB); less_equal(0xE); greater(0x6); + + overflow(0x7); // not supported + no_overflow(0xF); // not supported %} %} // Used by long compare operand cmpOp_commute() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "" %} interface(COND_INTER) %{ @@ -4320,6 +4350,8 @@ greater_equal(0x2); less_equal(0xB); greater(0x3); + overflow(0x7); + no_overflow(0xF); %} %} diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/stubGenerator_sparc.cpp --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -2945,7 +2945,7 @@ BLOCK_COMMENT("arraycopy argument klass checks"); // get src->klass() - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ delayed()->nop(); // ??? not good __ load_klass(src, G3_src_klass); } else { @@ -2980,7 +2980,7 @@ // Load 32-bits signed value. Use br() instruction with it to check icc. __ lduw(G3_src_klass, lh_offset, G5_lh); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ load_klass(dst, G4_dst_klass); } // Handle objArrays completely differently... @@ -2988,7 +2988,7 @@ __ set(objArray_lh, O5_temp); __ cmp(G5_lh, O5_temp); __ br(Assembler::equal, false, Assembler::pt, L_objArray); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ delayed()->nop(); } else { __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/sparc/vm/vtableStubs_sparc.cpp --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -52,6 +52,11 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { const int sparc_code_length = VtableStub::pd_code_size_limit(true); VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), sparc_code_length); MacroAssembler* masm = new MacroAssembler(&cb); @@ -125,6 +130,11 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { const int sparc_code_length = VtableStub::pd_code_size_limit(false); VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), sparc_code_length); MacroAssembler* masm = new MacroAssembler(&cb); @@ -218,13 +228,13 @@ // ld;ld;ld,jmp,nop const int basic = 5*BytesPerInstWord + // shift;add for load_klass (only shift with zero heap based) - (UseCompressedKlassPointers ? + (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); return basic + slop; } else { const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord + // shift;add for load_klass (only shift with zero heap based) - (UseCompressedKlassPointers ? + (UseCompressedClassPointers ? 
MacroAssembler::instr_size_for_decode_klass_not_null() : 0); return (basic + slop); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/assembler_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -4769,7 +4769,7 @@ } void Assembler::adcq(Register dst, Register src) { - (int) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x13, 0xC0, dst, src); } @@ -4824,7 +4824,7 @@ } void Assembler::andq(Register dst, Register src) { - (int) prefixq_and_encode(dst->encoding(), src->encoding()); + (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x23, 0xC0, dst, src); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_CodeStubs_x86.cpp --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -402,6 +402,7 @@ case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break; case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; + case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } __ bind(call_patch); @@ -419,7 +420,7 @@ for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) { __ nop(); } - if (_id == load_klass_id || _id == load_mirror_id) { + if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { CodeSection* cs = __ code_section(); RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1)); relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_FrameMap_x86.hpp --- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -148,7 +148,7 @@ static int adjust_reg_range(int range) { // Reduce the number of available regs (to free r12) in case of compressed oops - if (UseCompressedOops || UseCompressedKlassPointers) return range - 1; + if (UseCompressedOops || UseCompressedClassPointers) return range - 1; return range; } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -341,7 +341,7 @@ Register receiver = FrameMap::receiver_opr->as_register(); Register ic_klass = IC_Klass; const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); - const bool do_post_padding = VerifyOops || UseCompressedKlassPointers; + const bool do_post_padding = VerifyOops || UseCompressedClassPointers; if (!do_post_padding) { // insert some nops so that the verified entry point is aligned on CodeEntryAlignment while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { @@ -362,7 +362,7 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { jobject o = NULL; - PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id); + PatchingStub* patch = new PatchingStub(_masm, patching_id(info)); __ movoop(reg, o); patching_epilog(patch, lir_patch_normal, reg, info); } @@ -1263,7 
+1263,7 @@ break; case T_ADDRESS: - if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { + if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { __ movl(dest->as_register(), from_addr); } else { __ movptr(dest->as_register(), from_addr); @@ -1371,7 +1371,7 @@ __ verify_oop(dest->as_register()); } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ decode_klass_not_null(dest->as_register()); } #endif @@ -1716,7 +1716,7 @@ } else if (obj == klass_RInfo) { klass_RInfo = dst; } - if (k->is_loaded() && !UseCompressedKlassPointers) { + if (k->is_loaded() && !UseCompressedClassPointers) { select_different_registers(obj, dst, k_RInfo, klass_RInfo); } else { Rtmp1 = op->tmp3()->as_register(); @@ -1724,14 +1724,6 @@ } assert_different_registers(obj, k_RInfo, klass_RInfo); - if (!k->is_loaded()) { - klass2reg_with_patching(k_RInfo, op->info_for_patch()); - } else { -#ifdef _LP64 - __ mov_metadata(k_RInfo, k->constant_encoding()); -#endif // _LP64 - } - assert(obj != k_RInfo, "must be different"); __ cmpptr(obj, (int32_t)NULL_WORD); if (op->should_profile()) { @@ -1748,13 +1740,21 @@ } else { __ jcc(Assembler::equal, *obj_is_null); } + + if (!k->is_loaded()) { + klass2reg_with_patching(k_RInfo, op->info_for_patch()); + } else { +#ifdef _LP64 + __ mov_metadata(k_RInfo, k->constant_encoding()); +#endif // _LP64 + } __ verify_oop(obj); if (op->fast_check()) { // get object class // not a safepoint as obj null check happens earlier #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ load_klass(Rtmp1, obj); __ cmpptr(k_RInfo, Rtmp1); } else { @@ -3294,7 +3294,7 @@ // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ movl(tmp, src_klass_addr); __ cmpl(tmp, dst_klass_addr); } else { @@ -3456,21 +3456,21 @@ Label known_ok, halt; __ mov_metadata(tmp, default_type->constant_encoding()); #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { __ encode_klass_not_null(tmp); } #endif if (basic_type != T_OBJECT) { - if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr); + if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); else __ cmpptr(tmp, dst_klass_addr); __ jcc(Assembler::notEqual, halt); - if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr); + if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); else __ cmpptr(tmp, src_klass_addr); __ jcc(Assembler::equal, known_ok); } else { - if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr); + if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); else __ cmpptr(tmp, dst_klass_addr); __ jcc(Assembler::equal, known_ok); __ cmpptr(src, dst); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_LIRGenerator_x86.cpp --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1239,7 +1239,7 @@ } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if (!x->klass()->is_loaded() || UseCompressedKlassPointers) { + if (!x->klass()->is_loaded() || UseCompressedClassPointers) { tmp3 = new_register(objectType); } __ checkcast(reg, obj.result(), x->klass(), @@ -1261,7 +1261,7 @@ } obj.load_item(); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; - if 
(!x->klass()->is_loaded() || UseCompressedKlassPointers) { + if (!x->klass()->is_loaded() || UseCompressedClassPointers) { tmp3 = new_register(objectType); } __ instanceof(reg, obj.result(), x->klass(), diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_MacroAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -157,7 +157,7 @@ movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype()); } #ifdef _LP64 - if (UseCompressedKlassPointers) { // Take care not to kill klass + if (UseCompressedClassPointers) { // Take care not to kill klass movptr(t1, klass); encode_klass_not_null(t1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); @@ -171,7 +171,7 @@ movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); } #ifdef _LP64 - else if (UseCompressedKlassPointers) { + else if (UseCompressedClassPointers) { xorptr(t1, t1); store_klass_gap(obj, t1); } @@ -334,7 +334,7 @@ assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); int start_offset = offset(); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { load_klass(rscratch1, receiver); cmpptr(rscratch1, iCache); } else { @@ -345,7 +345,7 @@ jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); - assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); + assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c1_Runtime1_x86.cpp --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1499,6 +1499,13 @@ } break; + case load_appendix_patching_id: + { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); + // we should set up register map + oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); + } + break; + case dtrace_object_alloc_id: { // rax,: object StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/c2_globals_x86.hpp --- a/src/cpu/x86/vm/c2_globals_x86.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/c2_globals_x86.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -30,7 +30,6 @@ // Sets the default values for platform dependent flags used by the server compiler. // (see c2_globals.hpp). Alpha-sorted. 
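For readers following the flag changes here: define_pd_global supplies the platform-dependent default that the shared declaration in c2_globals.hpp picks up under the pd_<name> convention. A minimal sketch of the mechanism (an illustration, not the exact HotSpot macro):

    // Illustrative sketch only: each platform file defines a pd_-prefixed
    // constant that the shared flag declaration reads as its default value.
    #define define_pd_global(type, name, value) \
      const type pd_##name = value;

    // So the MinJumpTableSize line below expands to roughly:
    //   const intx pd_MinJumpTableSize = 10;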
- define_pd_global(bool, BackgroundCompilation, true); define_pd_global(bool, UseTLAB, true); define_pd_global(bool, ResizeTLAB, true); @@ -58,6 +57,7 @@ define_pd_global(intx, ConditionalMoveLimit, 3); define_pd_global(intx, FLOATPRESSURE, 6); define_pd_global(intx, FreqInlineSize, 325); +define_pd_global(intx, MinJumpTableSize, 10); #ifdef AMD64 define_pd_global(intx, INTPRESSURE, 13); define_pd_global(intx, InteriorEntryAlignment, 16); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/frame_x86.cpp --- a/src/cpu/x86/vm/frame_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/frame_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -639,7 +639,7 @@ #ifdef CC_INTERP obj = istate->_oop_temp; #else - obj = (oop) at(interpreter_frame_oop_temp_offset); + obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); #endif // CC_INTERP } else { oop* obj_p = (oop*)tos_addr; diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/macroAssembler_x86.cpp --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1635,7 +1635,7 @@ #ifdef ASSERT // TraceBytecodes does not use r12 but saves it over the call, so don't verify // r12 is the heapbase. - LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) + LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) #endif // ASSERT assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); @@ -4802,7 +4802,7 @@ void MacroAssembler::load_klass(Register dst, Register src) { #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst); } else @@ -4817,7 +4817,7 @@ void MacroAssembler::store_klass(Register dst, Register src) { #ifdef _LP64 - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { encode_klass_not_null(src); movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); } else @@ -4892,7 +4892,7 @@ #ifdef _LP64 void MacroAssembler::store_klass_gap(Register dst, Register src) { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { // Store to klass gap in destination movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/methodHandles_x86.cpp --- a/src/cpu/x86/vm/methodHandles_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,11 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, bool for_compiler_entry) { assert(method == rbx, "interpreter calling convention"); + + Label L_no_such_method; + __ testptr(rbx, rbx); + __ jcc(Assembler::zero, L_no_such_method); + __ verify_method_ptr(method); if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { @@ -138,6 +143,9 @@ const ByteSize entry_offset = for_compiler_entry ? 
Method::from_compiled_offset() : Method::from_interpreted_offset(); __ jmp(Address(method, entry_offset)); + + __ bind(L_no_such_method); + __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry())); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, @@ -475,7 +483,7 @@ const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx"; tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, - mh, entry_sp); + (void *)mh, entry_sp); if (Verbose) { tty->print_cr("Registers:"); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/templateInterpreter_x86.hpp --- a/src/cpu/x86/vm/templateInterpreter_x86.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -36,7 +36,7 @@ #ifdef AMD64 const static int InterpreterCodeSize = 240 * 1024; #else - const static int InterpreterCodeSize = 168 * 1024; + const static int InterpreterCodeSize = 176 * 1024; #endif // AMD64 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -304,6 +304,7 @@ // Helpers for commoning out cases in the various types of method entries. // + // increment invocation count & check for overflow // // Note: checking for negative value instead of overflow @@ -356,7 +357,6 @@ __ incrementl(Address(rax, MethodCounters::interpreter_invocation_counter_offset())); } - // Update standard invocation counters __ movl(rcx, invocation_counter); __ incrementl(rcx, InvocationCounter::count_increment); @@ -926,8 +926,8 @@ // rbx: Method* // r13: senderSP must be preserved for slow path, set SP to it on fast path - // rdx: scratch - // rdi: scratch + // c_rarg0: scratch (rdi on non-Win64, rcx on Win64) + // c_rarg1: scratch (rsi on non-Win64, rdx on Win64) Label slow_path; // If we need a safepoint check, generate full interpreter entry. 
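The switch from hard-coded rdi/rdx scratch registers to the c_rarg aliases above (and in the hunk that follows) matters because the x86-64 calling conventions disagree: rdi and rsi are caller-saved argument registers under System V but callee-saved under Win64, so they are not free scratch there. A simplified illustration of the alias idea, not the actual HotSpot declarations:

    // Sketch (assumption: simplified). The first two integer argument
    // registers are caller-saved under both ABIs, hence safe scratch:
    #ifdef _WIN64
    static const Register c_rarg0 = rcx;  // Microsoft x64 args: rcx, rdx, r8, r9
    static const Register c_rarg1 = rdx;  // (rdi/rsi are callee-saved here)
    #else
    static const Register c_rarg0 = rdi;  // System V args: rdi, rsi, rdx, rcx, r8, r9
    static const Register c_rarg1 = rsi;
    #endif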
@@ -941,8 +941,8 @@ // Load parameters const Register crc = rax; // crc - const Register val = rdx; // source java byte value - const Register tbl = rdi; // scratch + const Register val = c_rarg0; // source java byte value + const Register tbl = c_rarg1; // scratch // Arguments are reversed on java expression stack __ movl(val, Address(rsp, wordSize)); // byte value @@ -1001,18 +1001,18 @@ // Calculate address of start element if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { __ movptr(buf, Address(rsp, 3*wordSize)); // long buf - __ movslq(len, Address(rsp, 2*wordSize)); // offset - __ addq(buf, len); // + offset + __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset + __ addq(buf, off); // + offset __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC } else { __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size - __ movslq(len, Address(rsp, 2*wordSize)); // offset - __ addq(buf, len); // + offset + __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset + __ addq(buf, off); // + offset __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC } // Can now load 'len' since we're finished with 'off' - __ movl(len, Address(rsp, wordSize)); // Length + __ movl(len, Address(rsp, wordSize)); // Length __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); // result in rax @@ -1031,6 +1031,7 @@ } return generate_native_entry(false); } + // Interpreter stub for calling a native method. (asm interpreter) // This sets up a somewhat different looking stack for calling the // native method than the typical interpreter frame setup. diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/vtableStubs_x86_32.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -58,6 +58,11 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { const int i486_code_length = VtableStub::pd_code_size_limit(true); VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), i486_code_length); MacroAssembler* masm = new MacroAssembler(&cb); @@ -132,6 +137,11 @@ // add code here, bump the code stub size returned by pd_code_size_limit! const int i486_code_length = VtableStub::pd_code_size_limit(false); VtableStub* s = new(i486_code_length) VtableStub(false, itable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), i486_code_length); MacroAssembler* masm = new MacroAssembler(&cb); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/vtableStubs_x86_64.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -49,6 +49,11 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { const int amd64_code_length = VtableStub::pd_code_size_limit(true); VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), amd64_code_length); MacroAssembler* masm = new MacroAssembler(&cb); @@ -126,6 +131,11 @@ // returned by pd_code_size_limit! 
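The s == NULL checks above (and in the hunk below) exist because VtableStub overloads operator new to carve the stub out of the code cache, and that allocation can fail when the cache is full. A hedged sketch of the shape of such an allocator; code_cache_alloc is a hypothetical stand-in:

    // Sketch only, not the real implementation. Declaring the allocator
    // throw() means a NULL result skips the constructor, so callers can
    // simply test the returned pointer as done in these stubs.
    void* VtableStub::operator new(size_t size, int code_size) throw() {
      return code_cache_alloc(size + code_size);  // may be NULL: cache exhausted
    }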
const int amd64_code_length = VtableStub::pd_code_size_limit(false); VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index); + // Can be NULL if there is no free space in the code cache. + if (s == NULL) { + return NULL; + } + ResourceMark rm; CodeBuffer cb(s->entry_point(), amd64_code_length); MacroAssembler* masm = new MacroAssembler(&cb); @@ -211,11 +221,11 @@ if (is_vtable_stub) { // Vtable stub size return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) + - (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); + (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); } else { // Itable stub size return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) + - (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); + (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); } // In order to tune these parameters, run the JVM with VM options // +PrintMiscellaneous and +WizardMode to see information about diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/x86_32.ad Fri Oct 11 10:38:03 2013 +0200 @@ -351,7 +351,7 @@ int format) { #ifdef ASSERT if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) { - assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code"); + assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code"); } #endif cbuf.relocate(cbuf.insts_mark(), rspec, format); @@ -1534,6 +1534,14 @@ return EBP_REG_mask(); } +const RegMask Matcher::mathExactI_result_proj_mask() { + return EAX_REG_mask(); +} + +const RegMask Matcher::mathExactI_flags_proj_mask() { + return INT_FLAGS_mask(); +} + // Returns true if the high 32 bits of the value is known to be zero. 
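The two projection masks just added describe how the matcher splits a math-exact node: one projection carries the integer result (pinned to EAX here, RAX on x86-64) and the other carries the condition flags consumed by the overflow branch. A rough C++ analogue of the intended semantics, for illustration only:

    // Rough analogue (not VM code) of what AddExactI computes: the add
    // either yields a result or signals overflow for the slow/deopt path.
    static bool add_exact_int(int a, int b, int* result) {
      long long wide = (long long)a + (long long)b;  // widened add cannot wrap
      if (wide != (int)wide) {
        return false;        // overflow flag set: take the slow path
      }
      *result = (int)wide;   // fast path: result lives in the EAX projection
      return true;
    }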
bool is_operand_hi32_zero(Node* n) { int opc = n->Opcode(); @@ -4922,6 +4930,8 @@ greater_equal(0xD, "ge"); less_equal(0xE, "le"); greater(0xF, "g"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4939,6 +4949,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4957,6 +4969,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4974,6 +4988,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4981,6 +4997,8 @@ operand cmpOp_fcmov() %{ match(Bool); + predicate(n->as_Bool()->_test._test != BoolTest::overflow && + n->as_Bool()->_test._test != BoolTest::no_overflow); format %{ "" %} interface(COND_INTER) %{ equal (0x0C8); @@ -4989,6 +5007,8 @@ greater_equal(0x1C0); less_equal (0x0D0); greater (0x1D0); + overflow(0x0, "o"); // not really supported by the instruction + no_overflow(0x1, "no"); // not really supported by the instruction %} %} @@ -5004,6 +5024,8 @@ greater_equal(0xE, "le"); less_equal(0xD, "ge"); greater(0xC, "l"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -7496,6 +7518,31 @@ //----------Arithmetic Instructions-------------------------------------------- //----------Addition Instructions---------------------------------------------- + +instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr) +%{ + match(AddExactI dst src); + effect(DEF cr); + + format %{ "ADD $dst, $src\t# addExact int" %} + ins_encode %{ + __ addl($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg); +%} + +instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr) +%{ + match(AddExactI dst src); + effect(DEF cr); + + format %{ "ADD $dst, $src\t# addExact int" %} + ins_encode %{ + __ addl($dst$$Register, $src$$constant); + %} + ins_pipe(ialu_reg_reg); +%} + // Integer Addition Instructions instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{ match(Set dst (AddI dst src)); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/x86/vm/x86_64.ad Fri Oct 11 10:38:03 2013 +0200 @@ -529,7 +529,7 @@ if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) { assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop"); - assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code"); + assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code"); } #endif cbuf.relocate(cbuf.insts_mark(), rspec, format); @@ -556,7 +556,7 @@ if (rspec.reloc()->type() == relocInfo::oop_type && d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) { assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop"); - assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()), + assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()), "cannot embed scavengable oops in code"); } #endif @@ -1391,7 +1391,7 @@ #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed 
klass"); st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1"); st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check"); @@ -1408,7 +1408,7 @@ { MacroAssembler masm(&cbuf); uint insts_size = cbuf.insts_size(); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { masm.load_klass(rscratch1, j_rarg0); masm.cmpptr(rax, rscratch1); } else { @@ -1557,7 +1557,7 @@ } bool Matcher::narrow_klass_use_complex_address() { - assert(UseCompressedKlassPointers, "only for compressed klass code"); + assert(UseCompressedClassPointers, "only for compressed klass code"); return (LogKlassAlignmentInBytes <= 3); } @@ -1649,6 +1649,14 @@ return PTR_RBP_REG_mask(); } +const RegMask Matcher::mathExactI_result_proj_mask() { + return INT_RAX_REG_mask(); +} + +const RegMask Matcher::mathExactI_flags_proj_mask() { + return INT_FLAGS_mask(); +} + %} //----------ENCODING BLOCK----------------------------------------------------- @@ -4133,6 +4141,8 @@ greater_equal(0xD, "ge"); less_equal(0xE, "le"); greater(0xF, "g"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4151,6 +4161,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4170,6 +4182,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -4187,6 +4201,8 @@ greater_equal(0x3, "nb"); less_equal(0x6, "be"); greater(0x7, "nbe"); + overflow(0x0, "o"); + no_overflow(0x1, "no"); %} %} @@ -6922,6 +6938,30 @@ //----------Arithmetic Instructions-------------------------------------------- //----------Addition Instructions---------------------------------------------- +instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr) +%{ + match(AddExactI dst src); + effect(DEF cr); + + format %{ "addl $dst, $src\t# addExact int" %} + ins_encode %{ + __ addl($dst$$Register, $src$$Register); + %} + ins_pipe(ialu_reg_reg); +%} + +instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr) +%{ + match(AddExactI dst src); + effect(DEF cr); + + format %{ "addl $dst, $src\t# addExact int" %} + ins_encode %{ + __ addl($dst$$Register, $src$$constant); + %} + ins_pipe(ialu_reg_reg); +%} + instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr) %{ match(Set dst (AddI dst src)); diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/assembler_zero.cpp --- a/src/cpu/zero/vm/assembler_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/assembler_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -50,6 +50,7 @@ #ifdef ASSERT bool AbstractAssembler::pd_check_instruction_mark() { ShouldNotCallThis(); + return false; } #endif @@ -73,6 +74,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl( intptr_t* delayed_value_addr, Register tmpl, int offset) { ShouldNotCallThis(); + return RegisterOrConstant(); } void MacroAssembler::store_oop(jobject obj) { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/cppInterpreter_zero.cpp --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1008,6 +1008,7 @@ address CppInterpreter::return_entry(TosState state, int length) { ShouldNotCallThis(); + return NULL; } address CppInterpreter::deopt_entry(TosState state, int length) { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/frame_zero.cpp --- a/src/cpu/zero/vm/frame_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/frame_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -116,6 +116,7 @@ bool 
frame::safe_for_sender(JavaThread *thread) { ShouldNotCallThis(); + return false; } void frame::pd_gc_epilog() { @@ -123,6 +124,7 @@ bool frame::is_interpreted_frame_valid(JavaThread *thread) const { ShouldNotCallThis(); + return false; } BasicType frame::interpreter_frame_result(oop* oop_result, @@ -184,9 +186,8 @@ int frame::frame_size(RegisterMap* map) const { #ifdef PRODUCT ShouldNotCallThis(); -#else +#endif // PRODUCT return 0; // make javaVFrame::print_value work -#endif // PRODUCT } intptr_t* frame::interpreter_frame_tos_at(jint offset) const { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/frame_zero.inline.hpp --- a/src/cpu/zero/vm/frame_zero.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/frame_zero.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -36,7 +36,7 @@ _deopt_state = unknown; } -inline address frame::sender_pc() const { ShouldNotCallThis(); } +inline address frame::sender_pc() const { ShouldNotCallThis(); return NULL; } inline frame::frame(ZeroFrame* zf, intptr_t* sp) { _zeroframe = zf; @@ -89,6 +89,7 @@ inline intptr_t* frame::link() const { ShouldNotCallThis(); + return NULL; } #ifdef CC_INTERP @@ -151,14 +152,17 @@ inline oop frame::saved_oop_result(RegisterMap* map) const { ShouldNotCallThis(); + return NULL; } inline bool frame::is_older(intptr_t* id) const { ShouldNotCallThis(); + return false; } inline intptr_t* frame::entry_frame_argument_at(int offset) const { ShouldNotCallThis(); + return NULL; } inline intptr_t* frame::unextended_sp() const { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/icBuffer_zero.cpp --- a/src/cpu/zero/vm/icBuffer_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/icBuffer_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -49,8 +49,10 @@ address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) { // NB ic_stub_code_size() must return the size of the code we generate ShouldNotCallThis(); + return NULL; } void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) { ShouldNotCallThis(); + return NULL; } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/interp_masm_zero.hpp --- a/src/cpu/zero/vm/interp_masm_zero.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/interp_masm_zero.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -40,6 +40,7 @@ Register tmp, int offset) { ShouldNotCallThis(); + return RegisterOrConstant(); } }; diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/interpreter_zero.cpp --- a/src/cpu/zero/vm/interpreter_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/interpreter_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -64,6 +64,7 @@ return NULL; Unimplemented(); + return NULL; } address InterpreterGenerator::generate_abstract_entry() { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/nativeInst_zero.hpp --- a/src/cpu/zero/vm/nativeInst_zero.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/nativeInst_zero.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -51,15 +51,18 @@ public: bool is_jump() { ShouldNotCallThis(); + return false; } bool is_safepoint_poll() { ShouldNotCallThis(); + return false; } }; inline NativeInstruction* nativeInstruction_at(address address) { ShouldNotCallThis(); + return NULL; } class NativeCall : public NativeInstruction { @@ -70,18 +73,22 @@ address instruction_address() const { ShouldNotCallThis(); + return NULL; } address next_instruction_address() const { ShouldNotCallThis(); + return NULL; } address return_address() const { ShouldNotCallThis(); + return NULL; } address destination() const { ShouldNotCallThis(); + return NULL; } void 
set_destination_mt_safe(address dest) { @@ -98,25 +105,30 @@ static bool is_call_before(address return_address) { ShouldNotCallThis(); + return false; } }; inline NativeCall* nativeCall_before(address return_address) { ShouldNotCallThis(); + return NULL; } inline NativeCall* nativeCall_at(address address) { ShouldNotCallThis(); + return NULL; } class NativeMovConstReg : public NativeInstruction { public: address next_instruction_address() const { ShouldNotCallThis(); + return NULL; } intptr_t data() const { ShouldNotCallThis(); + return 0; } void set_data(intptr_t x) { @@ -126,12 +138,14 @@ inline NativeMovConstReg* nativeMovConstReg_at(address address) { ShouldNotCallThis(); + return NULL; } class NativeMovRegMem : public NativeInstruction { public: int offset() const { ShouldNotCallThis(); + return 0; } void set_offset(intptr_t x) { @@ -145,6 +159,7 @@ inline NativeMovRegMem* nativeMovRegMem_at(address address) { ShouldNotCallThis(); + return NULL; } class NativeJump : public NativeInstruction { @@ -155,6 +170,7 @@ address jump_destination() const { ShouldNotCallThis(); + return NULL; } void set_jump_destination(address dest) { @@ -172,12 +188,14 @@ inline NativeJump* nativeJump_at(address address) { ShouldNotCallThis(); + return NULL; } class NativeGeneralJump : public NativeInstruction { public: address jump_destination() const { ShouldNotCallThis(); + return NULL; } static void insert_unconditional(address code_pos, address entry) { @@ -191,6 +209,7 @@ inline NativeGeneralJump* nativeGeneralJump_at(address address) { ShouldNotCallThis(); + return NULL; } #endif // CPU_ZERO_VM_NATIVEINST_ZERO_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/register_zero.cpp --- a/src/cpu/zero/vm/register_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/register_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,8 +32,10 @@ const char* RegisterImpl::name() const { ShouldNotCallThis(); + return NULL; } const char* FloatRegisterImpl::name() const { ShouldNotCallThis(); + return NULL; } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/relocInfo_zero.cpp --- a/src/cpu/zero/vm/relocInfo_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/relocInfo_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -37,6 +37,7 @@ address Relocation::pd_call_destination(address orig_addr) { ShouldNotCallThis(); + return NULL; } void Relocation::pd_set_call_destination(address x) { @@ -45,6 +46,7 @@ address Relocation::pd_get_address_from_code() { ShouldNotCallThis(); + return NULL; } address* Relocation::pd_address_in_code() { diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/sharedRuntime_zero.cpp --- a/src/cpu/zero/vm/sharedRuntime_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -89,6 +89,7 @@ ret_type); #else ShouldNotCallThis(); + return NULL; #endif // SHARK } @@ -99,6 +100,7 @@ uint SharedRuntime::out_preserve_stack_slots() { ShouldNotCallThis(); + return 0; } JRT_LEAF(void, zero_stub()) @@ -135,4 +137,5 @@ VMRegPair *regs, int total_args_passed) { ShouldNotCallThis(); + return 0; } diff -r ccb4f2af2319 -r cefad50507d8 src/cpu/zero/vm/vtableStubs_zero.cpp --- a/src/cpu/zero/vm/vtableStubs_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/cpu/zero/vm/vtableStubs_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -39,16 +39,20 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { ShouldNotCallThis(); + return NULL; } VtableStub* VtableStubs::create_itable_stub(int vtable_index) { ShouldNotCallThis(); + return NULL; } int 
VtableStub::pd_code_size_limit(bool is_vtable_stub) { ShouldNotCallThis(); + return 0; } int VtableStub::pd_code_alignment() { ShouldNotCallThis(); + return 0; } diff -r ccb4f2af2319 -r cefad50507d8 src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/bsd/vm/os_bsd.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -642,13 +642,14 @@ #endif #ifdef __APPLE__ -static uint64_t locate_unique_thread_id() { +static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) { // Additional thread_id used to correlate threads in SA thread_identifier_info_data_t m_ident_info; mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT; - thread_info(::mach_thread_self(), THREAD_IDENTIFIER_INFO, + thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO, (thread_info_t) &m_ident_info, &count); + return m_ident_info.thread_id; } #endif @@ -679,9 +680,14 @@ } #ifdef __APPLE__ - // thread_id is mach thread on macos - osthread->set_thread_id(::mach_thread_self()); - osthread->set_unique_thread_id(locate_unique_thread_id()); + // thread_id is mach thread on macos, which pthreads graciously caches and provides for us + mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self()); + guarantee(thread_id != 0, "thread id missing from pthreads"); + osthread->set_thread_id(thread_id); + + uint64_t unique_thread_id = locate_unique_thread_id(thread_id); + guarantee(unique_thread_id != 0, "unique thread id was not found"); + osthread->set_unique_thread_id(unique_thread_id); #else // thread_id is pthread_id on BSD osthread->set_thread_id(::pthread_self()); @@ -843,8 +849,14 @@ // Store pthread info into the OSThread #ifdef __APPLE__ - osthread->set_thread_id(::mach_thread_self()); - osthread->set_unique_thread_id(locate_unique_thread_id()); + // thread_id is mach thread on macos, which pthreads graciously caches and provides for us + mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self()); + guarantee(thread_id != 0, "just checking"); + osthread->set_thread_id(thread_id); + + uint64_t unique_thread_id = locate_unique_thread_id(thread_id); + guarantee(unique_thread_id != 0, "just checking"); + osthread->set_unique_thread_id(unique_thread_id); #else osthread->set_thread_id(::pthread_self()); #endif @@ -1115,7 +1127,7 @@ intx os::current_thread_id() { #ifdef __APPLE__ - return (intx)::mach_thread_self(); + return (intx)::pthread_mach_thread_np(::pthread_self()); #else return (intx)::pthread_self(); #endif @@ -2313,7 +2325,9 @@ } -char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { +char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) { + fatal("This code is not used or maintained."); + // "exec" is passed in but not used. Creating the shared image for // the code cache doesn't have an SHM_X executable permission to check. 
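Context for the os_bsd.cpp thread-id changes above: on macOS, mach_thread_self() allocates a new send right on every call, which the caller must release with mach_port_deallocate() or the task leaks port references; pthread_mach_thread_np() instead returns the port pthreads already caches. A minimal sketch of the difference:

    #include <mach/mach.h>
    #include <pthread.h>

    static void thread_port_apis() {  // illustration only
      // mach_thread_self() returns a NEW send right that must be released:
      mach_port_t fresh = mach_thread_self();
      mach_port_deallocate(mach_task_self(), fresh);  // forgetting this leaks

      // pthread_mach_thread_np() hands back the cached port; nothing to free:
      mach_port_t cached = pthread_mach_thread_np(pthread_self());
      (void)cached;
    }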
assert(UseLargePages && UseSHM, "only for SHM large pages"); @@ -3275,11 +3289,15 @@ // and if UserSignalHandler is installed all bets are off if (CheckJNICalls) { if (libjsig_is_loaded) { - tty->print_cr("Info: libjsig is activated, all active signal checking is disabled"); + if (PrintJNIResolving) { + tty->print_cr("Info: libjsig is activated, all active signal checking is disabled"); + } check_signals = false; } if (AllowUserSignalHandlers) { - tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); + if (PrintJNIResolving) { + tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); + } check_signals = false; } } @@ -3571,8 +3589,6 @@ #endif } - os::large_page_init(); - // initialize suspend/resume support - must do this before signal_sets_init() if (SR_initialize() != 0) { perror("SR_initialize failed"); @@ -4736,3 +4752,8 @@ return n; } +#ifndef PRODUCT +void TestReserveMemorySpecial_test() { + // No tests available for this platform +} +#endif diff -r ccb4f2af2319 -r cefad50507d8 src/os/linux/vm/globals_linux.hpp --- a/src/os/linux/vm/globals_linux.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/linux/vm/globals_linux.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -40,6 +40,9 @@ product(bool, UseHugeTLBFS, false, \ "Use MAP_HUGETLB for large pages") \ \ + product(bool, UseTransparentHugePages, false, \ + "Use MADV_HUGEPAGE for large pages") \ + \ product(bool, LoadExecStackDllInVMThread, true, \ "Load DLLs with executable-stack attribute in the VM Thread") \ \ diff -r ccb4f2af2319 -r cefad50507d8 src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/linux/vm/os_linux.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -131,6 +131,7 @@ bool os::Linux::_supports_fast_thread_cpu_time = false; const char * os::Linux::_glibc_version = NULL; const char * os::Linux::_libpthread_version = NULL; +pthread_condattr_t os::Linux::_condattr[1]; static jlong initial_time_count=0; @@ -1399,12 +1400,15 @@ clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) { // yes, monotonic clock is supported _clock_gettime = clock_gettime_func; + return; } else { // close librt if there is no monotonic clock dlclose(handle); } } } + warning("No monotonic clock was available - timed services may " \ + "be adversely affected if the time-of-day clock changes"); } #ifndef SYS_clock_getres @@ -2165,23 +2169,49 @@ } // Try to identify popular distros. -// Most Linux distributions have /etc/XXX-release file, which contains -// the OS version string. Some have more than one /etc/XXX-release file -// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.), -// so the order is important. +// Most Linux distributions have a /etc/XXX-release file, which contains +// the OS version string. Newer Linux distributions have a /etc/lsb-release +// file that also contains the OS version string. Some have more than one +// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and +// /etc/redhat-release.), so the order is important. +// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have +// their own specific XXX-release file as well as a redhat-release file. +// Because of this the XXX-release file needs to be searched for before the +// redhat-release file. +// Since Red Hat has a lsb-release file that is not very descriptive the +// search for redhat-release needs to be before lsb-release. 
+// Since the lsb-release file is the new standard it needs to be searched +// before the older style release files. +// Searching system-release (Red Hat) and os-release (other Linuxes) are a +// next to last resort. The os-release file is a new standard that contains +// distribution information and the system-release file seems to be an old +// standard that has been replaced by the lsb-release and os-release files. +// Searching for the debian_version file is the last resort. It contains +// an informative string like "6.0.6" or "wheezy/sid". Because of this +// "Debian " is printed before the contents of the debian_version file. void os::Linux::print_distro_info(outputStream* st) { - if (!_print_ascii_file("/etc/mandrake-release", st) && - !_print_ascii_file("/etc/sun-release", st) && - !_print_ascii_file("/etc/redhat-release", st) && - !_print_ascii_file("/etc/SuSE-release", st) && - !_print_ascii_file("/etc/turbolinux-release", st) && - !_print_ascii_file("/etc/gentoo-release", st) && - !_print_ascii_file("/etc/debian_version", st) && - !_print_ascii_file("/etc/ltib-release", st) && - !_print_ascii_file("/etc/angstrom-version", st)) { - st->print("Linux"); - } - st->cr(); + if (!_print_ascii_file("/etc/oracle-release", st) && + !_print_ascii_file("/etc/mandriva-release", st) && + !_print_ascii_file("/etc/mandrake-release", st) && + !_print_ascii_file("/etc/sun-release", st) && + !_print_ascii_file("/etc/redhat-release", st) && + !_print_ascii_file("/etc/lsb-release", st) && + !_print_ascii_file("/etc/SuSE-release", st) && + !_print_ascii_file("/etc/turbolinux-release", st) && + !_print_ascii_file("/etc/gentoo-release", st) && + !_print_ascii_file("/etc/ltib-release", st) && + !_print_ascii_file("/etc/angstrom-version", st) && + !_print_ascii_file("/etc/system-release", st) && + !_print_ascii_file("/etc/os-release", st)) { + + if (file_exists("/etc/debian_version")) { + st->print("Debian "); + _print_ascii_file("/etc/debian_version", st); + } else { + st->print("Linux"); + } + } + st->cr(); } void os::Linux::print_libversion_info(outputStream* st) { @@ -2720,36 +2750,7 @@ int os::Linux::commit_memory_impl(char* addr, size_t size, size_t alignment_hint, bool exec) { - int err; - if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { - int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; - uintptr_t res = - (uintptr_t) ::mmap(addr, size, prot, - MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB, - -1, 0); - if (res != (uintptr_t) MAP_FAILED) { - if (UseNUMAInterleaving) { - numa_make_global(addr, size); - } - return 0; - } - - err = errno; // save errno from mmap() call above - - if (!recoverable_mmap_error(err)) { - // However, it is not clear that this loss of our reserved mapping - // happens with large pages on Linux or that we cannot recover - // from the loss. For now, we just issue a warning and we don't - // call vm_exit_out_of_memory(). This issue is being tracked by - // JBS-8007074. 
- warn_fail_commit_memory(addr, size, alignment_hint, exec, err); -// vm_exit_out_of_memory(size, OOM_MMAP_ERROR, -// "committing reserved memory."); - } - // Fall through and try to use small pages - } - - err = os::Linux::commit_memory_impl(addr, size, exec); + int err = os::Linux::commit_memory_impl(addr, size, exec); if (err == 0) { realign_memory(addr, size, alignment_hint); } @@ -2774,7 +2775,7 @@ } void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { - if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { + if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) { // We don't check the return value: madvise(MADV_HUGEPAGE) may not // be supported or the memory may already be backed by huge pages. ::madvise(addr, bytes, MADV_HUGEPAGE); @@ -2787,7 +2788,7 @@ // uncommitted at all. We don't do anything in this case to avoid creating a segment with // small pages on top of the SHM segment. This method always works for small pages, so we // allow that in any case. - if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) { + if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) { commit_memory(addr, bytes, alignment_hint, !ExecMem); } } @@ -2796,7 +2797,19 @@ Linux::numa_interleave_memory(addr, bytes); } +// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the +// bind policy to MPOL_PREFERRED for the current thread. +#define USE_MPOL_PREFERRED 0 + void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { + // To make NUMA and large pages more robust when both enabled, we need to ease + // the requirements on where the memory should be allocated. MPOL_BIND is the + // default policy and it will force memory to be allocated on the specified + // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on + // the specified node, but will not force it. Using this policy will prevent + // getting SIGBUS when trying to allocate large pages on NUMA nodes with no + // free large pages. + Linux::numa_set_bind_policy(USE_MPOL_PREFERRED); Linux::numa_tonode_memory(addr, bytes, lgrp_hint); } @@ -2898,6 +2911,8 @@ libnuma_dlsym(handle, "numa_tonode_memory"))); set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, libnuma_dlsym(handle, "numa_interleave_memory"))); + set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t, + libnuma_dlsym(handle, "numa_set_bind_policy"))); if (numa_available() != -1) { @@ -2964,6 +2979,7 @@ os::Linux::numa_available_func_t os::Linux::_numa_available; os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; +os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; unsigned long* os::Linux::_numa_all_nodes; bool os::pd_uncommit_memory(char* addr, size_t size) { @@ -2972,6 +2988,53 @@ return res != (uintptr_t) MAP_FAILED; } +static +address get_stack_commited_bottom(address bottom, size_t size) { + address nbot = bottom; + address ntop = bottom + size; + + size_t page_sz = os::vm_page_size(); + unsigned pages = size / page_sz; + + unsigned char vec[1]; + unsigned imin = 1, imax = pages + 1, imid; + int mincore_return_value; + + while (imin < imax) { + imid = (imax + imin) / 2; + nbot = ntop - (imid * page_sz); + + // Use a trick with mincore to check whether the page is mapped or not. 
+ // mincore sets vec to 1 if the page resides in memory and to 0 if the page + // is swapped out, but if the page we are asking for is unmapped + // it returns -1, ENOMEM + mincore_return_value = mincore(nbot, page_sz, vec); + + if (mincore_return_value == -1) { + // Page is not mapped, go up + // to find the first mapped page + if (errno != EAGAIN) { + assert(errno == ENOMEM, "Unexpected mincore errno"); + imax = imid; + } + } else { + // Page is mapped, go down + // to find the first unmapped page + imin = imid + 1; + } + } + + nbot = nbot + page_sz; + + // Adjust stack bottom one page up if the last checked page is not mapped + if (mincore_return_value == -1) { + nbot = nbot + page_sz; + } + + return nbot; +} + + // Linux uses a growable mapping for the stack, and if the mapping for // the stack guard pages is not removed when we detach a thread the // stack cannot grow beyond the pages where the stack guard was @@ -2986,59 +3049,37 @@ // So, we need to know the extent of the stack mapping when // create_stack_guard_pages() is called. -// Find the bounds of the stack mapping. Return true for success. -// // We only need this for stacks that are growable: at the time of // writing thread stacks don't use growable mappings (i.e. those // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this // only applies to the main thread. -static -bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) { - - char buf[128]; - int fd, sz; - - if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) { - return false; - } - - const char kw[] = "[stack]"; - const int kwlen = sizeof(kw)-1; - - // Address part of /proc/self/maps couldn't be more than 128 bytes - while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) { - if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) { - // Extract addresses - if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { - uintptr_t sp = (uintptr_t) __builtin_frame_address(0); - if (sp >= *bottom && sp <= *top) { - ::close(fd); - return true; - } - } - } - } - - ::close(fd); - return false; -} - - // If the (growable) stack mapping already extends beyond the point // where we're going to put our guard pages, truncate the mapping at // that point by munmap()ping it. This ensures that when we later // munmap() the guard pages we don't leave a hole in the stack -// mapping. This only affects the main/initial thread, but guard -// against future OS changes +// mapping. This only affects the main/initial thread + bool os::pd_create_stack_guard_pages(char* addr, size_t size) { - uintptr_t stack_extent, stack_base; - bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); - if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { - assert(os::Linux::is_initial_thread(), - "growable stack in non-initial thread"); - if (stack_extent < (uintptr_t)addr) - ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent); + + if (os::Linux::is_initial_thread()) { + // As we manually grow stack up to bottom inside create_attached_thread(), + // it's likely that os::Linux::initial_thread_stack_bottom is mapped and + // we don't need to do anything special. + // Check it first, before calling heavy function. 
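The binary search in get_stack_commited_bottom() above can be condensed as follows (a sketch assuming a page-aligned top and a single mapped region at the top of the range): probe pages with mincore(), treating a -1/ENOMEM result as "unmapped".

    #include <sys/mman.h>
    #include <unistd.h>
    #include <errno.h>

    // Sketch only: locate (approximately) the lowest mapped page in
    // [bottom, bottom + size), assuming 'bottom + size' is page-aligned.
    static char* lowest_mapped_page(char* bottom, size_t size) {
      size_t page = (size_t)sysconf(_SC_PAGESIZE);
      char* top = bottom + size;
      size_t lo = 1, hi = size / page + 1;  // candidate distance from top, in pages
      unsigned char vec[1];
      while (lo < hi) {
        size_t mid = (lo + hi) / 2;
        if (mincore(top - mid * page, page, vec) == -1 && errno == ENOMEM) {
          hi = mid;      // probe page unmapped: the boundary is nearer the top
        } else {
          lo = mid + 1;  // probe page mapped: keep probing further down
        }
      }
      return top - lo * page + page;  // first page known to be mapped
    }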
+ uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom(); + unsigned char vec[1]; + + if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) { + // Fallback to slow path on all errors, including EAGAIN + stack_extent = (uintptr_t) get_stack_commited_bottom( + os::Linux::initial_thread_stack_bottom(), + (size_t)addr - stack_extent); + } + + if (stack_extent < (uintptr_t)addr) { + ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent)); + } } return os::commit_memory(addr, size, !ExecMem); @@ -3047,13 +3088,13 @@ // If this is a growable mapping, remove the guard pages entirely by // munmap()ping them. If not, just call uncommit_memory(). This only // affects the main/initial thread, but guard against future OS changes +// It's safe to always unmap guard pages for initial thread because we +// always place it right after end of the mapped region + bool os::remove_stack_guard_pages(char* addr, size_t size) { uintptr_t stack_extent, stack_base; - bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); - if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { - assert(os::Linux::is_initial_thread(), - "growable stack in non-initial thread"); - + + if (os::Linux::is_initial_thread()) { return ::munmap(addr, size) == 0; } @@ -3157,11 +3198,31 @@ return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); } +bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) { + bool result = false; + void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE, + -1, 0); + if (p != MAP_FAILED) { + void *aligned_p = align_ptr_up(p, page_size); + + result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0; + + munmap(p, page_size * 2); + } + + if (warn && !result) { + warning("TransparentHugePages is not supported by the operating system."); + } + + return result; +} + bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { bool result = false; - void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, - -1, 0); + void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, + -1, 0); if (p != MAP_FAILED) { // We don't know if this really is a huge page or not. @@ -3182,12 +3243,10 @@ } fclose(fp); } - munmap (p, page_size); - if (result) - return true; - } - - if (warn) { + munmap(p, page_size); + } + + if (warn && !result) { warning("HugeTLBFS is not supported by the operating system."); } @@ -3235,82 +3294,114 @@ static size_t _large_page_size = 0; -void os::large_page_init() { - if (!UseLargePages) { - UseHugeTLBFS = false; - UseSHM = false; - return; - } - - if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) { - // If UseLargePages is specified on the command line try both methods, - // if it's default, then try only HugeTLBFS. - if (FLAG_IS_DEFAULT(UseLargePages)) { - UseHugeTLBFS = true; - } else { - UseHugeTLBFS = UseSHM = true; - } - } - - if (LargePageSizeInBytes) { - _large_page_size = LargePageSizeInBytes; - } else { - // large_page_size on Linux is used to round up heap size. x86 uses either - // 2M or 4M page, depending on whether PAE (Physical Address Extensions) - // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use - // page as large as 256M. - // - // Here we try to figure out page size by parsing /proc/meminfo and looking - // for a line with the following format: - // Hugepagesize: 2048 kB - // - // If we can't determine the value (e.g. 
/proc is not mounted, or the text - // format has been changed), we'll use the largest page size supported by - // the processor. +size_t os::Linux::find_large_page_size() { + size_t large_page_size = 0; + + // large_page_size on Linux is used to round up heap size. x86 uses either + // 2M or 4M page, depending on whether PAE (Physical Address Extensions) + // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use + // page as large as 256M. + // + // Here we try to figure out page size by parsing /proc/meminfo and looking + // for a line with the following format: + // Hugepagesize: 2048 kB + // + // If we can't determine the value (e.g. /proc is not mounted, or the text + // format has been changed), we'll use the largest page size supported by + // the processor. #ifndef ZERO - _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M) - ARM_ONLY(2 * M) PPC_ONLY(4 * M); + large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M) + ARM_ONLY(2 * M) PPC_ONLY(4 * M); #endif // ZERO - FILE *fp = fopen("/proc/meminfo", "r"); - if (fp) { - while (!feof(fp)) { - int x = 0; - char buf[16]; - if (fscanf(fp, "Hugepagesize: %d", &x) == 1) { - if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) { - _large_page_size = x * K; - break; - } - } else { - // skip to next line - for (;;) { - int ch = fgetc(fp); - if (ch == EOF || ch == (int)'\n') break; - } + FILE *fp = fopen("/proc/meminfo", "r"); + if (fp) { + while (!feof(fp)) { + int x = 0; + char buf[16]; + if (fscanf(fp, "Hugepagesize: %d", &x) == 1) { + if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) { + large_page_size = x * K; + break; + } + } else { + // skip to next line + for (;;) { + int ch = fgetc(fp); + if (ch == EOF || ch == (int)'\n') break; } } - fclose(fp); } - } - - // print a warning if any large page related flag is specified on command line - bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); - + fclose(fp); + } + + if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) { + warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is " + SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size), + proper_unit_for_byte_size(large_page_size)); + } + + return large_page_size; +} + +size_t os::Linux::setup_large_page_size() { + _large_page_size = Linux::find_large_page_size(); const size_t default_page_size = (size_t)Linux::page_size(); if (_large_page_size > default_page_size) { _page_sizes[0] = _large_page_size; _page_sizes[1] = default_page_size; _page_sizes[2] = 0; } - UseHugeTLBFS = UseHugeTLBFS && - Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size); - - if (UseHugeTLBFS) + + return _large_page_size; +} + +bool os::Linux::setup_large_page_type(size_t page_size) { + if (FLAG_IS_DEFAULT(UseHugeTLBFS) && + FLAG_IS_DEFAULT(UseSHM) && + FLAG_IS_DEFAULT(UseTransparentHugePages)) { + // If UseLargePages is specified on the command line try all methods, + // if it's default, then try only UseTransparentHugePages. 
+ if (FLAG_IS_DEFAULT(UseLargePages)) { + UseTransparentHugePages = true; + } else { + UseHugeTLBFS = UseTransparentHugePages = UseSHM = true; + } + } + + if (UseTransparentHugePages) { + bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages); + if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) { + UseHugeTLBFS = false; + UseSHM = false; + return true; + } + UseTransparentHugePages = false; + } + + if (UseHugeTLBFS) { + bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); + if (hugetlbfs_sanity_check(warn_on_failure, page_size)) { + UseSHM = false; + return true; + } + UseHugeTLBFS = false; + } + + return UseSHM; +} + +void os::large_page_init() { + if (!UseLargePages) { + UseHugeTLBFS = false; + UseTransparentHugePages = false; UseSHM = false; - - UseLargePages = UseHugeTLBFS || UseSHM; + return; + } + + size_t large_page_size = Linux::setup_large_page_size(); + UseLargePages = Linux::setup_large_page_type(large_page_size); set_coredump_filter(); } @@ -3319,16 +3410,22 @@ #define SHM_HUGETLB 04000 #endif -char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { +char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) { // "exec" is passed in but not used. Creating the shared image for // the code cache doesn't have an SHM_X executable permission to check. assert(UseLargePages && UseSHM, "only for SHM large pages"); + assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address"); + + if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { + return NULL; // Fallback to small pages. + } key_t key = IPC_PRIVATE; char *addr; bool warn_on_failure = UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || + !FLAG_IS_DEFAULT(UseSHM) || !FLAG_IS_DEFAULT(LargePageSizeInBytes) ); char msg[128]; @@ -3376,42 +3473,219 @@ return NULL; } - if ((addr != NULL) && UseNUMAInterleaving) { - numa_make_global(addr, bytes); - } - - // The memory is committed - MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); + return addr; +} + +static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) { + assert(error == ENOMEM, "Only expect to fail if no memory is available"); + + bool warn_on_failure = UseLargePages && + (!FLAG_IS_DEFAULT(UseLargePages) || + !FLAG_IS_DEFAULT(UseHugeTLBFS) || + !FLAG_IS_DEFAULT(LargePageSizeInBytes)); + + if (warn_on_failure) { + char msg[128]; + jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: " + PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error); + warning(msg); + } +} + +char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) { + assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages"); + assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size"); + assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address"); + + int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; + char* addr = (char*)::mmap(req_addr, bytes, prot, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB, + -1, 0); + + if (addr == MAP_FAILED) { + warn_on_large_pages_failure(req_addr, bytes, errno); + return NULL; + } + + assert(is_ptr_aligned(addr, os::large_page_size()), "Must be"); return addr; } +char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) { + size_t large_page_size = os::large_page_size(); + + assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes"); + + // Allocate small pages. + + char* start; + if (req_addr != NULL) { + assert(is_ptr_aligned(req_addr, alignment), "Must be"); + assert(is_size_aligned(bytes, alignment), "Must be"); + start = os::reserve_memory(bytes, req_addr); + assert(start == NULL || start == req_addr, "Must be"); + } else { + start = os::reserve_memory_aligned(bytes, alignment); + } + + if (start == NULL) { + return NULL; + } + + assert(is_ptr_aligned(start, alignment), "Must be"); + + // os::reserve_memory_special will record this memory area. + // Need to release it here to prevent overlapping reservations. + MemTracker::record_virtual_memory_release((address)start, bytes); + + char* end = start + bytes; + + // Find the regions of the allocated chunk that can be promoted to large pages. + char* lp_start = (char*)align_ptr_up(start, large_page_size); + char* lp_end = (char*)align_ptr_down(end, large_page_size); + + size_t lp_bytes = lp_end - lp_start; + + assert(is_size_aligned(lp_bytes, large_page_size), "Must be"); + + if (lp_bytes == 0) { + // The mapped region doesn't even span the start and the end of a large page. + // Fall back to allocate a non-special area. + ::munmap(start, end - start); + return NULL; + } + + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; + + + void* result; + + if (start != lp_start) { + result = ::mmap(start, lp_start - start, prot, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + ::munmap(lp_start, end - lp_start); + return NULL; + } + } + + result = ::mmap(lp_start, lp_bytes, prot, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB, + -1, 0); + if (result == MAP_FAILED) { + warn_on_large_pages_failure(req_addr, bytes, errno); + // If the mmap above fails, the large pages region will be unmapped and we + // have regions before and after with small pages. Release these regions. 
+ // + // | mapped | unmapped | mapped | + // ^ ^ ^ ^ + // start lp_start lp_end end + // + ::munmap(start, lp_start - start); + ::munmap(lp_end, end - lp_end); + return NULL; + } + + if (lp_end != end) { + result = ::mmap(lp_end, end - lp_end, prot, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + ::munmap(start, lp_end - start); + return NULL; + } + } + + return start; +} + +char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) { + assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages"); + assert(is_ptr_aligned(req_addr, alignment), "Must be"); + assert(is_power_of_2(alignment), "Must be"); + assert(is_power_of_2(os::large_page_size()), "Must be"); + assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes"); + + if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) { + return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec); + } else { + return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec); + } +} + +char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) { + assert(UseLargePages, "only for large pages"); + + char* addr; + if (UseSHM) { + addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec); + } else { + assert(UseHugeTLBFS, "must be"); + addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec); + } + + if (addr != NULL) { + if (UseNUMAInterleaving) { + numa_make_global(addr, bytes); + } + + // The memory is committed + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); + } + + return addr; +} + +bool os::Linux::release_memory_special_shm(char* base, size_t bytes) { + // detaching the SHM segment will also delete it, see reserve_memory_special_shm() + return shmdt(base) == 0; +} + +bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) { + return pd_release_memory(base, bytes); +} + bool os::release_memory_special(char* base, size_t bytes) { + assert(UseLargePages, "only for large pages"); + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); - // detaching the SHM segment will also delete it, see reserve_memory_special() - int rslt = shmdt(base); - if (rslt == 0) { + + bool res; + if (UseSHM) { + res = os::Linux::release_memory_special_shm(base, bytes); + } else { + assert(UseHugeTLBFS, "must be"); + res = os::Linux::release_memory_special_huge_tlbfs(base, bytes); + } + + if (res) { tkr.record((address)base, bytes); - return true; } else { tkr.discard(); - return false; - } + } + + return res; } size_t os::large_page_size() { return _large_page_size; } -// HugeTLBFS allows application to commit large page memory on demand; -// with SysV SHM the entire memory region must be allocated as shared +// With SysV SHM the entire memory region must be allocated as shared // memory. +// HugeTLBFS allows application to commit large page memory on demand. +// However, when committing memory with HugeTLBFS fails, the region +// that was supposed to be committed will lose the old reservation +// and allow other threads to steal that memory region. Because of this +// behavior we can't commit HugeTLBFS memory. 
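+// An illustrative sketch of that hazard, assuming the MAP_FIXED commit path
+// used by the mixed mapping above:
+//
+//   char* addr = os::reserve_memory(bytes, NULL);   // small-page reservation
+//   void* p = mmap(addr, bytes, prot,
+//                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB, -1, 0);
+//   // If p == MAP_FAILED, the old reservation at addr is already gone, so a
+//   // concurrent mmap(NULL, ...) in another thread may land inside
+//   // [addr, addr + bytes) while we still consider it reserved.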
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }

 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }

 // Reserve memory at an arbitrary address, only if that area is
@@ -4465,6 +4739,26 @@
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);

   // If the pagesize of the VM is greater than 8K determine the appropriate
@@ -4511,8 +4805,6 @@
 #endif
   }

-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -4547,6 +4839,10 @@
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());

+#if defined(IA32)
+  workaround_expand_exec_shield_cs_limit();
+#endif
+
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
@@ -4563,21 +4859,23 @@
       UseNUMA = false;
     }
   }
-  // With SHM large pages we cannot uncommit a page, so there's not way
+  // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
   // we can make the adaptive lgrp chunk resizing work. If the user specified
-  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+  // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
   // disable adaptive resizing.
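  // For example (illustrative): -XX:+UseNUMA -XX:+UseLargePages -XX:+UseSHM
  // triggers the warning below and turns off UseAdaptiveSizePolicy and
  // UseAdaptiveNUMAChunkSizing, while an ergonomically (by default) enabled
  // UseNUMA is simply switched off.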
- if (UseNUMA && UseLargePages && UseSHM) { - if (!FLAG_IS_DEFAULT(UseNUMA)) { - if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) { + if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) { + if (FLAG_IS_DEFAULT(UseNUMA)) { + UseNUMA = false; + } else { + if (FLAG_IS_DEFAULT(UseLargePages) && + FLAG_IS_DEFAULT(UseSHM) && + FLAG_IS_DEFAULT(UseHugeTLBFS)) { UseLargePages = false; } else { - warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing"); + warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing"); UseAdaptiveSizePolicy = false; UseAdaptiveNUMAChunkSizing = false; } - } else { - UseNUMA = false; } } if (!UseNUMA && ForceNUMA) { @@ -5273,21 +5571,36 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) { if (millis < 0) millis = 0; - struct timeval now; - int status = gettimeofday(&now, NULL); - assert(status == 0, "gettimeofday"); + jlong seconds = millis / 1000; millis %= 1000; if (seconds > 50000000) { // see man cond_timedwait(3T) seconds = 50000000; } - abstime->tv_sec = now.tv_sec + seconds; - long usec = now.tv_usec + millis * 1000; - if (usec >= 1000000) { - abstime->tv_sec += 1; - usec -= 1000000; - } - abstime->tv_nsec = usec * 1000; + + if (os::Linux::supports_monotonic_clock()) { + struct timespec now; + int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now); + assert_status(status == 0, status, "clock_gettime"); + abstime->tv_sec = now.tv_sec + seconds; + long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC; + if (nanos >= NANOSECS_PER_SEC) { + abstime->tv_sec += 1; + nanos -= NANOSECS_PER_SEC; + } + abstime->tv_nsec = nanos; + } else { + struct timeval now; + int status = gettimeofday(&now, NULL); + assert(status == 0, "gettimeofday"); + abstime->tv_sec = now.tv_sec + seconds; + long usec = now.tv_usec + millis * 1000; + if (usec >= 1000000) { + abstime->tv_sec += 1; + usec -= 1000000; + } + abstime->tv_nsec = usec * 1000; + } return abstime; } @@ -5379,7 +5692,7 @@ status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst); if (status != 0 && WorkAroundNPTLTimedWaitHang) { pthread_cond_destroy (_cond); - pthread_cond_init (_cond, NULL) ; + pthread_cond_init (_cond, os::Linux::condAttr()) ; } assert_status(status == 0 || status == EINTR || status == ETIME || status == ETIMEDOUT, @@ -5480,32 +5793,50 @@ static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) { assert (time > 0, "convertTime"); - - struct timeval now; - int status = gettimeofday(&now, NULL); - assert(status == 0, "gettimeofday"); - - time_t max_secs = now.tv_sec + MAX_SECS; - - if (isAbsolute) { - jlong secs = time / 1000; - if (secs > max_secs) { - absTime->tv_sec = max_secs; + time_t max_secs = 0; + + if (!os::Linux::supports_monotonic_clock() || isAbsolute) { + struct timeval now; + int status = gettimeofday(&now, NULL); + assert(status == 0, "gettimeofday"); + + max_secs = now.tv_sec + MAX_SECS; + + if (isAbsolute) { + jlong secs = time / 1000; + if (secs > max_secs) { + absTime->tv_sec = max_secs; + } else { + absTime->tv_sec = secs; + } + absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; + } else { + jlong secs = time / NANOSECS_PER_SEC; + if (secs >= MAX_SECS) { + absTime->tv_sec = max_secs; + absTime->tv_nsec = 0; + } else { + absTime->tv_sec = now.tv_sec + secs; + absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; + if (absTime->tv_nsec >= NANOSECS_PER_SEC) { + absTime->tv_nsec -= NANOSECS_PER_SEC; + ++absTime->tv_sec; 
// note: this must be <= max_secs + } + } } - else { - absTime->tv_sec = secs; - } - absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; - } - else { + } else { + // must be relative using monotonic clock + struct timespec now; + int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now); + assert_status(status == 0, status, "clock_gettime"); + max_secs = now.tv_sec + MAX_SECS; jlong secs = time / NANOSECS_PER_SEC; if (secs >= MAX_SECS) { absTime->tv_sec = max_secs; absTime->tv_nsec = 0; - } - else { + } else { absTime->tv_sec = now.tv_sec + secs; - absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; + absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec; if (absTime->tv_nsec >= NANOSECS_PER_SEC) { absTime->tv_nsec -= NANOSECS_PER_SEC; ++absTime->tv_sec; // note: this must be <= max_secs @@ -5585,15 +5916,19 @@ jt->set_suspend_equivalent(); // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() + assert(_cur_index == -1, "invariant"); if (time == 0) { - status = pthread_cond_wait (_cond, _mutex) ; + _cur_index = REL_INDEX; // arbitrary choice when not timed + status = pthread_cond_wait (&_cond[_cur_index], _mutex) ; } else { - status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ; + _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX; + status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ; if (status != 0 && WorkAroundNPTLTimedWaitHang) { - pthread_cond_destroy (_cond) ; - pthread_cond_init (_cond, NULL); + pthread_cond_destroy (&_cond[_cur_index]) ; + pthread_cond_init (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr()); } } + _cur_index = -1; assert_status(status == 0 || status == EINTR || status == ETIME || status == ETIMEDOUT, status, "cond_timedwait"); @@ -5622,17 +5957,24 @@ s = _counter; _counter = 1; if (s < 1) { - if (WorkAroundNPTLTimedWaitHang) { - status = pthread_cond_signal (_cond) ; - assert (status == 0, "invariant") ; + // thread might be parked + if (_cur_index != -1) { + // thread is definitely parked + if (WorkAroundNPTLTimedWaitHang) { + status = pthread_cond_signal (&_cond[_cur_index]); + assert (status == 0, "invariant"); status = pthread_mutex_unlock(_mutex); - assert (status == 0, "invariant") ; - } else { + assert (status == 0, "invariant"); + } else { status = pthread_mutex_unlock(_mutex); - assert (status == 0, "invariant") ; - status = pthread_cond_signal (_cond) ; - assert (status == 0, "invariant") ; - } + assert (status == 0, "invariant"); + status = pthread_cond_signal (&_cond[_cur_index]); + assert (status == 0, "invariant"); + } + } else { + pthread_mutex_unlock(_mutex); + assert (status == 0, "invariant") ; + } } else { pthread_mutex_unlock(_mutex); assert (status == 0, "invariant") ; @@ -5848,3 +6190,149 @@ } #endif // JAVASE_EMBEDDED + + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +#define test_log(...) 
\ + do {\ + if (VerboseInternalVMTests) { \ + tty->print_cr(__VA_ARGS__); \ + tty->flush(); \ + }\ + } while (false) + +class TestReserveMemorySpecial : AllStatic { + public: + static void small_page_write(void* addr, size_t size) { + size_t page_size = os::vm_page_size(); + + char* end = (char*)addr + size; + for (char* p = (char*)addr; p < end; p += page_size) { + *p = 1; + } + } + + static void test_reserve_memory_special_huge_tlbfs_only(size_t size) { + if (!UseHugeTLBFS) { + return; + } + + test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size); + + char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false); + + if (addr != NULL) { + small_page_write(addr, size); + + os::Linux::release_memory_special_huge_tlbfs(addr, size); + } + } + + static void test_reserve_memory_special_huge_tlbfs_only() { + if (!UseHugeTLBFS) { + return; + } + + size_t lp = os::large_page_size(); + + for (size_t size = lp; size <= lp * 10; size += lp) { + test_reserve_memory_special_huge_tlbfs_only(size); + } + } + + static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) { + if (!UseHugeTLBFS) { + return; + } + + test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")", + size, alignment); + + assert(size >= os::large_page_size(), "Incorrect input to test"); + + char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false); + + if (addr != NULL) { + small_page_write(addr, size); + + os::Linux::release_memory_special_huge_tlbfs(addr, size); + } + } + + static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) { + size_t lp = os::large_page_size(); + size_t ag = os::vm_allocation_granularity(); + + for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) { + test_reserve_memory_special_huge_tlbfs_mixed(size, alignment); + } + } + + static void test_reserve_memory_special_huge_tlbfs_mixed() { + size_t lp = os::large_page_size(); + size_t ag = os::vm_allocation_granularity(); + + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10); + test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2); + } + + static void test_reserve_memory_special_huge_tlbfs() { + if (!UseHugeTLBFS) { + return; + } + + test_reserve_memory_special_huge_tlbfs_only(); + test_reserve_memory_special_huge_tlbfs_mixed(); + } + + static void test_reserve_memory_special_shm(size_t size, size_t alignment) { + if (!UseSHM) { + return; + } + + test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment); + + char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false); + + if (addr != NULL) { + assert(is_ptr_aligned(addr, alignment), "Check"); + assert(is_ptr_aligned(addr, os::large_page_size()), "Check"); + + small_page_write(addr, size); + + os::Linux::release_memory_special_shm(addr, size); + } + } + + static void test_reserve_memory_special_shm() { + size_t lp = 
os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
diff -r ccb4f2af2319 -r cefad50507d8 src/os/linux/vm/os_linux.hpp
--- a/src/os/linux/vm/os_linux.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/os/linux/vm/os_linux.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -32,6 +32,7 @@
 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;

   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }

+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);

+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
@@ -207,6 +221,13 @@

   static jlong fast_thread_cpu_time(clockid_t clockid);

+  // pthread_cond clock support
+ private:
+  static pthread_condattr_t _condattr[1];
+
+ public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling

   // none present
@@ -221,6 +242,7 @@
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_set_bind_policy_func_t)(int policy);

   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -228,6 +250,7 @@
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   static unsigned long* _numa_all_nodes;

   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@@ -236,6 +259,7 @@
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
 public:
@@ -253,6 +277,11 @@
_numa_interleave_memory(start, size, _numa_all_nodes); } } + static void numa_set_bind_policy(int policy) { + if (_numa_set_bind_policy != NULL) { + _numa_set_bind_policy(policy); + } + } static int get_node_by_cpu(int cpu_id); }; @@ -273,7 +302,7 @@ public: PlatformEvent() { int status; - status = pthread_cond_init (_cond, NULL); + status = pthread_cond_init (_cond, os::Linux::condAttr()); assert_status(status == 0, status, "cond_init"); status = pthread_mutex_init (_mutex, NULL); assert_status(status == 0, status, "mutex_init"); @@ -288,14 +317,19 @@ void park () ; void unpark () ; int TryPark () ; - int park (jlong millis) ; + int park (jlong millis) ; // relative timed-wait only void SetAssociation (Thread * a) { _Assoc = a ; } } ; class PlatformParker : public CHeapObj { protected: + enum { + REL_INDEX = 0, + ABS_INDEX = 1 + }; + int _cur_index; // which cond is in use: -1, 0, 1 pthread_mutex_t _mutex [1] ; - pthread_cond_t _cond [1] ; + pthread_cond_t _cond [2] ; // one for relative times and one for abs. public: // TODO-FIXME: make dtor private ~PlatformParker() { guarantee (0, "invariant") ; } @@ -303,10 +337,13 @@ public: PlatformParker() { int status; - status = pthread_cond_init (_cond, NULL); - assert_status(status == 0, status, "cond_init"); + status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr()); + assert_status(status == 0, status, "cond_init rel"); + status = pthread_cond_init (&_cond[ABS_INDEX], NULL); + assert_status(status == 0, status, "cond_init abs"); status = pthread_mutex_init (_mutex, NULL); assert_status(status == 0, status, "mutex_init"); + _cur_index = -1; // mark as unused } }; diff -r ccb4f2af2319 -r cefad50507d8 src/os/posix/vm/os_posix.cpp --- a/src/os/posix/vm/os_posix.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/posix/vm/os_posix.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* -* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,8 @@ #include #include #include +#include +#include // Check core dump limit and report possible place where core can be found @@ -260,6 +262,55 @@ return ::fdopen(fd, mode); } +void* os::get_default_process_handle() { + return (void*)::dlopen(NULL, RTLD_LAZY); +} + +// Builds a platform dependent Agent_OnLoad_ function name +// which is used to find statically linked in agents. +// Parameters: +// sym_name: Symbol in library we are looking for +// lib_name: Name of library to look in, NULL for shared libs. +// is_absolute_path == true if lib_name is absolute path to agent +// such as "/a/b/libL.so" +// == false if only the base name of the library is passed in +// such as "L" +char* os::build_agent_function_name(const char *sym_name, const char *lib_name, + bool is_absolute_path) { + char *agent_entry_name; + size_t len; + size_t name_len; + size_t prefix_len = strlen(JNI_LIB_PREFIX); + size_t suffix_len = strlen(JNI_LIB_SUFFIX); + const char *start; + + if (lib_name != NULL) { + len = name_len = strlen(lib_name); + if (is_absolute_path) { + // Need to strip path, prefix and suffix + if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { + lib_name = ++start; + } + if (len <= (prefix_len + suffix_len)) { + return NULL; + } + lib_name += prefix_len; + name_len = strlen(lib_name) - suffix_len; + } + } + len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; + agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); + if (agent_entry_name == NULL) { + return NULL; + } + strcpy(agent_entry_name, sym_name); + if (lib_name != NULL) { + strcat(agent_entry_name, "_"); + strncat(agent_entry_name, lib_name, name_len); + } + return agent_entry_name; +} + os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); } @@ -271,11 +322,17 @@ * The callback is supposed to provide the method that should be protected. */ bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { + sigset_t saved_sig_mask; + assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); assert(!WatcherThread::watcher_thread()->has_crash_protection(), "crash_protection already set?"); - if (sigsetjmp(_jmpbuf, 1) == 0) { + // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask + // since on at least some systems (OS X) siglongjmp will restore the mask + // for the process, not the thread + pthread_sigmask(0, NULL, &saved_sig_mask); + if (sigsetjmp(_jmpbuf, 0) == 0) { // make sure we can see in the signal handler that we have crash protection // installed WatcherThread::watcher_thread()->set_crash_protection(this); @@ -285,6 +342,7 @@ return true; } // this happens when we siglongjmp() back + pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL); WatcherThread::watcher_thread()->set_crash_protection(NULL); return false; } diff -r ccb4f2af2319 -r cefad50507d8 src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/solaris/vm/os_solaris.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -3385,7 +3385,7 @@ return true; } -char* os::reserve_memory_special(size_t size, char* addr, bool exec) { +char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) { fatal("os::reserve_memory_special should not be called on Solaris."); return NULL; } @@ -5178,9 +5178,7 @@ if(Verbose && PrintMiscellaneous) tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); #endif -} - - os::large_page_init(); + } // Check minimum allowable stack size for thread creation and to initialize // the java system classes, including StackOverflowError - depends on page @@ -6601,3 +6599,9 @@ return strlen(buffer); } + +#ifndef PRODUCT +void TestReserveMemorySpecial_test() { + // No tests available for this platform +} +#endif diff -r ccb4f2af2319 -r cefad50507d8 src/os/windows/vm/decoder_windows.cpp --- a/src/os/windows/vm/decoder_windows.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/windows/vm/decoder_windows.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,7 +32,11 @@ _can_decode_in_vm = false; _pfnSymGetSymFromAddr64 = NULL; _pfnUndecorateSymbolName = NULL; - +#ifdef AMD64 + _pfnStackWalk64 = NULL; + _pfnSymFunctionTableAccess64 = NULL; + _pfnSymGetModuleBase64 = NULL; +#endif _decoder_status = no_error; initialize(); } @@ -53,14 +57,24 @@ _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName"); if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) { - _pfnSymGetSymFromAddr64 = NULL; - _pfnUndecorateSymbolName = NULL; - ::FreeLibrary(handle); - _dbghelp_handle = NULL; + uninitialize(); _decoder_status = helper_func_error; return; } +#ifdef AMD64 + _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64"); + 
_pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64"); + _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64"); + if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) { + // We can't call StackWalk64 to walk the stack, but we are still + // able to decode the symbols. Let's limp on. + _pfnStackWalk64 = NULL; + _pfnSymFunctionTableAccess64 = NULL; + _pfnSymGetModuleBase64 = NULL; + } +#endif + HANDLE hProcess = ::GetCurrentProcess(); _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS); if (!_pfnSymInitialize(hProcess, NULL, TRUE)) { @@ -156,6 +170,11 @@ void WindowsDecoder::uninitialize() { _pfnSymGetSymFromAddr64 = NULL; _pfnUndecorateSymbolName = NULL; +#ifdef AMD64 + _pfnStackWalk64 = NULL; + _pfnSymFunctionTableAccess64 = NULL; + _pfnSymGetModuleBase64 = NULL; +#endif if (_dbghelp_handle != NULL) { ::FreeLibrary(_dbghelp_handle); } @@ -195,3 +214,65 @@ _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE); } +#ifdef AMD64 +BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType, + HANDLE hProcess, + HANDLE hThread, + LPSTACKFRAME64 StackFrame, + PVOID ContextRecord, + PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine, + PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine, + PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine, + PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) { + DecoderLocker locker; + WindowsDecoder* wd = (WindowsDecoder*)locker.decoder(); + + if (!wd->has_error() && wd->_pfnStackWalk64) { + return wd->_pfnStackWalk64(MachineType, + hProcess, + hThread, + StackFrame, + ContextRecord, + ReadMemoryRoutine, + FunctionTableAccessRoutine, + GetModuleBaseRoutine, + TranslateAddress); + } else { + return false; + } +} + +PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) { + DecoderLocker locker; + WindowsDecoder* wd = (WindowsDecoder*)locker.decoder(); + + if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) { + return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase); + } else { + return NULL; + } +} + +pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() { + DecoderLocker locker; + WindowsDecoder* wd = (WindowsDecoder*)locker.decoder(); + + if (!wd->has_error()) { + return wd->_pfnSymFunctionTableAccess64; + } else { + return NULL; + } +} + +pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() { + DecoderLocker locker; + WindowsDecoder* wd = (WindowsDecoder*)locker.decoder(); + + if (!wd->has_error()) { + return wd->_pfnSymGetModuleBase64; + } else { + return NULL; + } +} + +#endif // AMD64 diff -r ccb4f2af2319 -r cefad50507d8 src/os/windows/vm/decoder_windows.hpp --- a/src/os/windows/vm/decoder_windows.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/windows/vm/decoder_windows.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -38,6 +38,20 @@ typedef BOOL (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR); typedef BOOL (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int); +#ifdef AMD64 +typedef BOOL (WINAPI *pfn_StackWalk64)(DWORD MachineType, + HANDLE hProcess, + HANDLE hThread, + LPSTACKFRAME64 StackFrame, + PVOID ContextRecord, + PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine, + PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine, + PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine, + PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress); +typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase); 
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr); +#endif + class WindowsDecoder : public AbstractDecoder { public: @@ -61,7 +75,34 @@ bool _can_decode_in_vm; pfn_SymGetSymFromAddr64 _pfnSymGetSymFromAddr64; pfn_UndecorateSymbolName _pfnUndecorateSymbolName; +#ifdef AMD64 + pfn_StackWalk64 _pfnStackWalk64; + pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64; + pfn_SymGetModuleBase64 _pfnSymGetModuleBase64; + + friend class WindowsDbgHelp; +#endif }; +#ifdef AMD64 +// TODO: refactor and move the handling of dbghelp.dll outside of Decoder +class WindowsDbgHelp : public Decoder { +public: + static BOOL StackWalk64(DWORD MachineType, + HANDLE hProcess, + HANDLE hThread, + LPSTACKFRAME64 StackFrame, + PVOID ContextRecord, + PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine, + PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine, + PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine, + PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress); + static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase); + + static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64(); + static pfn_SymGetModuleBase64 pfnSymGetModuleBase64(); +}; +#endif + #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os/windows/vm/os_windows.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -3173,7 +3173,12 @@ return true; } -char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) { +char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) { + assert(UseLargePages, "only for large pages"); + + if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { + return NULL; // Fallback to small pages. + } const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; @@ -3201,9 +3206,12 @@ return p_buf; } else { + if (TracePageSizes && Verbose) { + tty->print_cr("Reserving large pages in a single large chunk."); + } // normal policy just allocate it all at once DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; - char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); + char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); if (res != NULL) { address pc = CALLER_PC; MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc); @@ -3929,8 +3937,6 @@ #endif } - os::large_page_init(); - // Setup Windows Exceptions // for debugging float code generation bugs @@ -5411,6 +5417,75 @@ return true; } +void* os::get_default_process_handle() { + return (void*)GetModuleHandle(NULL); +} + +// Builds a platform dependent Agent_OnLoad_ function name +// which is used to find statically linked in agents. +// Additionally for windows, takes into account __stdcall names. +// Parameters: +// sym_name: Symbol in library we are looking for +// lib_name: Name of library to look in, NULL for shared libs. 
+//    is_absolute_path == true if lib_name is absolute path to agent
+//                        such as "C:/a/b/L.dll"
+//                     == false if only the base name of the library is passed in
+//                        such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      } else {
+        // Need to check for drive prefix
+        if ((start = strchr(lib_name, ':')) != NULL) {
+          lib_name = ++start;
+        }
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  if (lib_name != NULL) {
+    const char *p = strrchr(sym_name, '@');
+    if (p != NULL && p != sym_name) {
+      // sym_name == _Agent_OnLoad@XX
+      strncpy(agent_entry_name, sym_name, (p - sym_name));
+      agent_entry_name[(p-sym_name)] = '\0';
+      // agent_entry_name == _Agent_OnLoad
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+      strcat(agent_entry_name, p);
+      // agent_entry_name == _Agent_OnLoad_lib_name@XX
+    } else {
+      strcpy(agent_entry_name, sym_name);
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+    }
+  } else {
+    strcpy(agent_entry_name, sym_name);
+  }
+  return agent_entry_name;
+}
+
 #else
 // Kernel32 API
 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
@@ -5655,3 +5730,68 @@
 }

 #endif
+
+#ifndef PRODUCT
+
+// test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first tries to find a good approximate address to allocate at by using the same
+// method to allocate some memory at any address. The test then tries to allocate memory in
+// the vicinity (not directly after it, to avoid possible by-chance use of that location).
+// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
+// the previously allocated memory is available for allocation. The only actual failure
+// that is reported is when the test tries to allocate at a particular location but gets a
+// different valid one. A NULL return value at this point is not considered an error but may
+// be legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
+void TestReserveMemorySpecial_test() {
+  if (!UseLargePages) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Skipping test because large pages are disabled");
+    }
+    return;
+  }
+  // save current value of globals
+  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+  bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+  // set globals to make sure we hit the correct code path
+  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+  // do an allocation at an address selected by the OS to get a good one.
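+  // (Illustrative: with the typical 2M large page on x86 this reserves
+  // 4 * 2M = 8M.)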
+ const size_t large_allocation_size = os::large_page_size() * 4; + char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); + if (result == NULL) { + if (VerboseInternalVMTests) { + gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.", + large_allocation_size); + } + } else { + os::release_memory_special(result, large_allocation_size); + + // allocate another page within the recently allocated memory area which seems to be a good location. At least + // we managed to get it once. + const size_t expected_allocation_size = os::large_page_size(); + char* expected_location = result + os::large_page_size(); + char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); + if (actual_location == NULL) { + if (VerboseInternalVMTests) { + gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", + expected_location, large_allocation_size); + } + } else { + // release memory + os::release_memory_special(actual_location, expected_allocation_size); + // only now check, after releasing any memory to avoid any leaks. + assert(actual_location == expected_location, + err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", + expected_location, expected_allocation_size, actual_location)); + } + } + + // restore globals + UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; + UseNUMAInterleaving = old_use_numa_interleaving; +} +#endif // PRODUCT + diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -715,6 +715,7 @@ err.report_and_die(); ShouldNotReachHere(); + return false; } // From solaris_i486.s ported to bsd_i486.s diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp --- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -66,6 +66,7 @@ frame os::get_sender_for_C_frame(frame* fr) { ShouldNotCallThis(); + return frame(); } frame os::current_frame() { @@ -103,16 +104,19 @@ address os::Bsd::ucontext_get_pc(ucontext_t* uc) { ShouldNotCallThis(); + return NULL; } ExtendedPC os::fetch_frame_from_context(void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ShouldNotCallThis(); + return ExtendedPC(); } frame os::fetch_frame_from_context(void* ucVoid) { ShouldNotCallThis(); + return frame(); } extern "C" JNIEXPORT int @@ -240,6 +244,7 @@ sprintf(buf, fmt, sig, info->si_addr); fatal(buf); + return false; } void os::Bsd::init_thread_fpu_state(void) { @@ -373,17 +378,7 @@ extern "C" { int SpinPause() { - } - - int SafeFetch32(int *adr, int errValue) { - int value = errValue; - value = *adr; - return value; - } - intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) { - intptr_t value = errValue; - value = *adr; - return value; + return 1; } void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp --- a/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -110,6 +110,7 @@ void* ucontext, bool isInJava) { 
   ShouldNotCallThis();
+  return false;
 }

 // These routines are only used on cpu architectures that
diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/linux_x86/vm/os_linux_x86.cpp
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Oct 11 10:38:03 2013 +0200
@@ -876,3 +876,46 @@
 #endif
 }
 #endif
+
+
+/*
+ * IA32 only: execute code at a high address in case buggy NX emulation is present,
+ * i.e. avoid CS limit updates (JDK-8023956).
+ */
+void os::workaround_expand_exec_shield_cs_limit() {
+#if defined(IA32)
+  size_t page_size = os::vm_page_size();
+  /*
+   * Take the highest VA the OS will give us and exec there.
+   *
+   * Although using -(pagesz) as mmap hint works on newer kernels as you would
+   * think, older variants affected by this work-around don't (search forward only).
+   *
+   * On the affected distributions, we understand the memory layout to be:
+   *
+   *   TASK_LIMIT= 3G, main stack base close to TASK_LIMIT.
+   *
+   * A few pages south of the main stack will do it.
+   *
+   * If we are embedded in an app other than the launcher (initial != main stack),
+   * we don't have much control or understanding of the address space, just let it slide.
+   */
+  char* hint = (char*) (Linux::initial_thread_stack_bottom() -
+                        ((StackYellowPages + StackRedPages + 1) * page_size));
+  char* codebuf = os::reserve_memory(page_size, hint);
+  if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
+    return; // No matter, we tried, best effort.
+  }
+  if (PrintMiscellaneous && (Verbose || WizardMode)) {
+    tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
+  }
+
+  // Some code to exec: the 'ret' instruction
+  codebuf[0] = 0xC3;
+
+  // Call the code in the codebuf
+  __asm__ volatile("call *%0" : : "r"(codebuf));
+
+  // keep the page mapped so CS limit isn't reduced.
+#endif
+}
diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/linux_x86/vm/os_linux_x86.hpp
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -36,4 +36,17 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }

+  /*
+   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
+   * (IA32 only).
+   *
+   * Map and execute at a high VA to prevent CS lazy updates race with SMP MM
+   * invalidation. Further code generation by the JVM will no longer cause CS limit
+   * updates.
+   *
+   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+   * @see JDK-8023956
+   */
+  static void workaround_expand_exec_shield_cs_limit();
+
 #endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_HPP
diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -35,7 +35,9 @@

 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
+// use 6G as the default base address because by default the OS maps the application
+// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
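+// (Illustrative: CONST64(6)*G is 0x180000000; the previous default below was
+// CONST64(4)*G.)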
+define_pd_global(uintx, HeapBaseMinAddress, CONST64(6)*G); #else define_pd_global(uintx, HeapBaseMinAddress, 2*G); #endif diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp --- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -44,6 +44,6 @@ define_pd_global(intx, CompilerThreadStackSize, 0); // Used on 64 bit platforms for UseCompressedOops base address -define_pd_global(uintx,HeapBaseMinAddress, 256*M); +define_pd_global(uintx,HeapBaseMinAddress, 2*G); #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/windows_x86/vm/os_windows_x86.cpp --- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -29,6 +29,7 @@ #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" +#include "decoder_windows.hpp" #include "interpreter/interpreter.hpp" #include "jvm_windows.h" #include "memory/allocation.inline.hpp" @@ -327,6 +328,94 @@ cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; +#ifdef AMD64 +/* + * Windows/x64 does not use stack frames the way expected by Java: + * [1] in most cases, there is no frame pointer. All locals are addressed via RSP + * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may + * not be RBP. + * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx + * + * So it's not possible to print the native stack using the + * while (...) {... fr = os::get_sender_for_C_frame(&fr); } + * loop in vmError.cpp. We need to roll our own loop. + */ +bool os::platform_print_native_stack(outputStream* st, void* context, + char *buf, int buf_size) +{ + CONTEXT ctx; + if (context != NULL) { + memcpy(&ctx, context, sizeof(ctx)); + } else { + RtlCaptureContext(&ctx); + } + + st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)"); + + STACKFRAME stk; + memset(&stk, 0, sizeof(stk)); + stk.AddrStack.Offset = ctx.Rsp; + stk.AddrStack.Mode = AddrModeFlat; + stk.AddrFrame.Offset = ctx.Rbp; + stk.AddrFrame.Mode = AddrModeFlat; + stk.AddrPC.Offset = ctx.Rip; + stk.AddrPC.Mode = AddrModeFlat; + + int count = 0; + address lastpc = 0; + while (count++ < StackPrintLimit) { + intptr_t* sp = (intptr_t*)stk.AddrStack.Offset; + intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp! + address pc = (address)stk.AddrPC.Offset; + + if (pc != NULL && sp != NULL && fp != NULL) { + if (count == 2 && lastpc == pc) { + // Skip it -- StackWalk64() may return the same PC + // (but different SP) on the first try. + } else { + // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame + // may not contain what Java expects, and may cause the frame() constructor + // to crash. Let's just print out the symbolic address. + frame::print_C_frame(st, buf, buf_size, pc); + st->cr(); + } + lastpc = pc; + } else { + break; + } + + PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset); + if (!p) { + // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash. 
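+      // (Illustrative: typically a PC inside dynamically generated code for
+      // which dbghelp has no function table entry.)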
+      break;
+    }
+
+    BOOL result = WindowsDbgHelp::StackWalk64(
+        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
+        GetCurrentProcess(),       // __in      HANDLE hProcess,
+        GetCurrentThread(),        // __in      HANDLE hThread,
+        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
+        &ctx,                      // __inout   PVOID ContextRecord,
+        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
+                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+        WindowsDbgHelp::pfnSymGetModuleBase64(),
+                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress

+    if (!result) {
+      break;
+    }
+  }
+  if (count > StackPrintLimit) {
+    st->print_cr("......");
+  }
+  st->cr();
+
+  return true;
+}
+#endif // AMD64
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                         intptr_t** ret_sp, intptr_t** ret_fp) {
@@ -401,6 +490,9 @@
                                        StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame();
   intptr_t* fp = (*func)();
+  if (fp == NULL) {
+    return frame();
+  }
 #else
   intptr_t* fp = _get_previous_fp();
 #endif // AMD64
diff -r ccb4f2af2319 -r cefad50507d8 src/os_cpu/windows_x86/vm/os_windows_x86.hpp
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -62,4 +62,10 @@

   static bool register_code_area(char *low, char *high);

+#ifdef AMD64
+#define PLATFORM_PRINT_NATIVE_STACK 1
+static bool platform_print_native_stack(outputStream* st, void* context,
+                                        char *buf, int buf_size);
+#endif
+
 #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
diff -r ccb4f2af2319 -r cefad50507d8 src/share/tools/LogCompilation/README
--- a/src/share/tools/LogCompilation/README	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/tools/LogCompilation/README	Fri Oct 11 10:38:03 2013 +0200
@@ -4,14 +4,14 @@
 requires a 1.5 JDK to build and simply typing make should build it.

 It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:

-  java -jar logc.jar hotspot.log
+  java -jar logc.jar hotspot_pid1234.log

 This will produce something like the normal PrintCompilation output.
 Adding the -i option will also report inlining like PrintInlining.
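 For example (illustrative), to also report the inlining decisions:

   java -jar logc.jar -i hotspot_pid1234.log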
-More information about the LogCompilation output can be found at +More information about the LogCompilation output can be found at https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation diff -r ccb4f2af2319 -r cefad50507d8 src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java --- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java Fri Oct 11 10:38:03 2013 +0200 @@ -106,10 +106,12 @@ " (" + getMethod().getBytes() + " bytes) " + getReason()); } } + stream.printf(" (end time: %6.4f", getTimeStamp()); if (getEndNodes() > 0) { - stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes()); + stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes()); } - stream.println(""); + stream.println(")"); + if (getReceiver() != null) { emit(stream, indent + 4); // stream.println("type profile " + method.holder + " -> " + receiver + " (" + diff -r ccb4f2af2319 -r cefad50507d8 src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java --- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Fri Oct 11 10:38:03 2013 +0200 @@ -207,7 +207,12 @@ } String search(Attributes attr, String name) { - return search(attr, name, null); + String result = attr.getValue(name); + if (result != null) { + return result; + } else { + throw new InternalError("can't find " + name); + } } String search(Attributes attr, String name, String defaultValue) { @@ -215,13 +220,7 @@ if (result != null) { return result; } - if (defaultValue != null) { - return defaultValue; - } - for (int i = 0; i < attr.getLength(); i++) { - System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i))); - } - throw new InternalError("can't find " + name); + return defaultValue; } int indent = 0; @@ -268,17 +267,18 @@ Phase p = new Phase(search(atts, "name"), Double.parseDouble(search(atts, "stamp")), Integer.parseInt(search(atts, "nodes", "0")), - Integer.parseInt(search(atts, "live"))); + Integer.parseInt(search(atts, "live", "0"))); phaseStack.push(p); } else if (qname.equals("phase_done")) { Phase p = phaseStack.pop(); - if (! 
p.getId().equals(search(atts, "name"))) { + String phaseName = search(atts, "name", null); + if (phaseName != null && !p.getId().equals(phaseName)) { System.out.println("phase: " + p.getId()); throw new InternalError("phase name mismatch"); } p.setEnd(Double.parseDouble(search(atts, "stamp"))); p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0"))); - p.setEndLiveNodes(Integer.parseInt(search(atts, "live"))); + p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0"))); compile.getPhases().add(p); } else if (qname.equals("task")) { compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1"))); @@ -413,8 +413,8 @@ } } else if (qname.equals("parse_done")) { CallSite call = scopes.pop(); - call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1"))); - call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1"))); + call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0"))); + call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0"))); call.setTimeStamp(Double.parseDouble(search(atts, "stamp"))); scopes.push(call); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/adlparse.cpp --- a/src/share/vm/adlc/adlparse.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/adlparse.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -3395,12 +3395,16 @@ char *greater_equal; char *less_equal; char *greater; + char *overflow; + char *no_overflow; const char *equal_format = "eq"; const char *not_equal_format = "ne"; const char *less_format = "lt"; const char *greater_equal_format = "ge"; const char *less_equal_format = "le"; const char *greater_format = "gt"; + const char *overflow_format = "o"; + const char *no_overflow_format = "no"; if (_curchar != '%') { parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n"); @@ -3437,6 +3441,12 @@ else if ( strcmp(field,"greater") == 0 ) { greater = interface_field_parse(&greater_format); } + else if ( strcmp(field,"overflow") == 0 ) { + overflow = interface_field_parse(&overflow_format); + } + else if ( strcmp(field,"no_overflow") == 0 ) { + no_overflow = interface_field_parse(&no_overflow_format); + } else { parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n"); return NULL; @@ -3455,7 +3465,9 @@ less, less_format, greater_equal, greater_equal_format, less_equal, less_equal_format, - greater, greater_format); + greater, greater_format, + overflow, overflow_format, + no_overflow, no_overflow_format); return inter; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/archDesc.cpp --- a/src/share/vm/adlc/archDesc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/archDesc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1192,6 +1192,8 @@ || strcmp(idealName,"CmpF") == 0 || strcmp(idealName,"FastLock") == 0 || strcmp(idealName,"FastUnlock") == 0 + || strcmp(idealName,"AddExactI") == 0 + || strcmp(idealName,"FlagsProj") == 0 || strcmp(idealName,"Bool") == 0 || strcmp(idealName,"Binary") == 0 ) { // Removed ConI from the must_clone list. CPUs that cannot use diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/arena.cpp --- a/src/share/vm/adlc/arena.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/arena.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,7 @@ #include "adlc.hpp" -void* Chunk::operator new(size_t requested_size, size_t length) { +void* Chunk::operator new(size_t requested_size, size_t length) throw() { return CHeapObj::operator new(requested_size + length); } @@ -163,7 +163,7 @@ //----------------------------------------------------------------------------- // CHeapObj -void* CHeapObj::operator new(size_t size){ +void* CHeapObj::operator new(size_t size) throw() { return (void *) malloc(size); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/arena.hpp --- a/src/share/vm/adlc/arena.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/arena.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ class CHeapObj { public: - void* operator new(size_t size); + void* operator new(size_t size) throw(); void operator delete(void* p); void* new_array(size_t size); }; @@ -53,7 +53,7 @@ class ValueObj { public: - void* operator new(size_t size); + void* operator new(size_t size) throw(); void operator delete(void* p); }; @@ -61,7 +61,7 @@ class AllStatic { public: - void* operator new(size_t size); + void* operator new(size_t size) throw(); void operator delete(void* p); }; @@ -70,7 +70,7 @@ // Linked list of raw memory chunks class Chunk: public CHeapObj { public: - void* operator new(size_t size, size_t length); + void* operator new(size_t size, size_t length) throw(); void operator delete(void* p, size_t length); Chunk(size_t length); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/formssel.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -2757,14 +2757,18 @@ const char* less, const char* less_format, const char* greater_equal, const char* greater_equal_format, const char* less_equal, const char* less_equal_format, - const char* greater, const char* greater_format) + const char* greater, const char* greater_format, + const char* overflow, const char* overflow_format, + const char* no_overflow, const char* no_overflow_format) : Interface("COND_INTER"), _equal(equal), _equal_format(equal_format), _not_equal(not_equal), _not_equal_format(not_equal_format), _less(less), _less_format(less_format), _greater_equal(greater_equal), _greater_equal_format(greater_equal_format), _less_equal(less_equal), _less_equal_format(less_equal_format), - _greater(greater), _greater_format(greater_format) { + _greater(greater), _greater_format(greater_format), + _overflow(overflow), _overflow_format(overflow_format), + _no_overflow(no_overflow), _no_overflow_format(no_overflow_format) { } CondInterface::~CondInterface() { // not owner of any character arrays @@ -2777,12 +2781,14 @@ // Write info to output files void CondInterface::output(FILE *fp) { Interface::output(fp); - if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal); - if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal); - if ( _less != NULL ) fprintf(fp," less == %s\n", _less); - if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal); - if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal); - if ( _greater != NULL ) 
fprintf(fp," greater == %s\n", _greater); + if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal); + if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal); + if ( _less != NULL ) fprintf(fp," less == %s\n", _less); + if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal); + if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal); + if ( _greater != NULL ) fprintf(fp," greater == %s\n", _greater); + if ( _overflow != NULL ) fprintf(fp," overflow == %s\n", _overflow); + if ( _no_overflow != NULL ) fprintf(fp," no_overflow == %s\n", _no_overflow); // fprintf(fp,"\n"); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/formssel.hpp --- a/src/share/vm/adlc/formssel.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/formssel.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -798,12 +798,16 @@ const char *_greater_equal; const char *_less_equal; const char *_greater; + const char *_overflow; + const char *_no_overflow; const char *_equal_format; const char *_not_equal_format; const char *_less_format; const char *_greater_equal_format; const char *_less_equal_format; const char *_greater_format; + const char *_overflow_format; + const char *_no_overflow_format; // Public Methods CondInterface(const char* equal, const char* equal_format, @@ -811,7 +815,9 @@ const char* less, const char* less_format, const char* greater_equal, const char* greater_equal_format, const char* less_equal, const char* less_equal_format, - const char* greater, const char* greater_format); + const char* greater, const char* greater_format, + const char* overflow, const char* overflow_format, + const char* no_overflow, const char* no_overflow_format); ~CondInterface(); void dump(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/main.cpp --- a/src/share/vm/adlc/main.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/main.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -485,7 +485,7 @@ // VS2005 has its own definition, identical to this one. #if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400 -void *operator new( size_t size, int, const char *, int ) { +void *operator new( size_t size, int, const char *, int ) throw() { return ::operator new( size ); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/output_c.cpp --- a/src/share/vm/adlc/output_c.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/output_c.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1095,7 +1095,7 @@ fprintf(fp, " // Identify previous instruction if inside this block\n"); fprintf(fp, " if( "); print_block_index(fp, inst_position); - fprintf(fp, " > 0 ) {\n Node *n = block->_nodes.at("); + fprintf(fp, " > 0 ) {\n Node *n = block->get_node("); print_block_index(fp, inst_position); fprintf(fp, ");\n inst%d = (n->is_Mach()) ? 
", inst_position); fprintf(fp, "n->as_Mach() : NULL;\n }\n"); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/adlc/output_h.cpp --- a/src/share/vm/adlc/output_h.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/adlc/output_h.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -388,6 +388,8 @@ fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format); fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format); fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format); + fprintf(fp, " else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format); + fprintf(fp, " else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format); } // Output code that dumps constant values, increment "i" if type is constant @@ -1208,6 +1210,8 @@ fprintf(fp," case BoolTest::ne : return not_equal();\n"); fprintf(fp," case BoolTest::le : return less_equal();\n"); fprintf(fp," case BoolTest::ge : return greater_equal();\n"); + fprintf(fp," case BoolTest::overflow : return overflow();\n"); + fprintf(fp," case BoolTest::no_overflow: return no_overflow();\n"); fprintf(fp," default : ShouldNotReachHere(); return 0;\n"); fprintf(fp," }\n"); fprintf(fp," };\n"); @@ -1373,6 +1377,14 @@ if( greater != NULL ) { define_oper_interface(fp, *oper, _globalNames, "greater", greater); } + const char *overflow = cInterface->_overflow; + if( overflow != NULL ) { + define_oper_interface(fp, *oper, _globalNames, "overflow", overflow); + } + const char *no_overflow = cInterface->_no_overflow; + if( no_overflow != NULL ) { + define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow); + } } // end Conditional Interface // Check if it is a Constant Interface else if (oper->_interface->is_ConstInterface() != NULL ) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/asm/codeBuffer.hpp --- a/src/share/vm/asm/codeBuffer.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/asm/codeBuffer.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -296,8 +296,8 @@ // CodeBuffers must be allocated on the stack except for a single // special case during expansion which is handled internally. This // is done to guarantee proper cleanup of resources. 
- void* operator new(size_t size) { return ResourceObj::operator new(size); } - void operator delete(void* p) { ShouldNotCallThis(); } + void* operator new(size_t size) throw() { return ResourceObj::operator new(size); } + void operator delete(void* p) { ShouldNotCallThis(); } public: typedef int csize_t; // code size type; would be size_t except for history diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_CodeStubs.hpp --- a/src/share/vm/c1/c1_CodeStubs.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_CodeStubs.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -364,7 +364,8 @@ enum PatchID { access_field_id, load_klass_id, - load_mirror_id + load_mirror_id, + load_appendix_id }; enum constants { patch_info_size = 3 @@ -417,7 +418,7 @@ } NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start()); n_move->set_offset(field_offset); - } else if (_id == load_klass_id || _id == load_mirror_id) { + } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { assert(_obj != noreg, "must have register object for load_klass/load_mirror"); #ifdef ASSERT // verify that we're pointing at a NativeMovConstReg diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_Compilation.cpp --- a/src/share/vm/c1/c1_Compilation.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_Compilation.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -74,16 +74,19 @@ private: JavaThread* _thread; CompileLog* _log; + TimerName _timer; public: PhaseTraceTime(TimerName timer) - : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) { + : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), + _log(NULL), _timer(timer) + { if (Compilation::current() != NULL) { _log = Compilation::current()->log(); } if (_log != NULL) { - _log->begin_head("phase name='%s'", timer_name[timer]); + _log->begin_head("phase name='%s'", timer_name[_timer]); _log->stamp(); _log->end_head(); } @@ -91,7 +94,7 @@ ~PhaseTraceTime() { if (_log != NULL) - _log->done("phase"); + _log->done("phase name='%s'", timer_name[_timer]); } }; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_Compilation.hpp --- a/src/share/vm/c1/c1_Compilation.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_Compilation.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -279,8 +279,8 @@ // Base class for objects allocated by the compiler in the compilation arena class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC { public: - void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); } - void* operator new(size_t size, Arena* arena) { + void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); } + void* operator new(size_t size, Arena* arena) throw() { return arena->Amalloc(size); } void operator delete(void* p) {} // nothing to do diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1583,7 +1583,7 @@ ObjectType* obj_type = obj->type()->as_ObjectType(); if (obj_type->is_constant() && !PatchALot) { ciObject* const_oop = obj_type->constant_value(); - if (!const_oop->is_null_object()) { + if (!const_oop->is_null_object() && const_oop->is_loaded()) { if (field->is_constant()) { ciConstant field_val = field->constant_value_of(const_oop); BasicType field_type = field_val.basic_type(); @@ -1667,9 +1667,8 @@ const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); assert(declared_signature != NULL, "cannot be null"); - // FIXME bail out for now - if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) { - BAILOUT("unlinked call site (FIXME needs patching or recompile support)"); + if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) { + BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)"); } // we have to make sure the argument size (incl. the receiver) @@ -1713,10 +1712,23 @@ code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; break; } + } else { + if (bc_raw == Bytecodes::_invokehandle) { + assert(!will_link, "should come here only for unlinked call"); + code = Bytecodes::_invokespecial; + } } // Push appendix argument (MethodType, CallSite, etc.), if one. - if (stream()->has_appendix()) { + bool patch_for_appendix = false; + int patching_appendix_arg = 0; + if (C1PatchInvokeDynamic && + (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) { + Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before())); + apush(arg); + patch_for_appendix = true; + patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1; + } else if (stream()->has_appendix()) { ciObject* appendix = stream()->get_appendix(); Value arg = append(new Constant(new ObjectConstant(appendix))); apush(arg); @@ -1732,7 +1744,8 @@ if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() && !(// %%% FIXME: Are both of these relevant? 
target->is_method_handle_intrinsic() || - target->is_compiled_lambda_form())) { + target->is_compiled_lambda_form()) && + !patch_for_appendix) { Value receiver = NULL; ciInstanceKlass* receiver_klass = NULL; bool type_is_exact = false; @@ -1850,7 +1863,8 @@ // check if we could do inlining if (!PatchALot && Inline && klass->is_loaded() && (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized()) - && target->is_loaded()) { + && target->is_loaded() + && !patch_for_appendix) { // callee is known => check if we have static binding assert(target->is_loaded(), "callee must be known"); if (code == Bytecodes::_invokestatic || @@ -1901,7 +1915,7 @@ code == Bytecodes::_invokespecial || code == Bytecodes::_invokevirtual || code == Bytecodes::_invokeinterface; - Values* args = state()->pop_arguments(target->arg_size_no_receiver()); + Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg); Value recv = has_receiver ? apop() : NULL; int vtable_index = Method::invalid_vtable_index; @@ -4207,7 +4221,9 @@ } } - if (!PrintInlining) return; + if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) { + return; + } CompileTask::print_inlining(callee, scope()->level(), bci(), msg); if (success && CIPrintMethodCodes) { callee->print_codes(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_Instruction.hpp --- a/src/share/vm/c1/c1_Instruction.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_Instruction.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -323,7 +323,7 @@ } public: - void* operator new(size_t size) { + void* operator new(size_t size) throw() { Compilation* c = Compilation::current(); void* res = c->arena()->Amalloc(size); ((Instruction*)res)->_id = c->get_next_id(); @@ -1611,7 +1611,7 @@ friend class SuxAndWeightAdjuster; public: - void* operator new(size_t size) { + void* operator new(size_t size) throw() { Compilation* c = Compilation::current(); void* res = c->arena()->Amalloc(size); ((BlockBegin*)res)->_id = c->get_next_id(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_LIR.hpp --- a/src/share/vm/c1/c1_LIR.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_LIR.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1211,8 +1211,6 @@ bool is_invokedynamic() const { return code() == lir_dynamic_call; } bool is_method_handle_invoke() const { return - is_invokedynamic() // An invokedynamic is always a MethodHandle call site. 
- || method()->is_compiled_lambda_form() // Java-generated adapter || method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_LIRAssembler.cpp --- a/src/share/vm/c1/c1_LIRAssembler.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -93,12 +93,23 @@ default: ShouldNotReachHere(); } + } else if (patch->id() == PatchingStub::load_appendix_id) { + Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci()); + assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution"); } else { ShouldNotReachHere(); } #endif } +PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) { + IRScope* scope = info->scope(); + Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci()); + if (Bytecodes::has_optional_appendix(bc_raw)) { + return PatchingStub::load_appendix_id; + } + return PatchingStub::load_mirror_id; +} //--------------------------------------------------------------- diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_LIRAssembler.hpp --- a/src/share/vm/c1/c1_LIRAssembler.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -119,6 +119,8 @@ void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op); + PatchingStub::PatchID patching_id(CodeEmitInfo* info); + public: LIR_Assembler(Compilation* c); ~LIR_Assembler(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -709,10 +709,10 @@ Bytecodes::Code code = field_access.code(); // We must load class, initialize class and resolvethe field - FieldAccessInfo result; // initialize class if needed + fieldDescriptor result; // initialize class if needed constantPoolHandle constants(THREAD, caller->constants()); - LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL); - return result.klass()(); + LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL); + return result.field_holder(); } @@ -819,17 +819,18 @@ KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code + Handle appendix(THREAD, NULL); // oop needed by appendix_patching code bool load_klass_or_mirror_patch_id = (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id); if (stub_id == Runtime1::access_field_patching_id) { Bytecode_field field_access(caller_method, bci); - FieldAccessInfo result; // initialize class if needed + fieldDescriptor result; // initialize class if needed Bytecodes::Code code = field_access.code(); constantPoolHandle constants(THREAD, caller_method->constants()); - LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK); - patch_field_offset = result.field_offset(); + LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK); + patch_field_offset = result.offset(); // If we're patching a field which is volatile then at compile it // must not have been know to be volatile, so the generated 
code @@ -888,10 +889,32 @@ mirror = Handle(THREAD, m); } break; - default: Unimplemented(); + default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id"); } // convert to handle load_klass = KlassHandle(THREAD, k); + } else if (stub_id == load_appendix_patching_id) { + Bytecode_invoke bytecode(caller_method, bci); + Bytecodes::Code bc = bytecode.invoke_code(); + + CallInfo info; + constantPoolHandle pool(thread, caller_method->constants()); + int index = bytecode.index(); + LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK); + appendix = info.resolved_appendix(); + switch (bc) { + case Bytecodes::_invokehandle: { + int cache_index = ConstantPool::decode_cpcache_index(index, true); + assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index"); + pool->cache()->entry_at(cache_index)->set_method_handle(pool, info); + break; + } + case Bytecodes::_invokedynamic: { + pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info); + break; + } + default: fatal("unexpected bytecode for load_appendix_patching_id"); + } } else { ShouldNotReachHere(); } @@ -915,16 +938,6 @@ // Return to the now deoptimized frame. } - // If we are patching in a non-perm oop, make sure the nmethod - // is on the right list. - if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) { - MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); - nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); - guarantee(nm != NULL, "only nmethods can contain non-perm oops"); - if (!nm->on_scavenge_root_list()) - CodeCache::add_scavenge_root_nmethod(nm); - } - // Now copy code back { @@ -1002,65 +1015,80 @@ n_copy->data() == (intptr_t)Universe::non_oop_word(), "illegal init value"); if (stub_id == Runtime1::load_klass_patching_id) { - assert(load_klass() != NULL, "klass not set"); - n_copy->set_data((intx) (load_klass())); + assert(load_klass() != NULL, "klass not set"); + n_copy->set_data((intx) (load_klass())); } else { assert(mirror() != NULL, "klass not set"); - n_copy->set_data((intx) (mirror())); + n_copy->set_data(cast_from_oop(mirror())); } if (TracePatching) { Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); } + } + } else if (stub_id == Runtime1::load_appendix_patching_id) { + NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); + assert(n_copy->data() == 0 || + n_copy->data() == (intptr_t)Universe::non_oop_word(), + "illegal init value"); + n_copy->set_data(cast_from_oop(appendix())); -#if defined(SPARC) || defined(PPC) - // Update the location in the nmethod with the proper - // metadata. When the code was generated, a NULL was stuffed - // in the metadata table and that table needs to be update to - // have the right value. On intel the value is kept - // directly in the instruction instead of in the metadata - // table, so set_data above effectively updated the value. 
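
The load_appendix_patching plumbing added in these Runtime1 hunks follows the same resolve-once-then-patch shape as the existing klass and mirror paths: the compiled code is emitted with a placeholder constant (Universe::non_oop_word()), the first execution traps into the runtime, LinkResolver produces the resolved appendix oop, the placeholder is overwritten and the relocation fixed, and later executions run straight through. Below is a toy sketch of that control-flow shape only; PatchSlot and its members are invented names, and real patching rewrites machine code under a lock with instruction-cache invalidation:

    #include <cstdio>

    // Toy stand-in for a code-embedded constant that is emitted as a
    // placeholder and patched on first use. Illustrative only.
    struct PatchSlot {
      static const long kPlaceholder = -1;  // plays the role of non_oop_word()
      long data;

      PatchSlot() : data(kPlaceholder) {}

      long resolve() { return 0x1234; }     // stands in for LinkResolver

      long value() {
        if (data == kPlaceholder) {         // first execution: patch once
          data = resolve();
        }
        return data;                        // later executions: no runtime call
      }
    };

    int main() {
      PatchSlot slot;
      std::printf("0x%lx\n", slot.value()); // triggers the one-time patch
      std::printf("0x%lx\n", slot.value()); // already resolved
      return 0;
    }
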
- nmethod* nm = CodeCache::find_nmethod(instr_pc); - assert(nm != NULL, "invalid nmethod_pc"); - RelocIterator mds(nm, copy_buff, copy_buff + 1); - bool found = false; - while (mds.next() && !found) { - if (mds.type() == relocInfo::oop_type) { - assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id"); - oop_Relocation* r = mds.oop_reloc(); - oop* oop_adr = r->oop_addr(); - *oop_adr = mirror(); - r->fix_oop_relocation(); - found = true; - } else if (mds.type() == relocInfo::metadata_type) { - assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id"); - metadata_Relocation* r = mds.metadata_reloc(); - Metadata** metadata_adr = r->metadata_addr(); - *metadata_adr = load_klass(); - r->fix_metadata_relocation(); - found = true; - } - } - assert(found, "the metadata must exist!"); -#endif - + if (TracePatching) { + Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); } } else { ShouldNotReachHere(); } +#if defined(SPARC) || defined(PPC) + if (load_klass_or_mirror_patch_id || + stub_id == Runtime1::load_appendix_patching_id) { + // Update the location in the nmethod with the proper + // metadata. When the code was generated, a NULL was stuffed + // in the metadata table and that table needs to be update to + // have the right value. On intel the value is kept + // directly in the instruction instead of in the metadata + // table, so set_data above effectively updated the value. + nmethod* nm = CodeCache::find_nmethod(instr_pc); + assert(nm != NULL, "invalid nmethod_pc"); + RelocIterator mds(nm, copy_buff, copy_buff + 1); + bool found = false; + while (mds.next() && !found) { + if (mds.type() == relocInfo::oop_type) { + assert(stub_id == Runtime1::load_mirror_patching_id || + stub_id == Runtime1::load_appendix_patching_id, "wrong stub id"); + oop_Relocation* r = mds.oop_reloc(); + oop* oop_adr = r->oop_addr(); + *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix(); + r->fix_oop_relocation(); + found = true; + } else if (mds.type() == relocInfo::metadata_type) { + assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id"); + metadata_Relocation* r = mds.metadata_reloc(); + Metadata** metadata_adr = r->metadata_addr(); + *metadata_adr = load_klass(); + r->fix_metadata_relocation(); + found = true; + } + } + assert(found, "the metadata must exist!"); + } +#endif if (do_patch) { // replace instructions // first replace the tail, then the call #ifdef ARM - if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) { + if((load_klass_or_mirror_patch_id || + stub_id == Runtime1::load_appendix_patching_id) && + !VM_Version::supports_movw()) { nmethod* nm = CodeCache::find_nmethod(instr_pc); address addr = NULL; assert(nm != NULL, "invalid nmethod_pc"); RelocIterator mds(nm, copy_buff, copy_buff + 1); while (mds.next()) { if (mds.type() == relocInfo::oop_type) { - assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id"); + assert(stub_id == Runtime1::load_mirror_patching_id || + stub_id == Runtime1::load_appendix_patching_id, "wrong stub id"); oop_Relocation* r = mds.oop_reloc(); addr = (address)r->oop_addr(); break; @@ -1087,7 +1115,8 @@ ICache::invalidate_range(instr_pc, *byte_count); NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); - if (load_klass_or_mirror_patch_id) { + if (load_klass_or_mirror_patch_id || + stub_id == Runtime1::load_appendix_patching_id) { relocInfo::relocType rtype = (stub_id == Runtime1::load_klass_patching_id) ? 
relocInfo::metadata_type : @@ -1125,6 +1154,22 @@ } } } + + // If we are patching in a non-perm oop, make sure the nmethod + // is on the right list. + if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) || + (appendix.not_null() && appendix->is_scavengable()))) { + MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); + nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); + guarantee(nm != NULL, "only nmethods can contain non-perm oops"); + if (!nm->on_scavenge_root_list()) { + CodeCache::add_scavenge_root_nmethod(nm); + } + + // Since we've patched some oops in the nmethod, + // (re)register it with the heap. + Universe::heap()->register_nmethod(nm); + } JRT_END // @@ -1174,6 +1219,24 @@ return caller_is_deopted(); } +int Runtime1::move_appendix_patching(JavaThread* thread) { +// +// NOTE: we are still in Java +// + Thread* THREAD = thread; + debug_only(NoHandleMark nhm;) + { + // Enter VM mode + + ResetNoHandleMark rnhm; + patch_code(thread, load_appendix_patching_id); + } + // Back in JAVA, use no oops DON'T safepoint + + // Return true if calling code is deoptimized + + return caller_is_deopted(); +} // // Entry point for compiled code. We want to patch a nmethod. // We don't do a normal VM transition here because we want to diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_Runtime1.hpp --- a/src/share/vm/c1/c1_Runtime1.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_Runtime1.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -67,6 +67,7 @@ stub(access_field_patching) \ stub(load_klass_patching) \ stub(load_mirror_patching) \ + stub(load_appendix_patching) \ stub(g1_pre_barrier_slow) \ stub(g1_post_barrier_slow) \ stub(fpu2long_stub) \ @@ -160,6 +161,7 @@ static int access_field_patching(JavaThread* thread); static int move_klass_patching(JavaThread* thread); static int move_mirror_patching(JavaThread* thread); + static int move_appendix_patching(JavaThread* thread); static void patch_code(JavaThread* thread, StubID stub_id); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_globals.cpp --- a/src/share/vm/c1/c1_globals.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_globals.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,4 +25,4 @@ #include "precompiled.hpp" #include "c1/c1_globals.hpp" -C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG) +C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/c1/c1_globals.hpp --- a/src/share/vm/c1/c1_globals.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/c1/c1_globals.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -54,7 +54,7 @@ // // Defines all global flags used by the client compiler. 
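
The c1_globals change just below is an X-macro table growing a column: C1_FLAGS takes one macro argument per flag category, so introducing the diagnostic category has to touch every expansion site in lockstep (the DECLARE pass in c1_globals.hpp and the MATERIALIZE pass in c1_globals.cpp above). A minimal sketch of the pattern, with made-up flag names:

    #include <cstdio>

    // X-macro flag table in the style of C1_FLAGS; every expansion must
    // supply one callback per category. All names below are invented.
    #define MY_FLAGS(product, diagnostic)                          \
      product(bool, UseFastPath, true, "take the fast path")       \
      diagnostic(bool, PatchThings, true, "patch late-bound data")

    // Pass 1: declare the flag variables (cf. DECLARE_*_FLAG).
    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)

    // Pass 2: define them with their defaults (cf. MATERIALIZE_*_FLAG).
    #define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(MATERIALIZE_FLAG, MATERIALIZE_FLAG)

    int main() {
      std::printf("UseFastPath=%d PatchThings=%d\n", UseFastPath, PatchThings);
      return 0;
    }

Forgetting one expansion site fails loudly at compile time, because the macro is then invoked with the wrong number of arguments.
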
// -#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \ +#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \ \ /* Printing */ \ notproduct(bool, PrintC1Statistics, false, \ @@ -333,15 +333,19 @@ "Use CHA and exact type results at call sites when updating MDOs")\ \ product(bool, C1UpdateMethodData, trueInTiered, \ - "Update MethodData*s in Tier1-generated code") \ + "Update MethodData*s in Tier1-generated code") \ \ develop(bool, PrintCFGToFile, false, \ "print control flow graph to a separate file during compilation") \ \ + diagnostic(bool, C1PatchInvokeDynamic, true, \ + "Patch invokedynamic appendix not known at compile time") \ + \ + \ // Read default values for c1 globals -C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG) +C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG) #endif // SHARE_VM_C1_C1_GLOBALS_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciArray.cpp --- a/src/share/vm/ci/ciArray.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciArray.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -24,13 +24,92 @@ #include "precompiled.hpp" #include "ci/ciArray.hpp" +#include "ci/ciArrayKlass.hpp" +#include "ci/ciConstant.hpp" #include "ci/ciKlass.hpp" #include "ci/ciUtilities.hpp" +#include "oops/objArrayOop.hpp" +#include "oops/typeArrayOop.hpp" // ciArray // // This class represents an arrayOop in the HotSpot virtual // machine. +static BasicType fixup_element_type(BasicType bt) { + if (bt == T_ARRAY) return T_OBJECT; + if (bt == T_BOOLEAN) return T_BYTE; + return bt; +} + +ciConstant ciArray::element_value_impl(BasicType elembt, + arrayOop ary, + int index) { + if (ary == NULL) + return ciConstant(); + assert(ary->is_array(), ""); + if (index < 0 || index >= ary->length()) + return ciConstant(); + ArrayKlass* ak = (ArrayKlass*) ary->klass(); + BasicType abt = ak->element_type(); + if (fixup_element_type(elembt) != + fixup_element_type(abt)) + return ciConstant(); + switch (elembt) { + case T_ARRAY: + case T_OBJECT: + { + assert(ary->is_objArray(), ""); + objArrayOop objary = (objArrayOop) ary; + oop elem = objary->obj_at(index); + ciEnv* env = CURRENT_ENV; + ciObject* box = env->get_object(elem); + return ciConstant(T_OBJECT, box); + } + } + assert(ary->is_typeArray(), ""); + typeArrayOop tary = (typeArrayOop) ary; + jint value = 0; + switch (elembt) { + case T_LONG: return ciConstant(tary->long_at(index)); + case T_FLOAT: return ciConstant(tary->float_at(index)); + case T_DOUBLE: return ciConstant(tary->double_at(index)); + default: return ciConstant(); + case T_BYTE: value = tary->byte_at(index); break; + case T_BOOLEAN: value = tary->byte_at(index) & 1; break; + case T_SHORT: value = tary->short_at(index); break; + case T_CHAR: value = tary->char_at(index); break; + case T_INT: value = tary->int_at(index); break; + } + return ciConstant(elembt, value); +} + +// ------------------------------------------------------------------ +// ciArray::element_value +// +// Current value of an element. +// Returns T_ILLEGAL if there is no element at the given index. 
+ciConstant ciArray::element_value(int index) { + BasicType elembt = element_basic_type(); + GUARDED_VM_ENTRY( + return element_value_impl(elembt, get_arrayOop(), index); + ) +} + +// ------------------------------------------------------------------ +// ciArray::element_value_by_offset +// +// Current value of an element at the specified offset. +// Returns T_ILLEGAL if there is no element at the given offset. +ciConstant ciArray::element_value_by_offset(intptr_t element_offset) { + BasicType elembt = element_basic_type(); + intptr_t shift = exact_log2(type2aelembytes(elembt)); + intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt); + intptr_t index = (element_offset - header) >> shift; + intptr_t offset = header + ((intptr_t)index << shift); + if (offset != element_offset || index != (jint)index) + return ciConstant(); + return element_value((jint) index); +} // ------------------------------------------------------------------ // ciArray::print_impl diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciArray.hpp --- a/src/share/vm/ci/ciArray.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciArray.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,6 +25,8 @@ #ifndef SHARE_VM_CI_CIARRAY_HPP #define SHARE_VM_CI_CIARRAY_HPP +#include "ci/ciArrayKlass.hpp" +#include "ci/ciConstant.hpp" #include "ci/ciObject.hpp" #include "oops/arrayOop.hpp" #include "oops/objArrayOop.hpp" @@ -45,15 +47,30 @@ ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {} - arrayOop get_arrayOop() { return (arrayOop)get_oop(); } + arrayOop get_arrayOop() const { return (arrayOop)get_oop(); } const char* type_string() { return "ciArray"; } void print_impl(outputStream* st); + ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index); + public: int length() { return _length; } + // Convenience routines. + ciArrayKlass* array_type() { return klass()->as_array_klass(); } + ciType* element_type() { return array_type()->element_type(); } + BasicType element_basic_type() { return element_type()->basic_type(); } + + // Current value of an element. + // Returns T_ILLEGAL if there is no element at the given index. + ciConstant element_value(int index); + + // Current value of an element at the specified offset. + // Returns T_ILLEGAL if there is no element at the given offset. + ciConstant element_value_by_offset(intptr_t element_offset); + // What kind of ciObject is this? 
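
The index arithmetic in element_value_by_offset above rewards a worked example: the candidate index is recovered by subtracting the array header from the byte offset and shifting right by log2(element size), the offset is then recomputed from that index, and anything that does not round-trip (a misaligned offset, or one pointing into the header) comes back as T_ILLEGAL via the default ciConstant. A standalone illustration under assumed values, a 16-byte header and 4-byte jint elements as on a typical 64-bit VM:

    #include <cstdio>

    int main() {
      const long header = 16; // assumed arrayOopDesc::base_offset_in_bytes(T_INT)
      const long shift  = 2;  // log2(sizeof(jint))
      const long offsets[] = { 24, 25 };  // one aligned, one misaligned

      for (int i = 0; i < 2; i++) {
        long element_offset = offsets[i];
        long index  = (element_offset - header) >> shift;
        long offset = header + (index << shift);  // recompute and compare
        if (offset != element_offset)
          std::printf("offset %ld: rejected, no element there\n", element_offset);
        else
          std::printf("offset %ld -> index %ld\n", element_offset, index);
      }
      return 0;
    }

Offset 24 round-trips to index 2; offset 25 recomputes to 24, fails the comparison, and is rejected just as the VM code rejects it.
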
bool is_array() { return true; } bool is_java_object() { return true; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciConstant.hpp --- a/src/share/vm/ci/ciConstant.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciConstant.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -41,7 +41,6 @@ union { jint _int; jlong _long; - jint _long_half[2]; jfloat _float; jdouble _double; ciObject* _object; @@ -111,6 +110,20 @@ return _value._object; } + bool is_null_or_zero() const { + if (!is_java_primitive(basic_type())) { + return as_object()->is_null_object(); + } else if (type2size[basic_type()] == 1) { + // treat float bits as int, to avoid comparison with -0 and NaN + return (_value._int == 0); + } else if (type2size[basic_type()] == 2) { + // treat double bits as long, to avoid comparison with -0 and NaN + return (_value._long == 0); + } else { + return false; + } + } + // Debugging output void print(); }; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciEnv.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1150,6 +1150,10 @@ record_method_not_compilable("out of memory"); } +ciInstance* ciEnv::unloaded_ciinstance() { + GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();) +} + void ciEnv::dump_replay_data(outputStream* out) { VM_ENTRY_MARK; MutexLocker ml(Compile_lock); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciEnv.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -400,6 +400,7 @@ static ciInstanceKlass* unloaded_ciinstance_klass() { return _unloaded_ciinstance_klass; } + ciInstance* unloaded_ciinstance(); ciKlass* find_system_klass(ciSymbol* klass_name); // Note: To find a class from its name string, use ciSymbol::make, diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciField.cpp --- a/src/share/vm/ci/ciField.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciField.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,6 @@ assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constan-pool"); - _cp_index = index; constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants()); // Get the field's name, signature, and type. @@ -116,7 +115,7 @@ // The declared holder of this field may not have been loaded. // Bail out with partial field information. if (!holder_is_accessible) { - // _cp_index and _type have already been set. + // _type has already been set. // The default values for _flags and _constant_value will suffice. // We need values for _holder, _offset, and _is_constant, _holder = declared_holder; @@ -146,8 +145,6 @@ ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) { ASSERT_IN_VM; - _cp_index = -1; - // Get the field's name, signature, and type. ciEnv* env = CURRENT_ENV; _name = env->get_symbol(fd->name()); @@ -189,12 +186,14 @@ _holder = CURRENT_ENV->get_instance_klass(fd->field_holder()); // Check to see if the field is constant. 
- if (_holder->is_initialized() && this->is_final()) { + bool is_final = this->is_final(); + bool is_stable = FoldStableValues && this->is_stable(); + if (_holder->is_initialized() && (is_final || is_stable)) { if (!this->is_static()) { // A field can be constant if it's a final static field or if // it's a final non-static field of a trusted class (classes in // java.lang.invoke and sun.invoke packages and subpackages). - if (trust_final_non_static_fields(_holder)) { + if (is_stable || trust_final_non_static_fields(_holder)) { _is_constant = true; return; } @@ -227,7 +226,6 @@ Handle mirror = k->java_mirror(); - _is_constant = true; switch(type()->basic_type()) { case T_BYTE: _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset)); @@ -273,6 +271,12 @@ } } } + if (is_stable && _constant_value.is_null_or_zero()) { + // It is not a constant after all; treat it as uninitialized. + _is_constant = false; + } else { + _is_constant = true; + } } else { _is_constant = false; } @@ -344,12 +348,11 @@ } } - FieldAccessInfo result; - constantPoolHandle c_pool(THREAD, - accessing_klass->get_instanceKlass()->constants()); - LinkResolver::resolve_field(result, c_pool, _cp_index, - Bytecodes::java_code(bc), - true, false, KILL_COMPILE_ON_FATAL_(false)); + fieldDescriptor result; + LinkResolver::resolve_field(result, _holder->get_instanceKlass(), + _name->get_symbol(), _signature->get_symbol(), + accessing_klass->get_Klass(), bc, true, false, + KILL_COMPILE_ON_FATAL_(false)); // update the hit-cache, unless there is a problem with memory scoping: if (accessing_klass->is_shared() || !is_shared()) { @@ -373,8 +376,11 @@ tty->print(" signature="); _signature->print_symbol(); tty->print(" offset=%d type=", _offset); - if (_type != NULL) _type->print_name(); - else tty->print("(reference)"); + if (_type != NULL) + _type->print_name(); + else + tty->print("(reference)"); + tty->print(" flags=%04x", flags().as_int()); tty->print(" is_constant=%s", bool_to_str(_is_constant)); if (_is_constant && is_static()) { tty->print(" constant_value="); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciField.hpp --- a/src/share/vm/ci/ciField.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciField.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,9 +53,6 @@ ciInstanceKlass* _known_to_link_with_get; ciConstant _constant_value; - // Used for will_link - int _cp_index; - ciType* compute_type(); ciType* compute_type_impl(); @@ -139,7 +136,10 @@ // non-constant fields. These are java.lang.System.in // and java.lang.System.out. Abomination. // - // Note: the check for case 4 is not yet implemented. + // A field is also considered constant if it is marked @Stable + // and is non-null (or non-zero, if a primitive). + // For non-static fields, the null/zero check must be + // arranged by the user, as constant_value().is_null_or_zero(). bool is_constant() { return _is_constant; } // Get the constant value of this field. 
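
A note on the null-or-zero test that the @Stable support above leans on: ciConstant::is_null_or_zero compares the raw int/long bits of float and double values instead of using floating-point equality. That matters because -0.0 compares equal to 0.0 under == (so a field deliberately initialized to -0.0 would otherwise look like an untouched default), and NaN compares unequal to everything, itself included. A standalone demonstration of both pitfalls:

    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      float minus_zero = -0.0f;
      unsigned int bits;
      std::memcpy(&bits, &minus_zero, sizeof(bits));
      // -0.0f equals 0.0f under ==, yet its bit pattern is nonzero:
      std::printf("-0.0f == 0.0f : %d, bits = 0x%08x\n",
                  (int)(minus_zero == 0.0f), bits);

      // NaN is unequal to everything, including itself:
      float nan = std::numeric_limits<float>::quiet_NaN();
      std::printf("nan == nan    : %d\n", (int)(nan == nan));
      return 0;
    }

Comparing the stored bits as an integer sidesteps both cases: -0.0 registers as a real, non-default value, and NaN never enters a floating-point comparison at all.
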
@@ -173,6 +173,7 @@ bool is_protected () { return flags().is_protected(); } bool is_static () { return flags().is_static(); } bool is_final () { return flags().is_final(); } + bool is_stable () { return flags().is_stable(); } bool is_volatile () { return flags().is_volatile(); } bool is_transient () { return flags().is_transient(); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciFlags.hpp --- a/src/share/vm/ci/ciFlags.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciFlags.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -59,6 +59,7 @@ bool is_interface () const { return (_flags & JVM_ACC_INTERFACE ) != 0; } bool is_abstract () const { return (_flags & JVM_ACC_ABSTRACT ) != 0; } bool is_strict () const { return (_flags & JVM_ACC_STRICT ) != 0; } + bool is_stable () const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; } // Conversion jint as_int() { return _flags; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciInstance.cpp --- a/src/share/vm/ci/ciInstance.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciInstance.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -60,10 +60,10 @@ // // Constant value of a field. ciConstant ciInstance::field_value(ciField* field) { - assert(is_loaded() && - field->holder()->is_loaded() && - klass()->is_subclass_of(field->holder()), - "invalid access"); + assert(is_loaded(), "invalid access - must be loaded"); + assert(field->holder()->is_loaded(), "invalid access - holder must be loaded"); + assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass"); + VM_ENTRY_MARK; ciConstant result; Handle obj = get_oop(); @@ -127,6 +127,8 @@ ciConstant ciInstance::field_value_by_offset(int field_offset) { ciInstanceKlass* ik = klass()->as_instance_klass(); ciField* field = ik->get_field_by_offset(field_offset, false); + if (field == NULL) + return ciConstant(); // T_ILLEGAL return field_value(field); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciInstanceKlass.cpp --- a/src/share/vm/ci/ciInstanceKlass.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -522,8 +522,7 @@ for (JavaFieldStream fs(k); !fs.done(); fs.next()) { if (fs.access_flags().is_static()) continue; - fieldDescriptor fd; - fd.initialize(k, fs.index()); + fieldDescriptor& fd = fs.field_descriptor(); ciField* field = new (arena) ciField(&fd); fields->append(field); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciMethod.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -286,7 +286,10 @@ check_is_loaded(); assert(holder()->is_linked(), "must be linked"); VM_ENTRY_MARK; - return klassItable::compute_itable_index(get_Method()); + Method* m = get_Method(); + if (!m->has_itable_index()) + return Method::nonvirtual_vtable_index; + return m->itable_index(); } #endif // SHARK @@ -1137,6 +1140,10 @@ // ------------------------------------------------------------------ // ciMethod::check_call bool ciMethod::check_call(int refinfo_index, bool is_static) const { + // This method is used only in C2 from InlineTree::ok_to_inline, + // and is only used under -Xcomp or -XX:CompileTheWorld. 
+ // It appears to fail when applied to an invokeinterface call site. + // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points. VM_ENTRY_MARK; { EXCEPTION_MARK; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciMethod.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -177,6 +177,10 @@ address bcp = code() + bci; return Bytecodes::java_code_at(NULL, bcp); } + Bytecodes::Code raw_code_at_bci(int bci) { + address bcp = code() + bci; + return Bytecodes::code_at(NULL, bcp); + } BCEscapeAnalyzer *get_bcea(); ciMethodBlocks *get_method_blocks(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciObjectFactory.cpp --- a/src/share/vm/ci/ciObjectFactory.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciObjectFactory.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -563,7 +563,10 @@ return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass()); } - +ciInstance* ciObjectFactory::get_unloaded_object_constant() { + if (ciEnv::_Object_klass == NULL) return NULL; + return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass()); +} //------------------------------------------------------------------ // ciObjectFactory::get_empty_methodData diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciObjectFactory.hpp --- a/src/share/vm/ci/ciObjectFactory.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciObjectFactory.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -131,6 +131,8 @@ ciInstance* get_unloaded_method_type_constant(ciSymbol* signature); + ciInstance* get_unloaded_object_constant(); + // Get the ciMethodData representing the methodData for a method // with none. ciMethodData* get_empty_methodData(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciSymbol.hpp --- a/src/share/vm/ci/ciSymbol.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciSymbol.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ friend class ciInstanceKlass; friend class ciSignature; friend class ciMethod; + friend class ciField; friend class ciObjArrayKlass; private: diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/ci/ciTypeArray.cpp --- a/src/share/vm/ci/ciTypeArray.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/ci/ciTypeArray.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -39,5 +39,10 @@ jchar ciTypeArray::char_at(int index) { VM_ENTRY_MARK; assert(index >= 0 && index < length(), "out of range"); - return get_typeArrayOop()->char_at(index); + jchar c = get_typeArrayOop()->char_at(index); +#ifdef ASSERT + jchar d = element_value(index).as_char(); + assert(c == d, ""); +#endif //ASSERT + return c; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/classFileParser.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -28,7 +28,6 @@ #include "classfile/classLoaderData.hpp" #include "classfile/classLoaderData.inline.hpp" #include "classfile/defaultMethods.hpp" -#include "classfile/genericSignatures.hpp" #include "classfile/javaClasses.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" @@ -889,6 +888,7 @@ int runtime_visible_type_annotations_length = 0; u1* runtime_invisible_type_annotations = NULL; int runtime_invisible_type_annotations_length = 0; + bool runtime_invisible_type_annotations_exists = false; while (attributes_count--) { cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length u2 attribute_name_index = cfs->get_u2_fast(); @@ -947,15 +947,27 @@ assert(runtime_invisible_annotations != NULL, "null invisible annotations"); cfs->skip_u1(runtime_invisible_annotations_length, CHECK); } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) { + if (runtime_visible_type_annotations != NULL) { + classfile_parse_error( + "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK); + } runtime_visible_type_annotations_length = attribute_length; runtime_visible_type_annotations = cfs->get_u1_buffer(); assert(runtime_visible_type_annotations != NULL, "null visible type annotations"); cfs->skip_u1(runtime_visible_type_annotations_length, CHECK); - } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) { - runtime_invisible_type_annotations_length = attribute_length; - runtime_invisible_type_annotations = cfs->get_u1_buffer(); - assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); - cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK); + } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) { + if (runtime_invisible_type_annotations_exists) { + classfile_parse_error( + "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK); + } else { + runtime_invisible_type_annotations_exists = true; + } + if (PreserveAllAnnotations) { + runtime_invisible_type_annotations_length = attribute_length; + runtime_invisible_type_annotations = cfs->get_u1_buffer(); + assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); + } + cfs->skip_u1(attribute_length, CHECK); } else { cfs->skip_u1(attribute_length, CHECK); // Skip unknown attributes } @@ -1775,6 +1787,10 @@ if (_location != _in_method) break; // only allow for methods if (!privileged) 
break; // only allow in privileged code return _method_LambdaForm_Hidden; + case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature): + if (_location != _in_field) break; // only allow for fields + if (!privileged) break; // only allow in privileged code + return _field_Stable; case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature): if (_location != _in_field && _location != _in_class) break; // only allow for fields and classes if (!EnableContended || (RestrictContended && !privileged)) break; // honor privileges @@ -1787,6 +1803,8 @@ void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) { if (is_contended()) f->set_contended_group(contended_group()); + if (is_stable()) + f->set_stable(true); } ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() { @@ -2061,6 +2079,7 @@ int runtime_visible_type_annotations_length = 0; u1* runtime_invisible_type_annotations = NULL; int runtime_invisible_type_annotations_length = 0; + bool runtime_invisible_type_annotations_exists = false; u1* annotation_default = NULL; int annotation_default_length = 0; @@ -2317,16 +2336,30 @@ assert(annotation_default != NULL, "null annotation default"); cfs->skip_u1(annotation_default_length, CHECK_(nullHandle)); } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) { + if (runtime_visible_type_annotations != NULL) { + classfile_parse_error( + "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s", + CHECK_(nullHandle)); + } runtime_visible_type_annotations_length = method_attribute_length; runtime_visible_type_annotations = cfs->get_u1_buffer(); assert(runtime_visible_type_annotations != NULL, "null visible type annotations"); // No need for the VM to parse Type annotations cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle)); - } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) { - runtime_invisible_type_annotations_length = method_attribute_length; - runtime_invisible_type_annotations = cfs->get_u1_buffer(); - assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); - cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle)); + } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) { + if (runtime_invisible_type_annotations_exists) { + classfile_parse_error( + "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s", + CHECK_(nullHandle)); + } else { + runtime_invisible_type_annotations_exists = true; + } + if (PreserveAllAnnotations) { + runtime_invisible_type_annotations_length = method_attribute_length; + runtime_invisible_type_annotations = cfs->get_u1_buffer(); + assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); + } + cfs->skip_u1(method_attribute_length, CHECK_(nullHandle)); } else { // Skip unknown attributes cfs->skip_u1(method_attribute_length, CHECK_(nullHandle)); @@ -2512,7 +2545,9 @@ if (method->is_final()) { *has_final_method = true; } - if (is_interface && !method->is_abstract() && !method->is_static()) { + if (is_interface && !(*has_default_methods) + && !method->is_abstract() && !method->is_static() + && !method->is_private()) { // default method *has_default_methods = true; } @@ -2590,7 +2625,7 @@ valid_symbol_at(sourcefile_index), "Invalid SourceFile attribute at constant pool index %u in class file %s", sourcefile_index, CHECK); - 
set_class_sourcefile(_cp->symbol_at(sourcefile_index)); + set_class_sourcefile_index(sourcefile_index); } @@ -2728,7 +2763,7 @@ valid_symbol_at(signature_index), "Invalid constant pool index %u in Signature attribute in class file %s", signature_index, CHECK); - set_class_generic_signature(_cp->symbol_at(signature_index)); + set_class_generic_signature_index(signature_index); } void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) { @@ -2819,6 +2854,7 @@ int runtime_visible_type_annotations_length = 0; u1* runtime_invisible_type_annotations = NULL; int runtime_invisible_type_annotations_length = 0; + bool runtime_invisible_type_annotations_exists = false; u1* inner_classes_attribute_start = NULL; u4 inner_classes_attribute_length = 0; u2 enclosing_method_class_index = 0; @@ -2922,16 +2958,28 @@ parsed_bootstrap_methods_attribute = true; parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK); } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) { + if (runtime_visible_type_annotations != NULL) { + classfile_parse_error( + "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK); + } runtime_visible_type_annotations_length = attribute_length; runtime_visible_type_annotations = cfs->get_u1_buffer(); assert(runtime_visible_type_annotations != NULL, "null visible type annotations"); // No need for the VM to parse Type annotations cfs->skip_u1(runtime_visible_type_annotations_length, CHECK); - } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) { - runtime_invisible_type_annotations_length = attribute_length; - runtime_invisible_type_annotations = cfs->get_u1_buffer(); - assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); - cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK); + } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) { + if (runtime_invisible_type_annotations_exists) { + classfile_parse_error( + "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK); + } else { + runtime_invisible_type_annotations_exists = true; + } + if (PreserveAllAnnotations) { + runtime_invisible_type_annotations_length = attribute_length; + runtime_invisible_type_annotations = cfs->get_u1_buffer(); + assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations"); + } + cfs->skip_u1(attribute_length, CHECK); } else { // Unknown attribute cfs->skip_u1(attribute_length, CHECK); @@ -2975,13 +3023,11 @@ void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) { if (_synthetic_flag) k->set_is_synthetic(); - if (_sourcefile != NULL) { - _sourcefile->increment_refcount(); - k->set_source_file_name(_sourcefile); + if (_sourcefile_index != 0) { + k->set_source_file_name_index(_sourcefile_index); } - if (_generic_signature != NULL) { - _generic_signature->increment_refcount(); - k->set_generic_signature(_generic_signature); + if (_generic_signature_index != 0) { + k->set_generic_signature_index(_generic_signature_index); } if (_sde_buffer != NULL) { k->set_source_debug_extension(_sde_buffer, _sde_length); @@ -3041,35 +3087,6 @@ return annotations; } - -#ifdef ASSERT -static void parseAndPrintGenericSignatures( - instanceKlassHandle this_klass, TRAPS) { - assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise"); - ResourceMark rm; - - if (this_klass->generic_signature() != NULL) { - using namespace generic; - ClassDescriptor* spec = 
ClassDescriptor::parse_generic_signature(this_klass(), CHECK); - - tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string()); - spec->print_on(tty); - - for (int i = 0; i < this_klass->methods()->length(); ++i) { - Method* m = this_klass->methods()->at(i); - MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec); - Symbol* sig = m->generic_signature(); - if (sig == NULL) { - sig = m->signature(); - } - tty->print_cr("Parsing %s", sig->as_C_string()); - method_spec->print_on(tty); - } - } -} -#endif // def ASSERT - - instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index, TRAPS) { instanceKlassHandle super_klass; @@ -3980,9 +3997,8 @@ this_klass->set_has_final_method(); } this_klass->copy_method_ordering(method_ordering, CHECK_NULL); - // The InstanceKlass::_methods_jmethod_ids cache and the - // InstanceKlass::_methods_cached_itable_indices cache are - // both managed on the assumption that the initial cache + // The InstanceKlass::_methods_jmethod_ids cache + // is managed on the assumption that the initial cache // size is equal to the number of methods in the class. If // that changes, then InstanceKlass::idnum_can_increment() // has to be changed accordingly. @@ -4062,12 +4078,6 @@ java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle)); -#ifdef ASSERT - if (ParseAllGenericSignatures) { - parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle)); - } -#endif - // Generate any default methods - default methods are interface methods // that have a default implementation. This is new with Lambda project. if (has_default_methods && !access_flags.is_interface() && diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/classFileParser.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -62,8 +62,8 @@ bool _synthetic_flag; int _sde_length; char* _sde_buffer; - Symbol* _sourcefile; - Symbol* _generic_signature; + u2 _sourcefile_index; + u2 _generic_signature_index; // Metadata created before the instance klass is created. Must be deallocated // if not transferred to the InstanceKlass upon successful class loading @@ -81,16 +81,16 @@ Array* _fields_type_annotations; InstanceKlass* _klass; // InstanceKlass once created. 
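The classFileParser.hpp hunk above swaps the cached Symbol* fields for bare u2 constant-pool indices, so SourceFile and Signature no longer need explicit refcount management during parsing. A minimal Java analogue of that index-based deferral, assuming a hypothetical ConstantPool accessor:

    class ParsedClassAttributesSketch {
        interface ConstantPool { String utf8At(int index); }  // hypothetical accessor

        private int sourceFileIndex;        // 0 means "attribute absent"
        private int genericSignatureIndex;  // 0 means "attribute absent"

        void setSourceFileIndex(int idx)       { sourceFileIndex = idx; }
        void setGenericSignatureIndex(int idx) { genericSignatureIndex = idx; }

        // Resolution is deferred: the constant pool is consulted only on demand,
        // so no symbol needs its refcount bumped while parsing.
        String sourceFile(ConstantPool cp) {
            return sourceFileIndex == 0 ? null : cp.utf8At(sourceFileIndex);
        }
        String genericSignature(ConstantPool cp) {
            return genericSignatureIndex == 0 ? null : cp.utf8At(genericSignatureIndex);
        }
    }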
- void set_class_synthetic_flag(bool x) { _synthetic_flag = x; } - void set_class_sourcefile(Symbol* x) { _sourcefile = x; } - void set_class_generic_signature(Symbol* x) { _generic_signature = x; } - void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; } + void set_class_synthetic_flag(bool x) { _synthetic_flag = x; } + void set_class_sourcefile_index(u2 x) { _sourcefile_index = x; } + void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; } + void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; } void init_parsed_class_attributes(ClassLoaderData* loader_data) { _loader_data = loader_data; _synthetic_flag = false; - _sourcefile = NULL; - _generic_signature = NULL; + _sourcefile_index = 0; + _generic_signature_index = 0; _sde_buffer = NULL; _sde_length = 0; // initialize the other flags too: @@ -125,6 +125,7 @@ _method_LambdaForm_Compiled, _method_LambdaForm_Hidden, _sun_misc_Contended, + _field_Stable, _annotation_LIMIT }; const Location _location; @@ -143,14 +144,23 @@ assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); _annotations_present |= nth_bit((int)id); } + + void remove_annotation(ID id) { + assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); + _annotations_present &= ~nth_bit((int)id); + } + // Report if the annotation is present. - bool has_any_annotations() { return _annotations_present != 0; } - bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; } + bool has_any_annotations() const { return _annotations_present != 0; } + bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; } void set_contended_group(u2 group) { _contended_group = group; } - u2 contended_group() { return _contended_group; } + u2 contended_group() const { return _contended_group; } - bool is_contended() { return has_annotation(_sun_misc_Contended); } + bool is_contended() const { return has_annotation(_sun_misc_Contended); } + + void set_stable(bool stable) { set_annotation(_field_Stable); } + bool is_stable() const { return has_annotation(_field_Stable); } }; // This class also doubles as a holder for metadata cleanup. diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/classLoader.cpp --- a/src/share/vm/classfile/classLoader.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/classLoader.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
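The AnnotationCollector changes above add a remove_annotation counterpart to set_annotation plus the new _field_Stable ID, with presence tracked as one bit per annotation ID. A compact Java sketch of the same bitmask bookkeeping (enum and method names are illustrative):

    class AnnotationCollectorSketch {
        enum ID { METHOD_FORCE_INLINE, METHOD_LAMBDA_FORM_HIDDEN, SUN_MISC_CONTENDED, FIELD_STABLE }

        private long annotationsPresent = 0L;   // one bit per ID, like nth_bit(id)

        void set(ID id)    { annotationsPresent |= 1L << id.ordinal(); }
        void remove(ID id) { annotationsPresent &= ~(1L << id.ordinal()); }
        boolean has(ID id) { return (annotationsPresent & (1L << id.ordinal())) != 0; }
        boolean hasAny()   { return annotationsPresent != 0; }

        boolean isStable() { return has(ID.FIELD_STABLE); }

        public static void main(String[] args) {
            AnnotationCollectorSketch c = new AnnotationCollectorSketch();
            c.set(ID.FIELD_STABLE);
            System.out.println(c.isStable());   // true
            c.remove(ID.FIELD_STABLE);
            System.out.println(c.isStable());   // false
        }
    }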
* * This code is free software; you can redistribute it and/or modify it @@ -197,7 +197,7 @@ } -ClassFileStream* ClassPathDirEntry::open_stream(const char* name) { +ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) { // construct full path name char path[JVM_MAXPATHLEN]; if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) { @@ -240,7 +240,7 @@ FREE_C_HEAP_ARRAY(char, _zip_name, mtClass); } -ClassFileStream* ClassPathZipEntry::open_stream(const char* name) { +ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) { // enable call to C land JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); @@ -284,24 +284,24 @@ } } -LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() { +LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() { _path = strdup(path); - _st = st; + _st = *st; _meta_index = NULL; _resolved_entry = NULL; + _has_error = false; } bool LazyClassPathEntry::is_jar_file() { return ((_st.st_mode & S_IFREG) == S_IFREG); } -ClassPathEntry* LazyClassPathEntry::resolve_entry() { +ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) { if (_resolved_entry != NULL) { return (ClassPathEntry*) _resolved_entry; } ClassPathEntry* new_entry = NULL; - ClassLoader::create_class_path_entry(_path, _st, &new_entry, false); - assert(new_entry != NULL, "earlier code should have caught this"); + new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL); { ThreadCritical tc; if (_resolved_entry == NULL) { @@ -314,12 +314,21 @@ return (ClassPathEntry*) _resolved_entry; } -ClassFileStream* LazyClassPathEntry::open_stream(const char* name) { +ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) { if (_meta_index != NULL && !_meta_index->may_contain(name)) { return NULL; } - return resolve_entry()->open_stream(name); + if (_has_error) { + return NULL; + } + ClassPathEntry* cpe = resolve_entry(THREAD); + if (cpe == NULL) { + _has_error = true; + return NULL; + } else { + return cpe->open_stream(name, THREAD); + } } bool LazyClassPathEntry::is_lazy() { @@ -465,20 +474,19 @@ } } -void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) { +ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) { JavaThread* thread = JavaThread::current(); if (lazy) { - *new_entry = new LazyClassPathEntry(path, st); - return; + return new LazyClassPathEntry(path, st); } - if ((st.st_mode & S_IFREG) == S_IFREG) { + ClassPathEntry* new_entry = NULL; + if ((st->st_mode & S_IFREG) == S_IFREG) { // Regular file, should be a zip file // Canonicalized filename char canonical_path[JVM_MAXPATHLEN]; if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { // This matches the classic VM - EXCEPTION_MARK; - THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname"); + THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL); } char* error_msg = NULL; jzfile* zip; @@ -489,7 +497,7 @@ zip = (*ZipOpen)(canonical_path, &error_msg); } if (zip != NULL && error_msg == NULL) { - *new_entry = new ClassPathZipEntry(zip, path); + new_entry = new ClassPathZipEntry(zip, path); if (TraceClassLoading) { tty->print_cr("[Opened %s]", path); } @@ -504,16 +512,16 @@ msg = NEW_RESOURCE_ARRAY(char, len); ; jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path); } - EXCEPTION_MARK; - 
THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg); + THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL); } } else { // Directory - *new_entry = new ClassPathDirEntry(path); + new_entry = new ClassPathDirEntry(path); if (TraceClassLoading) { tty->print_cr("[Path %s]", path); } } + return new_entry; } @@ -572,13 +580,14 @@ } } -void ClassLoader::update_class_path_entry_list(const char *path, +void ClassLoader::update_class_path_entry_list(char *path, bool check_for_duplicates) { struct stat st; - if (os::stat((char *)path, &st) == 0) { + if (os::stat(path, &st) == 0) { // File or directory found ClassPathEntry* new_entry = NULL; - create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader); + Thread* THREAD = Thread::current(); + new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK); // The kernel VM adds dynamically to the end of the classloader path and // doesn't reorder the bootclasspath which would break java.lang.Package // (see PackageInfo). @@ -897,7 +906,7 @@ PerfClassTraceTime::CLASS_LOAD); ClassPathEntry* e = _first_entry; while (e != NULL) { - stream = e->open_stream(name); + stream = e->open_stream(name, CHECK_NULL); if (stream != NULL) { break; } @@ -1257,11 +1266,16 @@ } void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) { - resolve_entry()->compile_the_world(loader, CHECK); + ClassPathEntry* cpe = resolve_entry(THREAD); + if (cpe != NULL) { + cpe->compile_the_world(loader, CHECK); + } } bool LazyClassPathEntry::is_rt_jar() { - return resolve_entry()->is_rt_jar(); + Thread* THREAD = Thread::current(); + ClassPathEntry* cpe = resolve_entry(THREAD); + return (cpe != NULL) ? cpe->is_jar_file() : false; } void ClassLoader::compile_the_world() { @@ -1305,6 +1319,25 @@ // The CHECK at the caller will propagate the exception out } +/** + * Returns if the given method should be compiled when doing compile-the-world. + * + * TODO: This should be a private method in a CompileTheWorld class. + */ +static bool can_be_compiled(methodHandle m, int comp_level) { + assert(CompileTheWorld, "must be"); + + // It's not valid to compile a native wrapper for MethodHandle methods + // that take a MemberName appendix since the bytecode signature is not + // correct. 
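The LazyClassPathEntry changes above resolve the underlying entry on first use and latch failures in _has_error, so later lookups fail fast instead of repeatedly re-throwing. A small Java sketch of that pattern, under assumed names (tryCreate stands in for create_class_path_entry):

    class LazyEntrySketch {
        interface Entry { byte[] openStream(String name); }

        private final String path;
        private volatile Entry resolved;  // like _resolved_entry
        private boolean hasError;         // like _has_error: a sticky failure flag

        LazyEntrySketch(String path) { this.path = path; }

        byte[] openStream(String name) {
            if (hasError) {
                return null;              // a previous resolution failed; stay failed
            }
            Entry e = resolve();
            if (e == null) {
                hasError = true;          // remember the failure for later lookups
                return null;
            }
            return e.openStream(name);
        }

        private Entry resolve() {
            if (resolved == null) {
                resolved = tryCreate(path);  // stands in for create_class_path_entry
            }
            return resolved;
        }

        private static Entry tryCreate(String path) {
            return null;  // sketch stub: a real factory would open a dir or zip
        }
    }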
+ vmIntrinsics::ID iid = m->intrinsic_id(); + if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) { + return false; + } + + return CompilationPolicy::can_be_compiled(m, comp_level); +} + void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { int len = (int)strlen(name); if (len > 6 && strcmp(".class", name + len - 6) == 0) { @@ -1348,8 +1381,7 @@ int comp_level = CompilationPolicy::policy()->initial_compile_level(); for (int n = 0; n < k->methods()->length(); n++) { methodHandle m (THREAD, k->methods()->at(n)); - if (CompilationPolicy::can_be_compiled(m, comp_level)) { - + if (can_be_compiled(m, comp_level)) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { // Give sweeper a chance to keep up with CTW VM_ForceSafepoint op; @@ -1361,7 +1393,7 @@ methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string()); } else { _compile_the_world_method_counter++; } @@ -1377,11 +1409,13 @@ methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string()); } else { _compile_the_world_method_counter++; } } + } else { + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string()); } nmethod* nm = m->code(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/classLoader.hpp --- a/src/share/vm/classfile/classLoader.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/classLoader.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ ClassPathEntry(); // Attempt to locate file_name through this class path entry. // Returns a class file parsing stream if successful.
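The can_be_compiled helper above filters out signature-polymorphic MethodHandle intrinsics whose bytecode descriptor omits the hidden MemberName appendix, before deferring to the compilation policy. A structural Java sketch of that predicate; MethodInfo and policyAllows are stand-ins, not VM APIs:

    class CtwFilterSketch {
        interface MethodInfo {
            boolean isSignaturePolymorphic();  // e.g. MethodHandle.invokeExact
            boolean hasMemberNameAppendix();   // linkTo*-style intrinsics
        }

        // Stand-in for CompilationPolicy::can_be_compiled; not a VM API.
        static boolean policyAllows(MethodInfo m, int compLevel) { return true; }

        // A native wrapper for such an intrinsic would be compiled against a
        // descriptor missing the trailing MemberName, so CTW skips it.
        static boolean canBeCompiled(MethodInfo m, int compLevel) {
            if (m.isSignaturePolymorphic() && m.hasMemberNameAppendix()) {
                return false;
            }
            return policyAllows(m, compLevel);
        }
    }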
- virtual ClassFileStream* open_stream(const char* name) = 0; + virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0; // Debugging NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;) NOT_PRODUCT(virtual bool is_rt_jar() = 0;) @@ -77,7 +77,7 @@ bool is_jar_file() { return false; } const char* name() { return _dir; } ClassPathDirEntry(char* dir); - ClassFileStream* open_stream(const char* name); + ClassFileStream* open_stream(const char* name, TRAPS); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) NOT_PRODUCT(bool is_rt_jar();) @@ -107,7 +107,7 @@ const char* name() { return _zip_name; } ClassPathZipEntry(jzfile* zip, const char* zip_name); ~ClassPathZipEntry(); - ClassFileStream* open_stream(const char* name); + ClassFileStream* open_stream(const char* name, TRAPS); void contents_do(void f(const char* name, void* context), void* context); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) @@ -125,13 +125,14 @@ char* _path; // dir or file struct stat _st; MetaIndex* _meta_index; + bool _has_error; volatile ClassPathEntry* _resolved_entry; - ClassPathEntry* resolve_entry(); + ClassPathEntry* resolve_entry(TRAPS); public: bool is_jar_file(); const char* name() { return _path; } - LazyClassPathEntry(char* path, struct stat st); - ClassFileStream* open_stream(const char* name); + LazyClassPathEntry(char* path, const struct stat* st); + ClassFileStream* open_stream(const char* name, TRAPS); void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; } virtual bool is_lazy(); // Debugging @@ -207,14 +208,15 @@ static void setup_meta_index(); static void setup_bootstrap_search_path(); static void load_zip_library(); - static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy); + static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st, + bool lazy, TRAPS); // Canonicalizes path names, so strcmp will work properly. This is mainly // to avoid confusing the zip library static bool get_canonical_path(char* orig, char* out, int len); public: // Used by the kernel jvm. - static void update_class_path_entry_list(const char *path, + static void update_class_path_entry_list(char *path, bool check_for_duplicates); static void print_bootclasspath(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/classLoaderData.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -261,7 +261,7 @@ k, k->external_name(), k->class_loader_data(), - k->class_loader(), + (void *)k->class_loader(), loader_name()); } } @@ -297,7 +297,7 @@ if (TraceClassLoaderData) { ResourceMark rm; tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this); - tty->print(" for instance "PTR_FORMAT" of %s", class_loader(), + tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(), loader_name()); if (is_anonymous()) { tty->print(" for anonymous class "PTR_FORMAT " ", _klasses); @@ -458,7 +458,7 @@ void ClassLoaderData::dump(outputStream * const out) { ResourceMark rm; out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {", - this, class_loader(), + this, (void *)class_loader(), class_loader() != NULL ? 
class_loader()->klass() : NULL, loader_name()); if (claimed()) out->print(" claimed "); if (is_unloading()) out->print(" unloading "); @@ -553,7 +553,7 @@ ResourceMark rm; tty->print("[ClassLoaderData: "); tty->print("create class loader data "PTR_FORMAT, cld); - tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(), + tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(), cld->loader_name()); tty->print_cr("]"); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/defaultMethods.cpp --- a/src/share/vm/classfile/defaultMethods.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/defaultMethods.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "classfile/bytecodeAssembler.hpp" #include "classfile/defaultMethods.hpp" -#include "classfile/genericSignatures.hpp" #include "classfile/symbolTable.hpp" #include "memory/allocation.hpp" #include "memory/metadataFactory.hpp" @@ -75,14 +74,6 @@ } }; -class ContextMark : public PseudoScopeMark { - private: - generic::Context::Mark _mark; - public: - ContextMark(const generic::Context::Mark& cm) : _mark(cm) {} - virtual void destroy() { _mark.destroy(); } -}; - #ifndef PRODUCT static void print_slot(outputStream* str, Symbol* name, Symbol* signature) { ResourceMark rm; @@ -334,6 +325,7 @@ Method* _selected_target; // Filled in later, if a unique target exists Symbol* _exception_message; // If no unique target is found + Symbol* _exception_name; // If no unique target is found bool contains_method(Method* method) { int* lookup = _member_index.get(method); @@ -359,7 +351,7 @@ public: MethodFamily() - : _selected_target(NULL), _exception_message(NULL) {} + : _selected_target(NULL), _exception_message(NULL), _exception_name(NULL) {} void set_target_if_empty(Method* m) { if (_selected_target == NULL && !m->is_overpass()) { @@ -392,6 +384,7 @@ Method* get_selected_target() { return _selected_target; } Symbol* get_exception_message() { return _exception_message; } + Symbol* get_exception_name() { return _exception_name; } // Either sets the target or the exception error message void determine_target(InstanceKlass* root, TRAPS) { @@ -409,15 +402,18 @@ if (qualified_methods.length() == 0) { _exception_message = generate_no_defaults_message(CHECK); + _exception_name = vmSymbols::java_lang_AbstractMethodError(); } else if (qualified_methods.length() == 1) { Method* method = qualified_methods.at(0); if (method->is_abstract()) { _exception_message = generate_abstract_method_message(method, CHECK); + _exception_name = vmSymbols::java_lang_AbstractMethodError(); } else { _selected_target = qualified_methods.at(0); } } else { _exception_message = generate_conflicts_message(&qualified_methods,CHECK); + _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); } assert((has_target() ^ throws_exception()) == 1, @@ -459,13 +455,18 @@ streamIndentor si(str, indent * 2); str->indent().print("Selected method: "); print_method(str, _selected_target); + Klass* method_holder = _selected_target->method_holder(); + if (!method_holder->is_interface()) { + tty->print(" : in superclass"); + } str->print_cr(""); } void print_exception(outputStream* str, int indent) { assert(throws_exception(), "Should be called otherwise"); + assert(_exception_name != NULL, "exception_name should be set"); streamIndentor si(str, indent * 2); - str->indent().print_cr("%s", _exception_message->as_C_string()); + str->indent().print_cr("%s: %s", _exception_name->as_C_string(), 
_exception_message->as_C_string()); } #endif // ndef PRODUCT }; @@ -503,38 +504,6 @@ return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL); } -// A generic method family contains a set of all methods that implement a single -// language-level method. Because of erasure, these methods may have different -// signatures. As members of the set are collected while walking over the -// hierarchy, they are tagged with a qualification state. The qualification -// state for an erased method is set to disqualified if there exists a path -// from the root of hierarchy to the method that contains an interleaving -// language-equivalent method defined in an interface. -class GenericMethodFamily : public MethodFamily { - private: - - generic::MethodDescriptor* _descriptor; // language-level description - - public: - - GenericMethodFamily(generic::MethodDescriptor* canonical_desc) - : _descriptor(canonical_desc) {} - - generic::MethodDescriptor* descriptor() const { return _descriptor; } - - bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) { - return descriptor()->covariant_match(md, ctx); - } - -#ifndef PRODUCT - Symbol* get_generic_sig() const { - - generic::Context ctx(NULL); // empty, as _descriptor already canonicalized - TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current()); - return sig; - } -#endif // ndef PRODUCT -}; class StateRestorer; @@ -571,26 +540,6 @@ StateRestorer* record_method_and_dq_further(Method* mo); }; - -// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the -// qualification state during hierarchy visitation, and applies that state -// when adding members to the GenericMethodFamily. -class StatefulGenericMethodFamily : public StatefulMethodFamily { - - public: - StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) - : StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) { - - } - GenericMethodFamily* get_method_family() { - return (GenericMethodFamily*)_method_family; - } - - bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) { - return get_method_family()->descriptor_matches(md, ctx); - } -}; - class StateRestorer : public PseudoScopeMark { private: StatefulMethodFamily* _method; @@ -616,39 +565,6 @@ return mark; } -class StatefulGenericMethodFamilies : public ResourceObj { - private: - GrowableArray<StatefulGenericMethodFamily*> _methods; - - public: - StatefulGenericMethodFamily* find_matching( - generic::MethodDescriptor* md, generic::Context* ctx) { - for (int i = 0; i < _methods.length(); ++i) { - StatefulGenericMethodFamily* existing = _methods.at(i); - if (existing->descriptor_matches(md, ctx)) { - return existing; - } - } - return NULL; - } - - StatefulGenericMethodFamily* find_matching_or_create( - generic::MethodDescriptor* md, generic::Context* ctx) { - StatefulGenericMethodFamily* method = find_matching(md, ctx); - if (method == NULL) { - method = new StatefulGenericMethodFamily(md, ctx); - _methods.append(method); - } - return method; - } - - void extract_families_into(GrowableArray<GenericMethodFamily*>* array) { - for (int i = 0; i < _methods.length(); ++i) { - array->append(_methods.at(i)->get_method_family()); - } - } -}; - // Represents a location corresponding to a vtable slot for methods that // neither the class nor any of its ancestors provide an implementation. // Default methods may be present to fill this slot.
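MethodFamily::determine_target above now records an exception class alongside the message: no qualified candidates, or a lone abstract one, select AbstractMethodError, while multiple qualified candidates select IncompatibleClassChangeError. A small Java sketch of that decision (names are illustrative):

    import java.util.Arrays;
    import java.util.List;

    class TargetSelectionSketch {
        // Returns the internal name of the error an overpass should throw, or
        // null when a unique concrete target was selected.
        static String exceptionNameFor(List<String> qualified, boolean loneIsAbstract) {
            if (qualified.isEmpty()) {
                return "java/lang/AbstractMethodError";            // no defaults
            }
            if (qualified.size() == 1) {
                return loneIsAbstract ? "java/lang/AbstractMethodError" : null;
            }
            return "java/lang/IncompatibleClassChangeError";       // conflicting defaults
        }

        public static void main(String[] args) {
            System.out.println(exceptionNameFor(Arrays.asList("A.m", "B.m"), false));
            // prints java/lang/IncompatibleClassChangeError
        }
    }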
@@ -760,7 +676,10 @@ InstanceKlass* iklass = current_class(); Method* m = iklass->find_method(_method_name, _method_signature); - if (m != NULL) { + // private interface methods are not candidates for default methods + // invokespecial to private interface methods doesn't use default method logic + // future: take access controls into account for superclass methods + if (m != NULL && (!iklass->is_interface() || m->is_public())) { if (_family == NULL) { _family = new StatefulMethodFamily(); } @@ -779,146 +698,11 @@ }; -// Iterates over the type hierarchy looking for all methods with a specific -// method name. The result of this is a set of method families each of -// which is populated with a set of methods that implement the same -// language-level signature. -class FindMethodsByGenericSig : public HierarchyVisitor { - private: - // Context data - Thread* THREAD; - generic::DescriptorCache* _cache; - Symbol* _method_name; - generic::Context* _ctx; - StatefulGenericMethodFamilies _families; - public: - - FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name, - generic::Context* ctx, Thread* thread) : - _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {} - - void get_discovered_families(GrowableArray* methods) { - _families.extract_families_into(methods); - } - - void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); } - void free_node_data(void* node_data) { - PseudoScope::cast(node_data)->destroy(); - } - - bool visit() { - PseudoScope* scope = PseudoScope::cast(current_data()); - InstanceKlass* klass = current_class(); - InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL; - - ContextMark* cm = new ContextMark(_ctx->mark()); - scope->add_mark(cm); // will restore context when scope is freed - - _ctx->apply_type_arguments(sub, klass, THREAD); - - int start, end = 0; - start = klass->find_method_by_name(_method_name, &end); - if (start != -1) { - for (int i = start; i < end; ++i) { - Method* m = klass->methods()->at(i); - // This gets the method's parameter list with its generic type - // parameters resolved - generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD); - - // Find all methods on this hierarchy that match this method - // (name, signature). This class collects other families of this - // method name. - StatefulGenericMethodFamily* family = - _families.find_matching_or_create(md, _ctx); - - if (klass->is_interface()) { - // ??? - StateRestorer* restorer = family->record_method_and_dq_further(m); - scope->add_mark(restorer); - } else { - // This is the rule that methods in classes "win" (bad word) over - // methods in interfaces. 
This works because of single inheritance - family->set_target_if_empty(m); - } - } - } - return true; - } -}; - -#ifndef PRODUCT -static void print_generic_families( - GrowableArray* methods, Symbol* match) { - streamIndentor si(tty, 4); - if (methods->length() == 0) { - tty->indent(); - tty->print_cr("No Logical Method found"); - } - for (int i = 0; i < methods->length(); ++i) { - tty->indent(); - GenericMethodFamily* lm = methods->at(i); - if (lm->contains_signature(match)) { - tty->print_cr(""); - } else { - tty->print_cr(""); - } - lm->print_sig_on(tty, lm->get_generic_sig(), 1); - } -} -#endif // ndef PRODUCT static void create_overpasses( GrowableArray* slots, InstanceKlass* klass, TRAPS); -static void generate_generic_defaults( - InstanceKlass* klass, GrowableArray* empty_slots, - EmptyVtableSlot* slot, int current_slot_index, TRAPS) { - - if (slot->is_bound()) { -#ifndef PRODUCT - if (TraceDefaultMethods) { - streamIndentor si(tty, 4); - tty->indent().print_cr("Already bound to logical method:"); - GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding()); - lm->print_sig_on(tty, lm->get_generic_sig(), 1); - } -#endif // ndef PRODUCT - return; // covered by previous processing - } - - generic::DescriptorCache cache; - - generic::Context ctx(&cache); - FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK); - visitor.run(klass); - - GrowableArray discovered_families; - visitor.get_discovered_families(&discovered_families); - -#ifndef PRODUCT - if (TraceDefaultMethods) { - print_generic_families(&discovered_families, slot->signature()); - } -#endif // ndef PRODUCT - - // Find and populate any other slots that match the discovered families - for (int j = current_slot_index; j < empty_slots->length(); ++j) { - EmptyVtableSlot* open_slot = empty_slots->at(j); - - if (slot->name() == open_slot->name()) { - for (int k = 0; k < discovered_families.length(); ++k) { - GenericMethodFamily* lm = discovered_families.at(k); - - if (lm->contains_signature(open_slot->signature())) { - lm->determine_target(klass, CHECK); - open_slot->bind_family(lm); - } - } - } - } -} - static void generate_erased_defaults( InstanceKlass* klass, GrowableArray* empty_slots, EmptyVtableSlot* slot, TRAPS) { @@ -943,21 +727,14 @@ // // First if finds any name/signature slots that need any implementation (either // because they are miranda or a superclass's implementation is an overpass -// itself). For each slot, iterate over the hierarchy, using generic signature -// information to partition any methods that match the name into method families -// where each family contains methods whose signatures are equivalent at the -// language level (i.e., their reified parameters match and return values are -// covariant). Check those sets to see if they contain a signature that matches -// the slot we're looking at (if we're lucky, there might be other empty slots -// that we can fill using the same analysis). +// itself). For each slot, iterate over the hierarchy, to see if they contain a +// signature that matches the slot we are looking at. // // For each slot filled, we generate an overpass method that either calls the // unique default method candidate using invokespecial, or throws an exception // (in the case of no default method candidates, or more than one valid -// candidate). These methods are then added to the class's method list. 
If -// the method set we're using contains methods (qualified or not) with a -// different runtime signature than the method we're creating, then we have to -// create bridges with those signatures too. +// candidate). These methods are then added to the class's method list. +// The JVM does not create bridges nor handle generic signatures here. void DefaultMethods::generate_default_methods( InstanceKlass* klass, GrowableArray* mirandas, TRAPS) { @@ -997,11 +774,7 @@ } #endif // ndef PRODUCT - if (ParseGenericDefaults) { - generate_generic_defaults(klass, empty_slots, slot, i, CHECK); - } else { - generate_erased_defaults(klass, empty_slots, slot, CHECK); - } + generate_erased_defaults(klass, empty_slots, slot, CHECK); } #ifndef PRODUCT if (TraceDefaultMethods) { @@ -1018,303 +791,7 @@ #endif // ndef PRODUCT } -/** - * Generic analysis was used upon interface '_target' and found a unique - * default method candidate with generic signature '_method_desc'. This - * method is only viable if it would also be in the set of default method - * candidates if we ran a full analysis on the current class. - * - * The only reason that the method would not be in the set of candidates for - * the current class is if that there's another covariantly matching method - * which is "more specific" than the found method -- i.e., one could find a - * path in the interface hierarchy in which the matching method appears - * before we get to '_target'. - * - * In order to determine this, we examine all of the implemented - * interfaces. If we find path that leads to the '_target' interface, then - * we examine that path to see if there are any methods that would shadow - * the selected method along that path. - */ -class ShadowChecker : public HierarchyVisitor { - protected: - Thread* THREAD; - InstanceKlass* _target; - - Symbol* _method_name; - InstanceKlass* _method_holder; - bool _found_shadow; - - - public: - - ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder, - InstanceKlass* target) - : THREAD(thread), _method_name(name), _method_holder(holder), - _target(target), _found_shadow(false) {} - - void* new_node_data(InstanceKlass* cls) { return NULL; } - void free_node_data(void* data) { return; } - - bool visit() { - InstanceKlass* ik = current_class(); - if (ik == _target && current_depth() == 1) { - return false; // This was the specified super -- no need to search it - } - if (ik == _method_holder || ik == _target) { - // We found a path that should be examined to see if it shadows _method - if (path_has_shadow()) { - _found_shadow = true; - cancel_iteration(); - } - return false; // no need to continue up hierarchy - } - return true; - } - - virtual bool path_has_shadow() = 0; - bool found_shadow() { return _found_shadow; } -}; - -// Used for Invokespecial. -// Invokespecial is allowed to invoke a concrete interface method -// and can be used to disambuiguate among qualified candidates, -// which are methods in immediate superinterfaces, -// but may not be used to invoke a candidate that would be shadowed -// from the perspective of the caller. -// Invokespecial is also used in the overpass generation today -// We re-run the shadowchecker because we can't distinguish this case, -// but it should return the same answer, since the overpass target -// is now the invokespecial caller. 
-class ErasedShadowChecker : public ShadowChecker { - private: - bool path_has_shadow() { - - for (int i = current_depth() - 1; i > 0; --i) { - InstanceKlass* ik = class_at_depth(i); - - if (ik->is_interface()) { - int end; - int start = ik->find_method_by_name(_method_name, &end); - if (start != -1) { - return true; - } - } - } - return false; - } - public: - - ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder, - InstanceKlass* target) - : ShadowChecker(thread, name, holder, target) {} -}; - -class GenericShadowChecker : public ShadowChecker { - private: - generic::DescriptorCache* _cache; - generic::MethodDescriptor* _method_desc; - - bool path_has_shadow() { - generic::Context ctx(_cache); - - for (int i = current_depth() - 1; i > 0; --i) { - InstanceKlass* ik = class_at_depth(i); - InstanceKlass* sub = class_at_depth(i + 1); - ctx.apply_type_arguments(sub, ik, THREAD); - - if (ik->is_interface()) { - int end; - int start = ik->find_method_by_name(_method_name, &end); - if (start != -1) { - for (int j = start; j < end; ++j) { - Method* mo = ik->methods()->at(j); - generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD); - if (_method_desc->covariant_match(md, &ctx)) { - return true; - } - } - } - } - } - return false; - } - - public: - - GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread, - Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc, - InstanceKlass* target) - : ShadowChecker(thread, name, holder, target) { - _cache = cache; - _method_desc = desc; - } -}; - - - -// Find the unique qualified candidate from the perspective of the super_class -// which is the resolved_klass, which must be an immediate superinterface -// of klass -Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) { - - FindMethodsByErasedSig visitor(method_name, sig); - visitor.run(super_class); // find candidates from resolved_klass - - MethodFamily* family; - visitor.get_discovered_family(&family); - - if (family != NULL) { - family->determine_target(current_class, CHECK_NULL); // get target from current_class - } - - if (family->has_target()) { - Method* target = family->get_selected_target(); - InstanceKlass* holder = InstanceKlass::cast(target->method_holder()); - - // Verify that the identified method is valid from the context of - // the current class, which is the caller class for invokespecial - // link resolution, i.e. ensure there it is not shadowed. - // You can use invokespecial to disambiguate interface methods, but - // you can not use it to skip over an interface method that would shadow it. 
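The comments above note that invokespecial may disambiguate among conflicting superinterface defaults but may not skip over a shadowing one. At the source level the disambiguation shape is X.super.m(), as in this self-contained Java 8 example:

    interface A { default String m() { return "A"; } }
    interface B { default String m() { return "B"; } }

    // The class must override m() because A and B conflict; inside the override
    // it can disambiguate with X.super.m(), which javac compiles to invokespecial
    // against the direct superinterface, the call shape discussed above.
    class SuperDefaultExample implements A, B {
        public String m() { return A.super.m() + B.super.m(); }

        public static void main(String[] args) {
            System.out.println(new SuperDefaultExample().m());  // prints AB
        }
    }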
- ErasedShadowChecker checker(THREAD, target->name(), holder, super_class); - checker.run(current_class); - - if (checker.found_shadow()) { -#ifndef PRODUCT - if (TraceDefaultMethods) { - tty->print_cr(" Only candidate found was shadowed."); - } -#endif // ndef PRODUCT - THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), - "Accessible default method not found", NULL); - } else { -#ifndef PRODUCT - if (TraceDefaultMethods) { - family->print_sig_on(tty, target->signature(), 1); - } -#endif // ndef PRODUCT - return target; - } - } else { - assert(family->throws_exception(), "must have target or throw"); - THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), - family->get_exception_message()->as_C_string(), NULL); - } -} - -// super_class is assumed to be the direct super of current_class -Method* find_generic_super_default( InstanceKlass* current_class, - InstanceKlass* super_class, - Symbol* method_name, Symbol* sig, TRAPS) { - generic::DescriptorCache cache; - generic::Context ctx(&cache); - - // Prime the initial generic context for current -> super_class - ctx.apply_type_arguments(current_class, super_class, CHECK_NULL); - - FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL); - visitor.run(super_class); - - GrowableArray families; - visitor.get_discovered_families(&families); - -#ifndef PRODUCT - if (TraceDefaultMethods) { - print_generic_families(&families, sig); - } -#endif // ndef PRODUCT - - GenericMethodFamily* selected_family = NULL; - - for (int i = 0; i < families.length(); ++i) { - GenericMethodFamily* lm = families.at(i); - if (lm->contains_signature(sig)) { - lm->determine_target(current_class, CHECK_NULL); - selected_family = lm; - } - } - - if (selected_family->has_target()) { - Method* target = selected_family->get_selected_target(); - InstanceKlass* holder = InstanceKlass::cast(target->method_holder()); - - // Verify that the identified method is valid from the context of - // the current class - GenericShadowChecker checker(&cache, THREAD, target->name(), - holder, selected_family->descriptor(), super_class); - checker.run(current_class); - - if (checker.found_shadow()) { -#ifndef PRODUCT - if (TraceDefaultMethods) { - tty->print_cr(" Only candidate found was shadowed."); - } -#endif // ndef PRODUCT - THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), - "Accessible default method not found", NULL); - } else { - return target; - } - } else { - assert(selected_family->throws_exception(), "must have target or throw"); - THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), - selected_family->get_exception_message()->as_C_string(), NULL); - } -} - -// This is called during linktime when we find an invokespecial call that -// refers to a direct superinterface. It indicates that we should find the -// default method in the hierarchy of that superinterface, and if that method -// would have been a candidate from the point of view of 'this' class, then we -// return that method. 
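find_erased_super_default, removed here, resolved an invokespecial to a direct superinterface default: a unique candidate or AbstractMethodError, plus the shadow rejection. A rough Java sketch of that control flow (names and messages are illustrative; the real code raised VM-level errors via THROW_MSG_):

    import java.util.Arrays;
    import java.util.List;

    class SuperDefaultLookupSketch {
        // Gather candidates from the direct superinterface, require a unique
        // target, then reject it if an intervening interface shadows it from
        // the caller's point of view.
        static String findSuperDefault(List<String> candidates, boolean shadowed) {
            if (candidates.size() != 1) {
                throw new AbstractMethodError("no unique default candidate");
            }
            if (shadowed) {
                throw new AbstractMethodError("Accessible default method not found");
            }
            return candidates.get(0);
        }

        public static void main(String[] args) {
            System.out.println(findSuperDefault(Arrays.asList("A.m"), false));  // A.m
        }
    }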
-// This logic assumes that the super is a direct superclass of the caller -Method* DefaultMethods::find_super_default( - Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) { - - ResourceMark rm(THREAD); - - assert(cls != NULL && super != NULL, "Need real classes"); - - InstanceKlass* current_class = InstanceKlass::cast(cls); - InstanceKlass* super_class = InstanceKlass::cast(super); - - // Keep entire hierarchy alive for the duration of the computation - KeepAliveRegistrar keepAlive(THREAD); - KeepAliveVisitor loadKeepAlive(&keepAlive); - loadKeepAlive.run(current_class); // get hierarchy from current class - -#ifndef PRODUCT - if (TraceDefaultMethods) { - tty->print_cr("Finding super default method %s.%s%s from %s", - super_class->name()->as_C_string(), - method_name->as_C_string(), sig->as_C_string(), - current_class->name()->as_C_string()); - } -#endif // ndef PRODUCT - - assert(super_class->is_interface(), "only call for default methods"); - - Method* target = NULL; - if (ParseGenericDefaults) { - target = find_generic_super_default(current_class, super_class, - method_name, sig, CHECK_NULL); - } else { - target = find_erased_super_default(current_class, super_class, - method_name, sig, CHECK_NULL); - } - -#ifndef PRODUCT - if (target != NULL) { - if (TraceDefaultMethods) { - tty->print(" Returning "); - print_method(tty, target, true); - tty->print_cr(""); - } - } -#endif // ndef PRODUCT - return target; -} #ifdef ASSERT // Return true is broad type is a covariant return of narrow type @@ -1327,7 +804,7 @@ } return false; } -#endif // def ASSERT +#endif static int assemble_redirect( BytecodeConstantPool* cp, BytecodeBuffer* buffer, @@ -1374,10 +851,9 @@ return parameter_count; } -static int assemble_abstract_method_error( - BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) { +static int assemble_method_error( + BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) { - Symbol* errorName = vmSymbols::java_lang_AbstractMethodError(); Symbol* init = vmSymbols::object_initializer_name(); Symbol* sig = vmSymbols::string_void_signature(); @@ -1484,19 +960,22 @@ #endif // ndef PRODUCT if (method->has_target()) { Method* selected = method->get_selected_target(); - max_stack = assemble_redirect( + if (selected->method_holder()->is_interface()) { + max_stack = assemble_redirect( &bpool, &buffer, slot->signature(), selected, CHECK); + } } else if (method->throws_exception()) { - max_stack = assemble_abstract_method_error( - &bpool, &buffer, method->get_exception_message(), CHECK); + max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK); } - AccessFlags flags = accessFlags_from( + if (max_stack != 0) { + AccessFlags flags = accessFlags_from( JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE); - Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(), + Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(), flags, max_stack, slot->size_of_parameters(), ConstMethod::OVERPASS, CHECK); - if (m != NULL) { - overpasses.push(m); + if (m != NULL) { + overpasses.push(m); + } } } } @@ -1616,4 +1095,3 @@ MetadataFactory::free_array(cld, original_ordering); } } - diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/defaultMethods.hpp --- a/src/share/vm/classfile/defaultMethods.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/defaultMethods.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright 
(c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,15 +44,5 @@ // the class. static void generate_default_methods( InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS); - - - // Called during linking when an invokespecial to a direct interface - // method is found. Selects and returns a method if there is a unique - // default method in the 'super_iface' part of the hierarchy which is - // also a candidate default for 'this_klass'. Otherwise throws an AME. - static Method* find_super_default( - Klass* this_klass, Klass* super_iface, - Symbol* method_name, Symbol* method_sig, TRAPS); }; - #endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/dictionary.hpp --- a/src/share/vm/classfile/dictionary.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/dictionary.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -264,7 +264,7 @@ } if (method_type() != NULL) { if (printed) st->print(" and "); - st->print(INTPTR_FORMAT, method_type()); + st->print(INTPTR_FORMAT, (void *)method_type()); printed = true; } st->print_cr(printed ? "" : "(empty)"); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/genericSignatures.cpp --- a/src/share/vm/classfile/genericSignatures.cpp Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1279 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions.
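assemble_method_error, further up, emits an overpass body that simply throws the selected error with a message: NEW, DUP, LDC of the message, INVOKESPECIAL of the String constructor, then ATHROW, peaking at a stack depth of three. A sketch of the same stub using the ASM library (an assumption for illustration; HotSpot uses its own BytecodeAssembler, not ASM):

    import org.objectweb.asm.MethodVisitor;
    import org.objectweb.asm.Opcodes;

    class OverpassStubSketch {
        // Emits a body equivalent to: throw new <errorClass>(<message>);
        static void emitThrowStub(MethodVisitor mv, String errorClass, String message) {
            mv.visitCode();
            mv.visitTypeInsn(Opcodes.NEW, errorClass);  // e.g. "java/lang/IncompatibleClassChangeError"
            mv.visitInsn(Opcodes.DUP);
            mv.visitLdcInsn(message);
            mv.visitMethodInsn(Opcodes.INVOKESPECIAL, errorClass,
                               "<init>", "(Ljava/lang/String;)V", false);
            mv.visitInsn(Opcodes.ATHROW);
            mv.visitMaxs(3, 1);   // NEW+DUP+LDC peaks at three stack slots
            mv.visitEnd();
        }
    }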
- * - */ - -#include "precompiled.hpp" - -#include "classfile/genericSignatures.hpp" -#include "classfile/symbolTable.hpp" -#include "classfile/systemDictionary.hpp" -#include "memory/resourceArea.hpp" - -namespace generic { - -// Helper class for parsing the generic signature Symbol in klass and methods -class DescriptorStream : public ResourceObj { - private: - Symbol* _symbol; - int _offset; - int _mark; - const char* _parse_error; - - void set_parse_error(const char* error) { - assert(error != NULL, "Can't set NULL error string"); - _parse_error = error; - } - - public: - DescriptorStream(Symbol* sym) - : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {} - - const char* parse_error() const { - return _parse_error; - } - - bool at_end() { return _offset >= _symbol->utf8_length(); } - - char peek() { - if (at_end()) { - set_parse_error("Peeking past end of signature"); - return '\0'; - } else { - return _symbol->byte_at(_offset); - } - } - - char read() { - if (at_end()) { - set_parse_error("Reading past end of signature"); - return '\0'; - } else { - return _symbol->byte_at(_offset++); - } - } - - void read(char expected) { - char c = read(); - assert_char(c, expected, 0); - } - - void assert_char(char c, char expected, int pos = -1) { - if (c != expected) { - const char* fmt = "Parse error at %d: expected %c but got %c"; - size_t len = strlen(fmt) + 5; - char* buffer = NEW_RESOURCE_ARRAY(char, len); - jio_snprintf(buffer, len, fmt, _offset + pos, expected, c); - set_parse_error(buffer); - } - } - - void push(char c) { - assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value"); - --_offset; - } - - void expect_end() { - if (!at_end()) { - set_parse_error("Unexpected data trailing signature"); - } - } - - bool has_mark() { return _mark != -1; } - - void set_mark() { - _mark = _offset; - } - - Identifier* identifier_from_mark() { - assert(has_mark(), "Mark should be set"); - if (!has_mark()) { - set_parse_error("Expected mark to be set"); - return NULL; - } else { - Identifier* id = new Identifier(_symbol, _mark, _offset - 1); - _mark = -1; - return id; - } - } -}; - - -#define CHECK_FOR_PARSE_ERROR() \ - if (STREAM->parse_error() != NULL) { \ - if (VerifyGenericSignatures) { \ - fatal(STREAM->parse_error()); \ - } \ - return NULL; \ - } (void)0 - -#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR() -#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR() -#define PUSH(c) STREAM->push(c) -#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR() -#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR() -#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR() - -#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0 - -#ifndef PRODUCT -void Identifier::print_on(outputStream* str) const { - for (int i = _begin; i < _end; ++i) { - str->print("%c", (char)_sym->byte_at(i)); - } -} -#endif // ndef PRODUCT - -bool Identifier::equals(Identifier* other) { - if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) { - return true; - } else if (_end - _begin != other->_end - other->_begin) { - return false; - } else { - size_t len = _end - _begin; - char* addr = ((char*)_sym->bytes()) + _begin; - char* oaddr = ((char*)other->_sym->bytes()) + other->_begin; - return strncmp(addr, oaddr, len) == 0; - } -} - -bool Identifier::equals(Symbol* sym) { - Identifier id(sym, 0, sym->utf8_length()); - return equals(&id); -} - -/** - * A formal type parameter may be found in the the enclosing class, but it could - * also come 
from an enclosing method or outer class, in the case of inner-outer - * classes or anonymous classes. For example: - * - * class Outer { - * class Inner { - * void m(T t, V v, W w); - * } - * } - * - * In this case, the type variables in m()'s signature are not all found in the - * immediate enclosing class (Inner). class Inner has only type parameter W, - * but it's outer_class field will reference Outer's descriptor which contains - * T & V (no outer_method in this case). - * - * If you have an anonymous class, it has both an enclosing method *and* an - * enclosing class where type parameters can be declared: - * - * class MOuter { - * void bar(V v) { - * Runnable r = new Runnable() { - * public void run() {} - * public void foo(T t, V v) { ... } - * }; - * } - * } - * - * In this case, foo will be a member of some class, Runnable$1, which has no - * formal parameters itself, but has an outer_method (bar()) which provides - * type parameter V, and an outer class MOuter with type parameter T. - * - * It is also possible that the outer class is itself an inner class to some - * other class (or an anonymous class with an enclosing method), so we need to - * follow the outer_class/outer_method chain to it's end when looking for a - * type parameter. - */ -TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) { - - int current_depth = 0; - - MethodDescriptor* outer_method = as_method_signature(); - ClassDescriptor* outer_class = as_class_signature(); - - if (outer_class == NULL) { // 'this' is a method signature; use the holder - outer_class = outer_method->outer_class(); - } - - while (outer_method != NULL || outer_class != NULL) { - if (outer_method != NULL) { - for (int i = 0; i < outer_method->type_parameters().length(); ++i) { - TypeParameter* p = outer_method->type_parameters().at(i); - if (p->identifier()->equals(id)) { - *depth = -1; // indicates this this is a method parameter - return p; - } - } - } - if (outer_class != NULL) { - for (int i = 0; i < outer_class->type_parameters().length(); ++i) { - TypeParameter* p = outer_class->type_parameters().at(i); - if (p->identifier()->equals(id)) { - *depth = current_depth; - return p; - } - } - outer_method = outer_class->outer_method(); - outer_class = outer_class->outer_class(); - ++current_depth; - } - } - - if (VerifyGenericSignatures) { - fatal("Could not resolve identifier"); - } - - return NULL; -} - -ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) { - return parse_generic_signature(klass, NULL, CHECK_NULL); -} - -ClassDescriptor* ClassDescriptor::parse_generic_signature( - Klass* klass, Symbol* original_name, TRAPS) { - - InstanceKlass* ik = InstanceKlass::cast(klass); - Symbol* sym = ik->generic_signature(); - - ClassDescriptor* spec; - - if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) { - spec = ClassDescriptor::placeholder(ik); - } - - u2 outer_index = get_outer_class_index(ik, CHECK_NULL); - if (outer_index != 0) { - if (original_name == NULL) { - original_name = ik->name(); - } - Handle class_loader = Handle(THREAD, ik->class_loader()); - Handle protection_domain = Handle(THREAD, ik->protection_domain()); - - Symbol* outer_name = ik->constants()->klass_name_at(outer_index); - Klass* outer = SystemDictionary::find( - outer_name, class_loader, protection_domain, CHECK_NULL); - if (outer == NULL && !THREAD->is_Compiler_thread()) { - if (outer_name == ik->super()->name()) { - outer = SystemDictionary::resolve_super_or_fail(original_name, 
outer_name, - class_loader, protection_domain, - false, CHECK_NULL); - } - else { - outer = SystemDictionary::resolve_or_fail(outer_name, class_loader, - protection_domain, false, CHECK_NULL); - } - } - - InstanceKlass* outer_ik; - ClassDescriptor* outer_spec = NULL; - if (outer == NULL) { - outer_spec = ClassDescriptor::placeholder(ik); - assert(false, "Outer class not loaded and not loadable from here"); - } else { - outer_ik = InstanceKlass::cast(outer); - outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL); - } - spec->set_outer_class(outer_spec); - - u2 encl_method_idx = ik->enclosing_method_method_index(); - if (encl_method_idx != 0 && outer_ik != NULL) { - ConstantPool* cp = ik->constants(); - u2 name_index = cp->name_ref_index_at(encl_method_idx); - u2 sig_index = cp->signature_ref_index_at(encl_method_idx); - Symbol* name = cp->symbol_at(name_index); - Symbol* sig = cp->symbol_at(sig_index); - Method* m = outer_ik->find_method(name, sig); - if (m != NULL) { - Symbol* gsig = m->generic_signature(); - if (gsig != NULL) { - MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec); - spec->set_outer_method(gms); - } - } else if (VerifyGenericSignatures) { - ResourceMark rm; - stringStream ss; - ss.print("Could not find method %s %s in class %s", - name->as_C_string(), sig->as_C_string(), outer_name->as_C_string()); - fatal(ss.as_string()); - } - } - } - - spec->bind_variables_to_parameters(); - return spec; -} - -ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) { - GrowableArray formals; - GrowableArray interfaces; - ClassType* super_type = NULL; - - Klass* super_klass = klass->super(); - if (super_klass != NULL) { - InstanceKlass* super = InstanceKlass::cast(super_klass); - super_type = ClassType::from_symbol(super->name()); - } - - for (int i = 0; i < klass->local_interfaces()->length(); ++i) { - InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i)); - interfaces.append(ClassType::from_symbol(iface->name())); - } - return new ClassDescriptor(formals, super_type, interfaces); -} - -ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) { - - DescriptorStream ds(sym); - DescriptorStream* STREAM = &ds; - - GrowableArray parameters(8); - char c = READ(); - if (c == '<') { - c = READ(); - while (c != '>') { - PUSH(c); - TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM); - parameters.append(ftp); - c = READ(); - } - } else { - PUSH(c); - } - - EXPECT('L'); - ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM); - - GrowableArray signatures(2); - while (!STREAM->at_end()) { - EXPECT('L'); - ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM); - signatures.append(iface); - } - - EXPECT_END(); - - return new ClassDescriptor(parameters, super, signatures); -} - -#ifndef PRODUCT -void ClassDescriptor::print_on(outputStream* str) const { - str->indent().print_cr("ClassDescriptor {"); - { - streamIndentor si(str); - if (_type_parameters.length() > 0) { - str->indent().print_cr("Formals {"); - { - streamIndentor si(str); - for (int i = 0; i < _type_parameters.length(); ++i) { - _type_parameters.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - } - if (_super != NULL) { - str->indent().print_cr("Superclass: "); - { - streamIndentor si(str); - _super->print_on(str); - } - } - if (_interfaces.length() > 0) { - str->indent().print_cr("SuperInterfaces: {"); - { - streamIndentor si(str); - for (int i = 0; i < _interfaces.length(); 
++i) { - _interfaces.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - } - if (_outer_method != NULL) { - str->indent().print_cr("Outer Method: {"); - { - streamIndentor si(str); - _outer_method->print_on(str); - } - str->indent().print_cr("}"); - } - if (_outer_class != NULL) { - str->indent().print_cr("Outer Class: {"); - { - streamIndentor si(str); - _outer_class->print_on(str); - } - str->indent().print_cr("}"); - } - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -ClassType* ClassDescriptor::interface_desc(Symbol* sym) { - for (int i = 0; i < _interfaces.length(); ++i) { - if (_interfaces.at(i)->identifier()->equals(sym)) { - return _interfaces.at(i); - } - } - if (VerifyGenericSignatures) { - fatal("Did not find expected interface"); - } - return NULL; -} - -void ClassDescriptor::bind_variables_to_parameters() { - if (_outer_class != NULL) { - _outer_class->bind_variables_to_parameters(); - } - if (_outer_method != NULL) { - _outer_method->bind_variables_to_parameters(); - } - for (int i = 0; i < _type_parameters.length(); ++i) { - _type_parameters.at(i)->bind_variables_to_parameters(this, i); - } - if (_super != NULL) { - _super->bind_variables_to_parameters(this); - } - for (int i = 0; i < _interfaces.length(); ++i) { - _interfaces.at(i)->bind_variables_to_parameters(this); - } -} - -ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) { - - GrowableArray type_params(_type_parameters.length()); - for (int i = 0; i < _type_parameters.length(); ++i) { - type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0)); - } - - ClassDescriptor* outer = _outer_class == NULL ? NULL : - _outer_class->canonicalize(ctx); - - ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0); - - GrowableArray interfaces(_interfaces.length()); - for (int i = 0; i < _interfaces.length(); ++i) { - interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0)); - } - - MethodDescriptor* md = _outer_method == NULL ? NULL : - _outer_method->canonicalize(ctx); - - return new ClassDescriptor(type_params, super, interfaces, outer, md); -} - -u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) { - int inner_index = InstanceKlass::inner_class_inner_class_info_offset; - int outer_index = InstanceKlass::inner_class_outer_class_info_offset; - int name_offset = InstanceKlass::inner_class_inner_name_offset; - int next_offset = InstanceKlass::inner_class_next_offset; - - if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) { - // No inner class info => no declaring class - return 0; - } - - Array* i_icls = klass->inner_classes(); - ConstantPool* i_cp = klass->constants(); - int i_length = i_icls->length(); - - // Find inner_klass attribute - for (int i = 0; i + next_offset < i_length; i += next_offset) { - u2 ioff = i_icls->at(i + inner_index); - u2 ooff = i_icls->at(i + outer_index); - u2 noff = i_icls->at(i + name_offset); - if (ioff != 0) { - // Check to see if the name matches the class we're looking for - // before attempting to find the class. - if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) { - return ooff; - } - } - } - - // It may be anonymous; try for that. 
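The removed DescriptorStream walked generic signature strings character by character, marking identifier spans and recursing through bounds. A standalone Java sketch that recovers just the formal type parameter names from such a signature (a simplification for illustration, not the removed parser's API):

    import java.util.ArrayList;
    import java.util.List;

    class SignatureSketch {
        // Extracts formal type parameter names from a generic signature such as
        // "<T:Ljava/lang/Object;V::Ljava/lang/Comparable<TV;>;>Ljava/lang/Object;".
        static List<String> typeParameterNames(String sig) {
            List<String> names = new ArrayList<String>();
            if (sig.isEmpty() || sig.charAt(0) != '<') return names;
            int i = 1;
            while (i < sig.length() && sig.charAt(i) != '>') {
                int colon = sig.indexOf(':', i);
                names.add(sig.substring(i, colon));     // the marked identifier span
                i = colon;
                while (i < sig.length() && sig.charAt(i) == ':') {
                    i++;                                // consume ':'
                    if (sig.charAt(i) == ':') continue; // empty class bound
                    i = skipReferenceType(sig, i);
                }
            }
            return names;
        }

        // Skips one reference type starting at i; returns the index just past it.
        static int skipReferenceType(String sig, int i) {
            if (sig.charAt(i) == 'T') return sig.indexOf(';', i) + 1;  // type variable
            int depth = 0;                                             // 'L'...';' class type
            while (true) {
                char ch = sig.charAt(i++);
                if (ch == '<') depth++;
                else if (ch == '>') depth--;
                else if (ch == ';' && depth == 0) return i;
            }
        }

        public static void main(String[] args) {
            System.out.println(typeParameterNames(
                "<T:Ljava/lang/Object;V::Ljava/lang/Comparable<TV;>;>Ljava/lang/Object;"));
            // prints [T, V]
        }
    }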
- u2 encl_method_class_idx = klass->enclosing_method_class_index(); - if (encl_method_class_idx != 0) { - return encl_method_class_idx; - } - - return 0; -} - -MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) { - Symbol* generic_sig = m->generic_signature(); - MethodDescriptor* md = NULL; - if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) { - md = parse_generic_signature(m->signature(), outer); - } - assert(md != NULL, "Could not parse method signature"); - md->bind_variables_to_parameters(); - return md; -} - -MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) { - - DescriptorStream ds(sym); - DescriptorStream* STREAM = &ds; - - GrowableArray params(8); - char c = READ(); - if (c == '<') { - c = READ(); - while (c != '>') { - PUSH(c); - TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM); - params.append(ftp); - c = READ(); - } - } else { - PUSH(c); - } - - EXPECT('('); - - GrowableArray parameters(8); - c = READ(); - while (c != ')') { - PUSH(c); - Type* arg = Type::parse_generic_signature(CHECK_STREAM); - parameters.append(arg); - c = READ(); - } - - Type* rt = Type::parse_generic_signature(CHECK_STREAM); - - GrowableArray throws; - while (!STREAM->at_end()) { - EXPECT('^'); - Type* spec = Type::parse_generic_signature(CHECK_STREAM); - throws.append(spec); - } - - return new MethodDescriptor(params, outer, parameters, rt, throws); -} - -void MethodDescriptor::bind_variables_to_parameters() { - for (int i = 0; i < _type_parameters.length(); ++i) { - _type_parameters.at(i)->bind_variables_to_parameters(this, i); - } - for (int i = 0; i < _parameters.length(); ++i) { - _parameters.at(i)->bind_variables_to_parameters(this); - } - _return_type->bind_variables_to_parameters(this); - for (int i = 0; i < _throws.length(); ++i) { - _throws.at(i)->bind_variables_to_parameters(this); - } -} - -bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) { - - if (_parameters.length() == other->_parameters.length()) { - for (int i = 0; i < _parameters.length(); ++i) { - if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) { - return false; - } - } - - if (_return_type->as_primitive() != NULL) { - return _return_type->covariant_match(other->_return_type, ctx); - } else { - // return type is a reference - return other->_return_type->as_class() != NULL || - other->_return_type->as_variable() != NULL || - other->_return_type->as_array() != NULL; - } - } else { - return false; - } -} - -MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) { - - GrowableArray type_params(_type_parameters.length()); - for (int i = 0; i < _type_parameters.length(); ++i) { - type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0)); - } - - ClassDescriptor* outer = _outer_class == NULL ? 
NULL : - _outer_class->canonicalize(ctx); - - GrowableArray params(_parameters.length()); - for (int i = 0; i < _parameters.length(); ++i) { - params.append(_parameters.at(i)->canonicalize(ctx, 0)); - } - - Type* rt = _return_type->canonicalize(ctx, 0); - - GrowableArray throws(_throws.length()); - for (int i = 0; i < _throws.length(); ++i) { - throws.append(_throws.at(i)->canonicalize(ctx, 0)); - } - - return new MethodDescriptor(type_params, outer, params, rt, throws); -} - -#ifndef PRODUCT -TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) { - stringStream ss(256); - - ss.print("("); - for (int i = 0; i < _parameters.length(); ++i) { - _parameters.at(i)->reify_signature(&ss, ctx); - } - ss.print(")"); - _return_type->reify_signature(&ss, ctx); - return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD); -} - -void MethodDescriptor::print_on(outputStream* str) const { - str->indent().print_cr("MethodDescriptor {"); - { - streamIndentor si(str); - if (_type_parameters.length() > 0) { - str->indent().print_cr("Formals: {"); - { - streamIndentor si(str); - for (int i = 0; i < _type_parameters.length(); ++i) { - _type_parameters.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - } - str->indent().print_cr("Parameters: {"); - { - streamIndentor si(str); - for (int i = 0; i < _parameters.length(); ++i) { - _parameters.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - str->indent().print_cr("Return Type: "); - { - streamIndentor si(str); - _return_type->print_on(str); - } - - if (_throws.length() > 0) { - str->indent().print_cr("Throws: {"); - { - streamIndentor si(str); - for (int i = 0; i < _throws.length(); ++i) { - _throws.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - } - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) { - STREAM->set_mark(); - char c = READ(); - while (c != ':') { - c = READ(); - } - - Identifier* id = STREAM->identifier_from_mark(); - - ClassType* class_bound = NULL; - GrowableArray interface_bounds(8); - - c = READ(); - if (c != '>') { - if (c != ':') { - EXPECTED(c, 'L'); - class_bound = ClassType::parse_generic_signature(CHECK_STREAM); - c = READ(); - } - - while (c == ':') { - EXPECT('L'); - ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM); - interface_bounds.append(fts); - c = READ(); - } - } - PUSH(c); - - return new TypeParameter(id, class_bound, interface_bounds); -} - -void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) { - if (_class_bound != NULL) { - _class_bound->bind_variables_to_parameters(sig); - } - for (int i = 0; i < _interface_bounds.length(); ++i) { - _interface_bounds.at(i)->bind_variables_to_parameters(sig); - } - _position = position; -} - -Type* TypeParameter::resolve( - Context* ctx, int inner_depth, int ctx_depth) { - - if (inner_depth == -1) { - // This indicates that the parameter is a method type parameter, which - // isn't resolveable using the class hierarchy context - return bound(); - } - - ClassType* provider = ctx->at_depth(ctx_depth); - if (provider != NULL) { - for (int i = 0; i < inner_depth && provider != NULL; ++i) { - provider = provider->outer_class(); - } - if (provider != NULL) { - TypeArgument* arg = provider->type_argument_at(_position); - if (arg != NULL) { - Type* value = arg->lower_bound(); - return value->canonicalize(ctx, ctx_depth + 1); - } - } - } - - return bound(); -} - -TypeParameter* 
TypeParameter::canonicalize(Context* ctx, int ctx_depth) { - ClassType* bound = _class_bound == NULL ? NULL : - _class_bound->canonicalize(ctx, ctx_depth); - - GrowableArray ifaces(_interface_bounds.length()); - for (int i = 0; i < _interface_bounds.length(); ++i) { - ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth)); - } - - TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces); - ret->_position = _position; - return ret; -} - -ClassType* TypeParameter::bound() { - if (_class_bound != NULL) { - return _class_bound; - } - - if (_interface_bounds.length() == 1) { - return _interface_bounds.at(0); - } - - return ClassType::java_lang_Object(); // TODO: investigate this case -} - -#ifndef PRODUCT -void TypeParameter::print_on(outputStream* str) const { - str->indent().print_cr("Formal: {"); - { - streamIndentor si(str); - - str->indent().print("Identifier: "); - _identifier->print_on(str); - str->print_cr(""); - if (_class_bound != NULL) { - str->indent().print_cr("Class Bound: "); - streamIndentor si(str); - _class_bound->print_on(str); - } - if (_interface_bounds.length() > 0) { - str->indent().print_cr("Interface Bounds: {"); - { - streamIndentor si(str); - for (int i = 0; i < _interface_bounds.length(); ++i) { - _interface_bounds.at(i)->print_on(str); - } - } - str->indent().print_cr("}"); - } - str->indent().print_cr("Ordinal Position: %d", _position); - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -Type* Type::parse_generic_signature(DescriptorStream* STREAM) { - char c = READ(); - switch (c) { - case 'L': - return ClassType::parse_generic_signature(CHECK_STREAM); - case 'T': - return TypeVariable::parse_generic_signature(CHECK_STREAM); - case '[': - return ArrayType::parse_generic_signature(CHECK_STREAM); - default: - return new PrimitiveType(c); - } -} - -Identifier* ClassType::parse_generic_signature_simple(GrowableArray* args, - bool* has_inner, DescriptorStream* STREAM) { - STREAM->set_mark(); - - char c = READ(); - while (c != ';' && c != '.' 
&& c != '<') { c = READ(); } - Identifier* id = STREAM->identifier_from_mark(); - - if (c == '<') { - c = READ(); - while (c != '>') { - PUSH(c); - TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM); - args->append(arg); - c = READ(); - } - c = READ(); - } - - *has_inner = (c == '.'); - if (!(*has_inner)) { - EXPECTED(c, ';'); - } - - return id; -} - -ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) { - return parse_generic_signature(NULL, CHECK_STREAM); -} - -ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) { - GrowableArray args; - ClassType* gct = NULL; - bool has_inner = false; - - Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM); - if (id != NULL) { - gct = new ClassType(id, args, outer); - - if (has_inner) { - gct = parse_generic_signature(gct, CHECK_STREAM); - } - } - return gct; -} - -ClassType* ClassType::from_symbol(Symbol* sym) { - assert(sym != NULL, "Must not be null"); - GrowableArray args; - Identifier* id = new Identifier(sym, 0, sym->utf8_length()); - return new ClassType(id, args, NULL); -} - -ClassType* ClassType::java_lang_Object() { - return from_symbol(vmSymbols::java_lang_Object()); -} - -void ClassType::bind_variables_to_parameters(Descriptor* sig) { - for (int i = 0; i < _type_arguments.length(); ++i) { - _type_arguments.at(i)->bind_variables_to_parameters(sig); - } - if (_outer_class != NULL) { - _outer_class->bind_variables_to_parameters(sig); - } -} - -TypeArgument* ClassType::type_argument_at(int i) { - if (i >= 0 && i < _type_arguments.length()) { - return _type_arguments.at(i); - } else { - return NULL; - } -} - -#ifndef PRODUCT -void ClassType::reify_signature(stringStream* ss, Context* ctx) { - ss->print("L"); - _identifier->print_on(ss); - ss->print(";"); -} - -void ClassType::print_on(outputStream* str) const { - str->indent().print_cr("Class {"); - { - streamIndentor si(str); - str->indent().print("Name: "); - _identifier->print_on(str); - str->print_cr(""); - if (_type_arguments.length() != 0) { - str->indent().print_cr("Type Arguments: {"); - { - streamIndentor si(str); - for (int j = 0; j < _type_arguments.length(); ++j) { - _type_arguments.at(j)->print_on(str); - } - } - str->indent().print_cr("}"); - } - if (_outer_class != NULL) { - str->indent().print_cr("Outer Class: "); - streamIndentor sir(str); - _outer_class->print_on(str); - } - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -bool ClassType::covariant_match(Type* other, Context* ctx) { - - if (other == this) { - return true; - } - - TypeVariable* variable = other->as_variable(); - if (variable != NULL) { - other = variable->resolve(ctx, 0); - } - - ClassType* outer = outer_class(); - ClassType* other_class = other->as_class(); - - if (other_class == NULL || - (outer == NULL) != (other_class->outer_class() == NULL)) { - return false; - } - - if (!_identifier->equals(other_class->_identifier)) { - return false; - } - - if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) { - return false; - } - - return true; -} - -ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) { - - GrowableArray args(_type_arguments.length()); - for (int i = 0; i < _type_arguments.length(); ++i) { - args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth)); - } - - ClassType* outer = _outer_class == NULL ? 
NULL : - _outer_class->canonicalize(ctx, ctx_depth); - - return new ClassType(_identifier, args, outer); -} - -TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) { - STREAM->set_mark(); - char c = READ(); - while (c != ';') { - c = READ(); - } - Identifier* id = STREAM->identifier_from_mark(); - - return new TypeVariable(id); -} - -void TypeVariable::bind_variables_to_parameters(Descriptor* sig) { - _parameter = sig->find_type_parameter(_id, &_inner_depth); - if (VerifyGenericSignatures && _parameter == NULL) { - fatal("Could not find formal parameter"); - } -} - -Type* TypeVariable::resolve(Context* ctx, int ctx_depth) { - if (parameter() != NULL) { - return parameter()->resolve(ctx, inner_depth(), ctx_depth); - } else { - if (VerifyGenericSignatures) { - fatal("Type variable matches no parameter"); - } - return NULL; - } -} - -bool TypeVariable::covariant_match(Type* other, Context* ctx) { - - if (other == this) { - return true; - } - - Context my_context(NULL); // empty, results in erasure - Type* my_type = resolve(&my_context, 0); - if (my_type == NULL) { - return false; - } - - return my_type->covariant_match(other, ctx); -} - -Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) { - return resolve(ctx, ctx_depth); -} - -#ifndef PRODUCT -void TypeVariable::reify_signature(stringStream* ss, Context* ctx) { - Type* type = resolve(ctx, 0); - if (type != NULL) { - type->reify_signature(ss, ctx); - } -} - -void TypeVariable::print_on(outputStream* str) const { - str->indent().print_cr("Type Variable {"); - { - streamIndentor si(str); - str->indent().print("Name: "); - _id->print_on(str); - str->print_cr(""); - str->indent().print_cr("Inner depth: %d", _inner_depth); - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) { - Type* base = Type::parse_generic_signature(CHECK_STREAM); - return new ArrayType(base); -} - -void ArrayType::bind_variables_to_parameters(Descriptor* sig) { - assert(_base != NULL, "Invalid base"); - _base->bind_variables_to_parameters(sig); -} - -bool ArrayType::covariant_match(Type* other, Context* ctx) { - assert(_base != NULL, "Invalid base"); - - if (other == this) { - return true; - } - - ArrayType* other_array = other->as_array(); - return (other_array != NULL && _base->covariant_match(other_array->_base, ctx)); -} - -ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) { - assert(_base != NULL, "Invalid base"); - return new ArrayType(_base->canonicalize(ctx, ctx_depth)); -} - -#ifndef PRODUCT -void ArrayType::reify_signature(stringStream* ss, Context* ctx) { - assert(_base != NULL, "Invalid base"); - ss->print("["); - _base->reify_signature(ss, ctx); -} - -void ArrayType::print_on(outputStream* str) const { - str->indent().print_cr("Array {"); - { - streamIndentor si(str); - _base->print_on(str); - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -bool PrimitiveType::covariant_match(Type* other, Context* ctx) { - - PrimitiveType* other_prim = other->as_primitive(); - return (other_prim != NULL && _type == other_prim->_type); -} - -PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) { - return this; -} - -#ifndef PRODUCT -void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) { - ss->print("%c", _type); -} - -void PrimitiveType::print_on(outputStream* str) const { - str->indent().print_cr("Primitive: '%c'", _type); -} -#endif // ndef PRODUCT - -void 
PrimitiveType::bind_variables_to_parameters(Descriptor* sig) { -} - -TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) { - char c = READ(); - Type* type = NULL; - - switch (c) { - case '*': - return new TypeArgument(ClassType::java_lang_Object(), NULL); - break; - default: - PUSH(c); - // fall-through - case '+': - case '-': - type = Type::parse_generic_signature(CHECK_STREAM); - if (c == '+') { - return new TypeArgument(type, NULL); - } else if (c == '-') { - return new TypeArgument(ClassType::java_lang_Object(), type); - } else { - return new TypeArgument(type, type); - } - } -} - -void TypeArgument::bind_variables_to_parameters(Descriptor* sig) { - assert(_lower_bound != NULL, "Invalid lower bound"); - _lower_bound->bind_variables_to_parameters(sig); - if (_upper_bound != NULL && _upper_bound != _lower_bound) { - _upper_bound->bind_variables_to_parameters(sig); - } -} - -bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) { - assert(_lower_bound != NULL, "Invalid lower bound"); - - if (other == this) { - return true; - } - - if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) { - return false; - } - return true; -} - -TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) { - assert(_lower_bound != NULL, "Invalid lower bound"); - Type* lower = _lower_bound->canonicalize(ctx, ctx_depth); - Type* upper = NULL; - - if (_upper_bound == _lower_bound) { - upper = lower; - } else if (_upper_bound != NULL) { - upper = _upper_bound->canonicalize(ctx, ctx_depth); - } - - return new TypeArgument(lower, upper); -} - -#ifndef PRODUCT -void TypeArgument::print_on(outputStream* str) const { - str->indent().print_cr("TypeArgument {"); - { - streamIndentor si(str); - if (_lower_bound != NULL) { - str->indent().print("Lower bound: "); - _lower_bound->print_on(str); - } - if (_upper_bound != NULL) { - str->indent().print("Upper bound: "); - _upper_bound->print_on(str); - } - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -void Context::Mark::destroy() { - if (is_active()) { - _context->reset_to_mark(_marked_size); - } - deactivate(); -} - -void Context::apply_type_arguments( - InstanceKlass* current, InstanceKlass* super, TRAPS) { - assert(_cache != NULL, "Cannot use an empty context"); - ClassType* spec = NULL; - if (current != NULL) { - ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK); - if (super == current->super()) { - spec = descriptor->super(); - } else { - spec = descriptor->interface_desc(super->name()); - } - if (spec != NULL) { - _type_arguments.push(spec); - } - } -} - -void Context::reset_to_mark(int size) { - _type_arguments.trunc_to(size); -} - -ClassType* Context::at_depth(int i) const { - if (i < _type_arguments.length()) { - return _type_arguments.at(_type_arguments.length() - 1 - i); - } - return NULL; -} - -#ifndef PRODUCT -void Context::print_on(outputStream* str) const { - str->indent().print_cr("Context {"); - for (int i = 0; i < _type_arguments.length(); ++i) { - streamIndentor si(str); - str->indent().print("leval %d: ", i); - ClassType* ct = at_depth(i); - if (ct == NULL) { - str->print_cr(""); - continue; - } else { - str->print_cr("{"); - } - - for (int j = 0; j < ct->type_arguments_length(); ++j) { - streamIndentor si(str); - TypeArgument* ta = ct->type_argument_at(j); - Type* bound = ta->lower_bound(); - bound->print_on(str); - } - str->indent().print_cr("}"); - } - str->indent().print_cr("}"); -} -#endif // ndef PRODUCT - -ClassDescriptor* 
DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) { - - ClassDescriptor** existing = _class_descriptors.get(ik); - if (existing == NULL) { - ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL); - _class_descriptors.put(ik, cd); - return cd; - } else { - return *existing; - } -} - -MethodDescriptor* DescriptorCache::descriptor_for( - Method* mh, ClassDescriptor* cd, TRAPS) { - assert(mh != NULL && cd != NULL, "Should not be NULL"); - MethodDescriptor** existing = _method_descriptors.get(mh); - if (existing == NULL) { - MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd); - _method_descriptors.put(mh, md); - return md; - } else { - return *existing; - } -} -MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) { - ClassDescriptor* cd = descriptor_for( - InstanceKlass::cast(mh->method_holder()), CHECK_NULL); - return descriptor_for(mh, cd, THREAD); -} - -} // namespace generic diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/genericSignatures.hpp --- a/src/share/vm/classfile/genericSignatures.hpp Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,467 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP -#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP - -#include "classfile/symbolTable.hpp" -#include "memory/allocation.hpp" -#include "runtime/signature.hpp" -#include "utilities/growableArray.hpp" -#include "utilities/resourceHash.hpp" - -class stringStream; - -namespace generic { - -class Identifier; -class ClassDescriptor; -class MethodDescriptor; - -class TypeParameter; // a formal type parameter declared in generic signatures -class TypeArgument; // The "type value" passed to fill parameters in supertypes -class TypeVariable; // A usage of a type parameter as a value -/** - * Example: - * - * class Foo extends Bar { int m(V v) {} } - * ^^^^^^ ^^^^^^ ^^ - * type parameters type argument type variable - * - * Note that a type variable could be passed as an argument too: - * class Foo extends Bar { int m(V v) {} } - * ^^^ - * type argument's value is a type variable - */ - - -class Type; -class ClassType; -class ArrayType; -class PrimitiveType; -class Context; -class DescriptorCache; - -class DescriptorStream; - -class Identifier : public ResourceObj { - private: - Symbol* _sym; - int _begin; - int _end; - - public: - Identifier(Symbol* sym, int begin, int end) : - _sym(sym), _begin(begin), _end(end) {} - - bool equals(Identifier* other); - bool equals(Symbol* sym); - -#ifndef PRODUCT - void print_on(outputStream* str) const; -#endif // ndef PRODUCT -}; - -class Descriptor : public ResourceObj { - protected: - GrowableArray _type_parameters; - ClassDescriptor* _outer_class; - - Descriptor(GrowableArray& params, - ClassDescriptor* outer) - : _type_parameters(params), _outer_class(outer) {} - - public: - - ClassDescriptor* outer_class() { return _outer_class; } - void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; } - - virtual ClassDescriptor* as_class_signature() { return NULL; } - virtual MethodDescriptor* as_method_signature() { return NULL; } - - bool is_class_signature() { return as_class_signature() != NULL; } - bool is_method_signature() { return as_method_signature() != NULL; } - - GrowableArray& type_parameters() { - return _type_parameters; - } - - TypeParameter* find_type_parameter(Identifier* id, int* param_depth); - - virtual void bind_variables_to_parameters() = 0; - -#ifndef PRODUCT - virtual void print_on(outputStream* str) const = 0; -#endif -}; - -class ClassDescriptor : public Descriptor { - private: - ClassType* _super; - GrowableArray _interfaces; - MethodDescriptor* _outer_method; - - ClassDescriptor(GrowableArray& ftp, ClassType* scs, - GrowableArray& sis, ClassDescriptor* outer_class = NULL, - MethodDescriptor* outer_method = NULL) - : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis), - _outer_method(outer_method) {} - - static u2 get_outer_class_index(InstanceKlass* k, TRAPS); - static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS); - - public: - - virtual ClassDescriptor* as_class_signature() { return this; } - - MethodDescriptor* outer_method() { return _outer_method; } - void set_outer_method(MethodDescriptor* m) { _outer_method = m; } - - ClassType* super() { return _super; } - ClassType* interface_desc(Symbol* sym); - - static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS); - static ClassDescriptor* parse_generic_signature(Symbol* sym); - - // For use in superclass chains in positions where this is no generic info - static ClassDescriptor* placeholder(InstanceKlass* klass); - -#ifndef PRODUCT - void print_on(outputStream* str) 
const; -#endif - - ClassDescriptor* canonicalize(Context* ctx); - - // Linking sets the position index in any contained TypeVariable type - // to correspond to the location of that identifier in the formal type - // parameters. - void bind_variables_to_parameters(); -}; - -class MethodDescriptor : public Descriptor { - private: - GrowableArray _parameters; - Type* _return_type; - GrowableArray _throws; - - MethodDescriptor(GrowableArray& ftp, ClassDescriptor* outer, - GrowableArray& sigs, Type* rt, GrowableArray& throws) - : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt), - _throws(throws) {} - - public: - - static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer); - static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer); - - MethodDescriptor* as_method_signature() { return this; } - - // Performs generic analysis on the method parameters to determine - // if both methods refer to the same argument types. - bool covariant_match(MethodDescriptor* other, Context* ctx); - - // Returns a new method descriptor with all generic variables - // removed and replaced with whatever is indicated using the Context. - MethodDescriptor* canonicalize(Context* ctx); - - void bind_variables_to_parameters(); - -#ifndef PRODUCT - TempNewSymbol reify_signature(Context* ctx, TRAPS); - void print_on(outputStream* str) const; -#endif -}; - -class TypeParameter : public ResourceObj { - private: - Identifier* _identifier; - ClassType* _class_bound; - GrowableArray _interface_bounds; - - // The position is the ordinal location of the parameter within the - // formal parameter list (excluding outer classes). It is only set for - // formal type parameters that are associated with a class -- method - // type parameters are left as -1. When resolving a generic variable to - // find the actual type, this index is used to access the generic type - // argument in the provided context object. 
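
The deleted comment above describes the core of variable resolution: a formal type parameter (the _position field declared just below) stores no actual type, only its ordinal index among the class's formals, with -1 for method type parameters that cannot be resolved through the class-hierarchy context; TypeParameter::resolve() then uses that index to pick the matching type argument out of the Context, erasing to the declared bound when no argument is available. The following is a minimal standalone C++ sketch of that indexing scheme; the names are invented for illustration and none of the HotSpot types are used.

    #include <iostream>
    #include <string>
    #include <vector>

    // Invented stand-in for TypeParameter: it records only its ordinal
    // position among the class's formals (-1 for method type parameters,
    // which cannot be resolved through the class hierarchy context).
    struct FormalParam {
      std::string name;     // e.g. "V"
      int position;         // index among the class's formal parameters
      std::string bound;    // erasure fallback, e.g. "java/lang/Object"

      std::string resolve(const std::vector<std::string>& context_args) const {
        if (position >= 0 && position < (int)context_args.size()) {
          return context_args[position];  // type argument supplied by the subtype
        }
        return bound;                     // no usable context: erase to the bound
      }
    };

    int main() {
      // Models: class Bar<K, V> { ... }  class Foo extends Bar<String, Integer> { ... }
      FormalParam v{"V", 1, "java/lang/Object"};
      std::vector<std::string> foo_args = {"java/lang/String", "java/lang/Integer"};
      std::cout << v.resolve(foo_args) << "\n";  // java/lang/Integer
      std::cout << v.resolve({}) << "\n";        // java/lang/Object (erasure)
      return 0;
    }
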
- int _position; // Assigned during variable linking - - TypeParameter(Identifier* id, ClassType* class_bound, - GrowableArray& interface_bounds) : - _identifier(id), _class_bound(class_bound), - _interface_bounds(interface_bounds), _position(-1) {} - - public: - static TypeParameter* parse_generic_signature(DescriptorStream* str); - - ClassType* bound(); - int position() { return _position; } - - void bind_variables_to_parameters(Descriptor* sig, int position); - Identifier* identifier() { return _identifier; } - - Type* resolve(Context* ctx, int inner_depth, int ctx_depth); - TypeParameter* canonicalize(Context* ctx, int ctx_depth); - -#ifndef PRODUCT - void print_on(outputStream* str) const; -#endif -}; - -class Type : public ResourceObj { - public: - static Type* parse_generic_signature(DescriptorStream* str); - - virtual ClassType* as_class() { return NULL; } - virtual TypeVariable* as_variable() { return NULL; } - virtual ArrayType* as_array() { return NULL; } - virtual PrimitiveType* as_primitive() { return NULL; } - - virtual bool covariant_match(Type* gt, Context* ctx) = 0; - virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0; - - virtual void bind_variables_to_parameters(Descriptor* sig) = 0; - -#ifndef PRODUCT - virtual void reify_signature(stringStream* ss, Context* ctx) = 0; - virtual void print_on(outputStream* str) const = 0; -#endif -}; - -class ClassType : public Type { - friend class ClassDescriptor; - protected: - Identifier* _identifier; - GrowableArray _type_arguments; - ClassType* _outer_class; - - ClassType(Identifier* identifier, - GrowableArray& args, - ClassType* outer) - : _identifier(identifier), _type_arguments(args), _outer_class(outer) {} - - // Returns true if there are inner classes to read - static Identifier* parse_generic_signature_simple( - GrowableArray* args, - bool* has_inner, DescriptorStream* str); - - static ClassType* parse_generic_signature(ClassType* outer, - DescriptorStream* str); - static ClassType* from_symbol(Symbol* sym); - - public: - ClassType* as_class() { return this; } - - static ClassType* parse_generic_signature(DescriptorStream* str); - static ClassType* java_lang_Object(); - - Identifier* identifier() { return _identifier; } - int type_arguments_length() { return _type_arguments.length(); } - TypeArgument* type_argument_at(int i); - - virtual ClassType* outer_class() { return _outer_class; } - - bool covariant_match(Type* gt, Context* ctx); - ClassType* canonicalize(Context* ctx, int context_depth); - - void bind_variables_to_parameters(Descriptor* sig); - -#ifndef PRODUCT - void reify_signature(stringStream* ss, Context* ctx); - void print_on(outputStream* str) const; -#endif -}; - -class TypeVariable : public Type { - private: - Identifier* _id; - TypeParameter* _parameter; // assigned during linking - - // how many steps "out" from inner classes, -1 if method - int _inner_depth; - - TypeVariable(Identifier* id) - : _id(id), _parameter(NULL), _inner_depth(0) {} - - public: - TypeVariable* as_variable() { return this; } - - static TypeVariable* parse_generic_signature(DescriptorStream* str); - - Identifier* identifier() { return _id; } - TypeParameter* parameter() { return _parameter; } - int inner_depth() { return _inner_depth; } - - void bind_variables_to_parameters(Descriptor* sig); - - Type* resolve(Context* ctx, int ctx_depth); - bool covariant_match(Type* gt, Context* ctx); - Type* canonicalize(Context* ctx, int ctx_depth); - -#ifndef PRODUCT - void reify_signature(stringStream* ss, Context* ctx); - void 
print_on(outputStream* str) const; -#endif -}; - -class ArrayType : public Type { - private: - Type* _base; - - ArrayType(Type* base) : _base(base) {} - - public: - ArrayType* as_array() { return this; } - - static ArrayType* parse_generic_signature(DescriptorStream* str); - - bool covariant_match(Type* gt, Context* ctx); - ArrayType* canonicalize(Context* ctx, int ctx_depth); - - void bind_variables_to_parameters(Descriptor* sig); - -#ifndef PRODUCT - void reify_signature(stringStream* ss, Context* ctx); - void print_on(outputStream* str) const; -#endif -}; - -class PrimitiveType : public Type { - friend class Type; - private: - char _type; // includes V for void - - PrimitiveType(char& type) : _type(type) {} - - public: - PrimitiveType* as_primitive() { return this; } - - bool covariant_match(Type* gt, Context* ctx); - PrimitiveType* canonicalize(Context* ctx, int ctx_depth); - - void bind_variables_to_parameters(Descriptor* sig); - -#ifndef PRODUCT - void reify_signature(stringStream* ss, Context* ctx); - void print_on(outputStream* str) const; -#endif -}; - -class TypeArgument : public ResourceObj { - private: - Type* _lower_bound; - Type* _upper_bound; // may be null or == _lower_bound - - TypeArgument(Type* lower_bound, Type* upper_bound) - : _lower_bound(lower_bound), _upper_bound(upper_bound) {} - - public: - - static TypeArgument* parse_generic_signature(DescriptorStream* str); - - Type* lower_bound() { return _lower_bound; } - Type* upper_bound() { return _upper_bound; } - - void bind_variables_to_parameters(Descriptor* sig); - TypeArgument* canonicalize(Context* ctx, int ctx_depth); - - bool covariant_match(TypeArgument* a, Context* ctx); - -#ifndef PRODUCT - void print_on(outputStream* str) const; -#endif -}; - - -class Context : public ResourceObj { - private: - DescriptorCache* _cache; - GrowableArray _type_arguments; - - void reset_to_mark(int size); - - public: - // When this object goes out of scope or 'destroy' is - // called, then the application of the type to the - // context is wound-back (unless it's been deactivated). - class Mark : public StackObj { - private: - mutable Context* _context; - int _marked_size; - - bool is_active() const { return _context != NULL; } - void deactivate() const { _context = NULL; } - - public: - Mark() : _context(NULL), _marked_size(0) {} - Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {} - Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) { - m.deactivate(); // Ownership is transferred - } - - Mark& operator=(const Mark& cm) { - destroy(); - _context = cm._context; - _marked_size = cm._marked_size; - cm.deactivate(); - return *this; - } - - void destroy(); - ~Mark() { destroy(); } - }; - - Context(DescriptorCache* cache) : _cache(cache) {} - - Mark mark() { return Mark(this, _type_arguments.length()); } - void apply_type_arguments(InstanceKlass* current, InstanceKlass* super,TRAPS); - - ClassType* at_depth(int i) const; - -#ifndef PRODUCT - void print_on(outputStream* str) const; -#endif -}; - -/** - * Contains a cache of descriptors for classes and methods so they can be - * looked-up instead of reparsing each time they are needed. 
- */ -class DescriptorCache : public ResourceObj { - private: - ResourceHashtable _class_descriptors; - ResourceHashtable _method_descriptors; - - public: - ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS); - - MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS); - // Class descriptor derived from method holder - MethodDescriptor* descriptor_for(Method* mh, TRAPS); -}; - -} // namespace generic - -#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP - diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/javaClasses.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -438,6 +438,29 @@ return true; } +bool java_lang_String::equals(oop str1, oop str2) { + assert(str1->klass() == SystemDictionary::String_klass(), + "must be java String"); + assert(str2->klass() == SystemDictionary::String_klass(), + "must be java String"); + typeArrayOop value1 = java_lang_String::value(str1); + int offset1 = java_lang_String::offset(str1); + int length1 = java_lang_String::length(str1); + typeArrayOop value2 = java_lang_String::value(str2); + int offset2 = java_lang_String::offset(str2); + int length2 = java_lang_String::length(str2); + + if (length1 != length2) { + return false; + } + for (int i = 0; i < length1; i++) { + if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) { + return false; + } + } + return true; +} + void java_lang_String::print(Handle java_string, outputStream* st) { oop obj = java_string(); assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string"); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/javaClasses.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -182,6 +182,7 @@ static unsigned int hash_string(oop java_string); static bool equals(oop java_string, jchar* chars, int len); + static bool equals(oop str1, oop str2); // Conversion between '.' and '/' formats static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/symbolTable.cpp --- a/src/share/vm/classfile/symbolTable.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/symbolTable.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -341,7 +341,7 @@ Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len, unsigned int hashValue_arg, bool c_heap, TRAPS) { - assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(), + assert(!Universe::heap()->is_in_reserved(name), "proposed name of symbol must be stable"); // Don't allow symbols to be created which cannot fit in a Symbol*. @@ -685,7 +685,7 @@ if (found_string != NULL) return found_string; debug_only(StableMemoryChecker smc(name, len * sizeof(name[0]))); - assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(), + assert(!Universe::heap()->is_in_reserved(name), "proposed name of symbol must be stable"); Handle string; @@ -807,6 +807,8 @@ } } +// This verification is part of Universe::verify() and needs to be quick. +// See StringTable::verify_and_compare() below for exhaustive verification. 
void StringTable::verify() { for (int i = 0; i < the_table()->table_size(); ++i) { HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i); @@ -825,6 +827,162 @@ the_table()->dump_table(st, "StringTable"); } +StringTable::VerifyRetTypes StringTable::compare_entries( + int bkt1, int e_cnt1, + HashtableEntry<oop, mtSymbol>* e_ptr1, + int bkt2, int e_cnt2, + HashtableEntry<oop, mtSymbol>* e_ptr2) { + // These entries are sanity checked by verify_and_compare_entries() + // before this function is called. + oop str1 = e_ptr1->literal(); + oop str2 = e_ptr2->literal(); + + if (str1 == str2) { + tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") " + "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]", + (void *)str1, bkt1, e_cnt1, bkt2, e_cnt2); + return _verify_fail_continue; + } + + if (java_lang_String::equals(str1, str2)) { + tty->print_cr("ERROR: identical String values in entry @ " + "bucket[%d][%d] and entry @ bucket[%d][%d]", + bkt1, e_cnt1, bkt2, e_cnt2); + return _verify_fail_continue; + } + + return _verify_pass; +} + +StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt, + HashtableEntry<oop, mtSymbol>* e_ptr, + StringTable::VerifyMesgModes mesg_mode) { + + VerifyRetTypes ret = _verify_pass; // be optimistic + + oop str = e_ptr->literal(); + if (str == NULL) { + if (mesg_mode == _verify_with_mesgs) { + tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt, + e_cnt); + } + // NULL oop means no more verifications are possible + return _verify_fail_done; + } + + if (str->klass() != SystemDictionary::String_klass()) { + if (mesg_mode == _verify_with_mesgs) { + tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]", + bkt, e_cnt); + } + // not a String means no more verifications are possible + return _verify_fail_done; + } + + unsigned int h = java_lang_String::hash_string(str); + if (e_ptr->hash() != h) { + if (mesg_mode == _verify_with_mesgs) { + tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], " + "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h); + } + ret = _verify_fail_continue; + } + + if (the_table()->hash_to_index(h) != bkt) { + if (mesg_mode == _verify_with_mesgs) { + tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], " + "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h, + the_table()->hash_to_index(h)); + } + ret = _verify_fail_continue; + } + + return ret; +} + +// See StringTable::verify() above for the quick verification that is +// part of Universe::verify(). This verification is exhaustive and +// reports on every issue that is found. StringTable::verify() only +// reports on the first issue that is found.
+// +// StringTable::verify_entry() checks: +// - oop value != NULL (same as verify()) +// - oop value is a String +// - hash(String) == hash in entry (same as verify()) +// - index for hash == index of entry (same as verify()) +// +// StringTable::compare_entries() checks: +// - oops are unique across all entries +// - String values are unique across all entries +// +int StringTable::verify_and_compare_entries() { + assert(StringTable_lock->is_locked(), "sanity check"); + + int fail_cnt = 0; + + // first, verify all the entries individually: + for (int bkt = 0; bkt < the_table()->table_size(); bkt++) { + HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt); + for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) { + VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs); + if (ret != _verify_pass) { + fail_cnt++; + } + } + } + + // Optimization: if the above check did not find any failures, then + // the comparison loop below does not need to call verify_entry() + // before calling compare_entries(). If there were failures, then we + // have to call verify_entry() to see if the entry can be passed to + // compare_entries() safely. When we call verify_entry() in the loop + // below, we do so quietly to avoid duplicate messages and we don't + // increment fail_cnt because the failures have already been counted. + bool need_entry_verify = (fail_cnt != 0); + + // second, verify all entries relative to each other: + for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) { + HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1); + for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) { + if (need_entry_verify) { + VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1, + _verify_quietly); + if (ret == _verify_fail_done) { + // cannot use the current entry to compare against other entries + continue; + } + } + + for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) { + HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2); + int e_cnt2; + for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) { + if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) { + // skip the entries up to and including the one that + // we're comparing against + continue; + } + + if (need_entry_verify) { + VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2, + _verify_quietly); + if (ret == _verify_fail_done) { + // cannot compare against this entry + continue; + } + } + + // compare two entries, report and count any failures: + if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2) + != _verify_pass) { + fail_cnt++; + } + } + } + } + } + return fail_cnt; +} // Create a new table and using alternate hash code, populate the new table // with the existing strings. Set flag to use the alternate hash code afterwards.
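
Two mechanisms in the hunks above are worth illustrating. The new java_lang_String::equals(oop, oop) in javaClasses.cpp compares two strings through their value/offset/length triples, rejecting on length before touching characters, and verify_and_compare_entries() visits each unordered pair of table entries exactly once: the second bucket scan starts at the first bucket, and same-bucket entries up to and including the current one are skipped. The following standalone sketch models both against a toy bucketed table of character slices (invented names, no HotSpot types):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Models the value/offset/length comparison done by the new
    // java_lang_String::equals(oop, oop): lengths first, then chars.
    struct StrView {
      const uint16_t* value;  // backing char array
      int offset;             // first char within the array
      int length;             // number of chars
    };

    static bool views_equal(const StrView& a, const StrView& b) {
      if (a.length != b.length) return false;
      for (int i = 0; i < a.length; i++) {
        if (a.value[a.offset + i] != b.value[b.offset + i]) return false;
      }
      return true;
    }

    // Models the pairwise sweep in verify_and_compare_entries(): bucket2
    // starts at bucket1, and same-bucket entries up to and including the
    // current one are skipped, so each pair is compared exactly once.
    static int count_duplicate_pairs(const std::vector<std::vector<StrView> >& table) {
      int fail_cnt = 0;
      for (size_t b1 = 0; b1 < table.size(); b1++) {
        for (size_t e1 = 0; e1 < table[b1].size(); e1++) {
          for (size_t b2 = b1; b2 < table.size(); b2++) {
            for (size_t e2 = 0; e2 < table[b2].size(); e2++) {
              if (b1 == b2 && e2 <= e1) continue;  // already compared (or self)
              if (views_equal(table[b1][e1], table[b2][e2])) {
                std::printf("duplicate String in [%zu][%zu] and [%zu][%zu]\n",
                            b1, e1, b2, e2);
                fail_cnt++;
              }
            }
          }
        }
      }
      return fail_cnt;
    }

    int main() {
      static const uint16_t chars[] = { 'a', 'b', 'a', 'b', 'c' };
      StrView ab1 = { chars, 0, 2 };   // "ab"
      StrView ab2 = { chars, 2, 2 };   // "ab" again, different backing slice
      StrView abc = { chars, 2, 3 };   // "abc"
      std::vector<std::vector<StrView> > table(2);
      table[0].push_back(ab1);
      table[1].push_back(ab2);
      table[1].push_back(abc);
      std::printf("%d duplicate pair(s)\n", count_duplicate_pairs(table));  // 1
      return 0;
    }
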
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/symbolTable.hpp --- a/src/share/vm/classfile/symbolTable.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/symbolTable.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -311,6 +311,26 @@ static void verify(); static void dump(outputStream* st); + enum VerifyMesgModes { + _verify_quietly = 0, + _verify_with_mesgs = 1 + }; + + enum VerifyRetTypes { + _verify_pass = 0, + _verify_fail_continue = 1, + _verify_fail_done = 2 + }; + + static VerifyRetTypes compare_entries(int bkt1, int e_cnt1, + HashtableEntry<oop, mtSymbol>* e_ptr1, + int bkt2, int e_cnt2, + HashtableEntry<oop, mtSymbol>* e_ptr2); + static VerifyRetTypes verify_entry(int bkt, int e_cnt, + HashtableEntry<oop, mtSymbol>* e_ptr, + VerifyMesgModes mesg_mode); + static int verify_and_compare_entries(); + // Sharing static void copy_buckets(char** top, char*end) { the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/verifier.cpp --- a/src/share/vm/classfile/verifier.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/verifier.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -188,6 +188,10 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) { Symbol* name = klass->name(); Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass(); + Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass(); + + bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass); + bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass); return (should_verify_for(klass->class_loader(), should_verify_class) && // return if the class is a bootstrapping class @@ -210,9 +214,9 @@ // sun/reflect/SerializationConstructorAccessor. // NOTE: this is called too early in the bootstrapping process to be // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. - (refl_magic_klass == NULL || - !klass->is_subtype_of(refl_magic_klass) || - VerifyReflectionBytecodes) + // Also for lambda generated code, gte jdk8 + (!is_reflect || VerifyReflectionBytecodes) && + (!is_lambda || VerifyLambdaBytecodes) ); } @@ -2318,9 +2322,6 @@ types = 1 << JVM_CONSTANT_InvokeDynamic; break; case Bytecodes::_invokespecial: - types = (1 << JVM_CONSTANT_InterfaceMethodref) | - (1 << JVM_CONSTANT_Methodref); - break; case Bytecodes::_invokestatic: types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
(1 << JVM_CONSTANT_Methodref) : diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/classfile/vmSymbols.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -270,6 +270,7 @@ template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \ template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \ template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \ + template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \ template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \ template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \ template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \ @@ -745,6 +746,10 @@ do_name(log_name,"log") do_name(log10_name,"log10") do_name(pow_name,"pow") \ do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \ \ + do_name(addExact_name,"addExact") \ + do_name(subtractExact_name,"subtractExact") \ + do_name(multiplyExact_name,"multiplyExact") \ + \ do_intrinsic(_dabs, java_lang_Math, abs_name, double_double_signature, F_S) \ do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \ do_intrinsic(_dcos, java_lang_Math, cos_name, double_double_signature, F_S) \ @@ -757,6 +762,7 @@ do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \ do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \ do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \ + do_intrinsic(_addExact, java_lang_Math, addExact_name, int2_int_signature, F_S) \ \ do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \ do_name( floatToRawIntBits_name, "floatToRawIntBits") \ diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/codeBlob.cpp --- a/src/share/vm/code/codeBlob.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/codeBlob.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -245,7 +245,7 @@ } -void* BufferBlob::operator new(size_t s, unsigned size) { +void* BufferBlob::operator new(size_t s, unsigned size) throw() { void* p = CodeCache::allocate(size); return p; } @@ -347,14 +347,14 @@ } -void* RuntimeStub::operator new(size_t s, unsigned size) { +void* RuntimeStub::operator new(size_t s, unsigned size) throw() { void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } // operator new shared by all singletons: -void* SingletonBlob::operator new(size_t s, unsigned size) { +void* SingletonBlob::operator new(size_t s, unsigned size) throw() { void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/codeBlob.hpp --- a/src/share/vm/code/codeBlob.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/codeBlob.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -210,7 +210,7 @@ BufferBlob(const char* name, int size); BufferBlob(const char* name, int size, CodeBuffer* cb); - void* operator new(size_t s, unsigned size); + void* operator new(size_t s, unsigned size) throw(); public: // Creation @@ -284,7 +284,7 @@ bool caller_must_gc_arguments ); - void* operator new(size_t s, unsigned size); + void* operator new(size_t s, unsigned size) throw(); public: // Creation @@ -322,7 +322,7 @@ friend class VMStructs; protected: - void* operator new(size_t s, unsigned size); + void* operator new(size_t s, unsigned size) throw(); public: SingletonBlob( diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/codeCache.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -124,7 +124,6 @@ int CodeCache::_number_of_nmethods_with_dependencies = 0; bool CodeCache::_needs_cache_clean = false; nmethod* CodeCache::_scavenge_root_nmethods = NULL; -nmethod* CodeCache::_saved_nmethods = NULL; int CodeCache::_codemem_full_count = 0; @@ -464,96 +463,11 @@ } #endif //PRODUCT -/** - * Remove and return nmethod from the saved code list in order to reanimate it. - */ -nmethod* CodeCache::reanimate_saved_code(Method* m) { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - nmethod* saved = _saved_nmethods; - nmethod* prev = NULL; - while (saved != NULL) { - if (saved->is_in_use() && saved->method() == m) { - if (prev != NULL) { - prev->set_saved_nmethod_link(saved->saved_nmethod_link()); - } else { - _saved_nmethods = saved->saved_nmethod_link(); - } - assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods"); - saved->set_speculatively_disconnected(false); - saved->set_saved_nmethod_link(NULL); - if (PrintMethodFlushing) { - saved->print_on(tty, " ### nmethod is reconnected"); - } - if (LogCompilation && (xtty != NULL)) { - ttyLocker ttyl; - xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id()); - xtty->method(m); - xtty->stamp(); - xtty->end_elem(); - } - return saved; - } - prev = saved; - saved = saved->saved_nmethod_link(); - } - return NULL; -} - -/** - * Remove nmethod from the saved code list in order to discard it permanently - */ -void CodeCache::remove_saved_code(nmethod* nm) { - // For conc swpr this will be called with CodeCache_lock taken by caller - assert_locked_or_safepoint(CodeCache_lock); - assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods"); - nmethod* saved = _saved_nmethods; - nmethod* prev = NULL; - while (saved != NULL) { - if (saved == nm) { - if (prev != NULL) { - prev->set_saved_nmethod_link(saved->saved_nmethod_link()); - } else { - _saved_nmethods = saved->saved_nmethod_link(); - } - if (LogCompilation && (xtty != NULL)) { - ttyLocker ttyl; - xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id()); - xtty->stamp(); - xtty->end_elem(); - } - return; - } - prev = saved; - saved = saved->saved_nmethod_link(); - } - ShouldNotReachHere(); -} - -void CodeCache::speculatively_disconnect(nmethod* nm) { - assert_locked_or_safepoint(CodeCache_lock); - assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods"); - nm->set_saved_nmethod_link(_saved_nmethods); - _saved_nmethods = nm; - if (PrintMethodFlushing) { - nm->print_on(tty, " ### nmethod is speculatively disconnected"); - } 
- if (LogCompilation && (xtty != NULL)) { - ttyLocker ttyl; - xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id()); - xtty->method(nm->method()); - xtty->stamp(); - xtty->end_elem(); - } - nm->method()->clear_code(); - nm->set_speculatively_disconnected(true); -} - void CodeCache::gc_prologue() { assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called"); } - void CodeCache::gc_epilogue() { assert_locked_or_safepoint(CodeCache_lock); FOR_ALL_ALIVE_BLOBS(cb) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/codeCache.hpp --- a/src/share/vm/code/codeCache.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/codeCache.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -57,7 +57,6 @@ static int _number_of_nmethods_with_dependencies; static bool _needs_cache_clean; static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() - static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods. static void verify_if_often() PRODUCT_RETURN; @@ -167,17 +166,12 @@ static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; } static double reverse_free_ratio(); static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches - static nmethod* reanimate_saved_code(Method* m); - static void remove_saved_code(nmethod* nm); - static void speculatively_disconnect(nmethod* nm); - // Deoptimization static int mark_for_deoptimization(DepChange& changes); #ifdef HOTSWAP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/compiledIC.cpp --- a/src/share/vm/code/compiledIC.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/compiledIC.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -167,32 +167,42 @@ // High-level access to an inline cache. Guaranteed to be MT-safe. 
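
The compiledIC.cpp hunk just below changes CompiledIC::set_to_megamorphic() from void to bool: VtableStubs::find_vtable_stub()/find_itable_stub() can come back NULL when the code cache has no room for a new stub, and instead of asserting, the method now leaves the inline cache untouched and reports failure (see the comment added to the declaration in compiledIC.hpp further down). A standalone sketch of that contract, with a toy allocator standing in for the code cache:

    #include <cstddef>
    #include <cstdio>

    // Toy stand-in for the code cache: stub lookup/creation can fail when full.
    static bool code_cache_full = false;

    static void* find_vtable_stub(int vtable_index) {
      if (code_cache_full) return NULL;  // allocation failed
      return reinterpret_cast<void*>(0x1000 + vtable_index * 0x10);
    }

    // Mirrors the new contract: return false instead of asserting, leaving
    // the inline cache in its current (correct, just not megamorphic) state.
    static bool set_to_megamorphic_sketch(int vtable_index) {
      void* entry = find_vtable_stub(vtable_index);
      if (entry == NULL) {
        return false;  // caller may retry once the code cache has space
      }
      // ... would patch the call site to dispatch through 'entry' ...
      return true;
    }

    int main() {
      std::printf("first try: %s\n", set_to_megamorphic_sketch(3) ? "ok" : "failed");
      code_cache_full = true;
      std::printf("when full: %s\n", set_to_megamorphic_sketch(3) ? "ok" : "failed");
      return 0;
    }
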
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) { - methodHandle method = call_info->selected_method(); - bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index()); +bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) { assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic"); assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?"); address entry; - if (is_invoke_interface) { - int index = klassItable::compute_itable_index(call_info->resolved_method()()); - entry = VtableStubs::create_stub(false, index, method()); - assert(entry != NULL, "entry not computed"); + if (call_info->call_kind() == CallInfo::itable_call) { + assert(bytecode == Bytecodes::_invokeinterface, ""); + int itable_index = call_info->itable_index(); + entry = VtableStubs::find_itable_stub(itable_index); + if (entry == NULL) { + return false; + } +#ifdef ASSERT + int index = call_info->resolved_method()->itable_index(); + assert(index == itable_index, "CallInfo pre-computes this"); +#endif //ASSERT InstanceKlass* k = call_info->resolved_method()->method_holder(); - assert(k->is_interface(), "sanity check"); + assert(k->verify_itable_index(itable_index), "sanity check"); InlineCacheBuffer::create_transition_stub(this, k, entry); } else { - // Can be different than method->vtable_index(), due to package-private etc. + assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable"); + // Can be different than selected_method->vtable_index(), due to package-private etc. int vtable_index = call_info->vtable_index(); - entry = VtableStubs::create_stub(true, vtable_index, method()); - InlineCacheBuffer::create_transition_stub(this, method(), entry); + assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check"); + entry = VtableStubs::find_vtable_stub(vtable_index); + if (entry == NULL) { + return false; + } + InlineCacheBuffer::create_transition_stub(this, NULL, entry); } if (TraceICs) { ResourceMark rm; tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT, - instruction_address(), method->print_value_string(), entry); + instruction_address(), call_info->selected_method()->print_value_string(), entry); } // We can't check this anymore. With lazy deopt we could have already @@ -202,6 +212,7 @@ // race because the IC entry was complete when we safepointed so // cleaning it immediately is harmless. // assert(is_megamorphic(), "sanity check"); + return true; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/compiledIC.hpp --- a/src/share/vm/code/compiledIC.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/compiledIC.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -226,7 +226,10 @@ // void set_to_clean(); // Can only be called during a safepoint operation void set_to_monomorphic(CompiledICInfo& info); - void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS); + + // Returns true if successful and false otherwise. The call can fail if memory + // allocation in the code cache fails.
+ bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS); static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass, bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/debugInfoRec.cpp --- a/src/share/vm/code/debugInfoRec.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/debugInfoRec.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ int _length; // number of bytes in the stream int _hash; // hash of stream bytes (for quicker reuse) - void* operator new(size_t ignore, DebugInformationRecorder* dir) { + void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() { assert(ignore == sizeof(DIR_Chunk), ""); if (dir->_next_chunk >= dir->_next_chunk_limit) { const int CHUNK = 100; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/nmethod.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,8 +96,9 @@ #endif bool nmethod::is_compiled_by_c1() const { - if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing - if (is_native_method()) return false; + if (compiler() == NULL) { + return false; + } return compiler()->is_c1(); } bool nmethod::is_compiled_by_graal() const { @@ -106,13 +107,15 @@ return compiler()->is_graal(); } bool nmethod::is_compiled_by_c2() const { - if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing - if (is_native_method()) return false; + if (compiler() == NULL) { + return false; + } return compiler()->is_c2(); } bool nmethod::is_compiled_by_shark() const { - if (is_native_method()) return false; - assert(compiler() != NULL, "must be"); + if (compiler() == NULL) { + return false; + } return compiler()->is_shark(); } @@ -465,7 +468,6 @@ _state = alive; _marked_for_reclamation = 0; _has_flushed_dependencies = 0; - _speculatively_disconnected = 0; _has_unsafe_access = 0; _has_method_handle_invokes = 0; _lazy_critical_native = 0; @@ -484,7 +486,6 @@ _osr_link = NULL; _scavenge_root_link = NULL; _scavenge_root_state = 0; - _saved_nmethod_link = NULL; _compiler = NULL; #ifdef GRAAL _graal_installed_code = NULL; @@ -705,10 +706,12 @@ _osr_entry_point = NULL; _exception_cache = NULL; _pc_desc_cache.reset_to(NULL); + _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); if (ScavengeRootsInCode && detect_scavenge_root_oops()) { CodeCache::add_scavenge_root_nmethod(this); + Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); CodeCache::commit(this); @@ -788,6 +791,7 @@ _osr_entry_point = NULL; _exception_cache = NULL; _pc_desc_cache.reset_to(NULL); + _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); 
debug_only(verify_scavenge_root_oops()); @@ -821,7 +825,7 @@ } #endif // def HAVE_DTRACE_H -void* nmethod::operator new(size_t size, int nmethod_size) throw () { +void* nmethod::operator new(size_t size, int nmethod_size) throw() { // Not critical, may return null if there is too little continuous memory return CodeCache::allocate(nmethod_size); } @@ -865,6 +869,7 @@ _comp_level = comp_level; _compiler = compiler; _orig_pc_offset = orig_pc_offset; + _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); // Section offsets _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); @@ -935,6 +940,7 @@ dependencies->copy_to(this); if (ScavengeRootsInCode && detect_scavenge_root_oops()) { CodeCache::add_scavenge_root_nmethod(this); + Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); @@ -1229,7 +1235,7 @@ // This is a private interface with the sweeper. void nmethod::mark_as_seen_on_stack() { - assert(is_not_entrant(), "must be a non-entrant method"); + assert(is_alive(), "Must be an alive method"); // Set the traversal mark to ensure that the sweeper does 2 // cleaning passes before moving to zombie. set_stack_traversal_mark(NMethodSweeper::traversal_count()); @@ -1325,7 +1331,7 @@ set_osr_link(NULL); //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods - NMethodSweeper::notify(this); + NMethodSweeper::notify(); } void nmethod::invalidate_osr_method() { @@ -1369,6 +1375,13 @@ methodHandle the_method(method()); No_Safepoint_Verifier nsv; + // during patching, depending on the nmethod state we must notify the GC that + // code has been unloaded, unregistering it. We cannot do this right while + // holding the Patching_lock because we need to use the CodeCache_lock. This + // would be prone to deadlocks. + // This flag is used to remember whether we need to later lock and unregister. + bool nmethod_needs_unregister = false; + { // invalidate osr nmethod before acquiring the patching lock since // they both acquire leaf locks and we don't want a deadlock. @@ -1408,6 +1421,22 @@ inc_decompile_count(); } + // If the state is becoming a zombie, signal to unregister the nmethod with + // the heap. + // This nmethod may have already been unloaded during a full GC. + if ((state == zombie) && !is_unloaded()) { + nmethod_needs_unregister = true; + } + + // Must happen before state change. Otherwise we have a race condition in + // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately + // transition its state from 'not_entrant' to 'zombie' without having to wait + // for stack scanning. + if (state == not_entrant) { + mark_as_seen_on_stack(); + OrderAccess::storestore(); + } + // Change state _state = state; @@ -1426,11 +1455,6 @@ HandleMark hm; method()->clear_code(); } - - if (state == not_entrant) { - mark_as_seen_on_stack(); - } - } // leave critical region under Patching_lock // When the nmethod becomes zombie it is no longer alive so the @@ -1443,6 +1467,9 @@ // safepoint can sneak in, otherwise the oops used by the // dependency logic could have become stale. MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + if (nmethod_needs_unregister) { + Universe::heap()->unregister_nmethod(this); + } flush_dependencies(NULL); } @@ -1458,6 +1485,9 @@ // nmethods aren't scanned for GC. 
_oops_are_stale = true; #endif + // the Method may be reclaimed by class unloading now that the + // nmethod is in zombie state + set_method(NULL); } else { assert(state == not_entrant, "other cases may need to be handled differently"); } @@ -1468,7 +1498,7 @@ } // Make sweeper aware that there is a zombie method that needs to be removed - NMethodSweeper::notify(this); + NMethodSweeper::notify(); return true; } @@ -1503,10 +1533,6 @@ CodeCache::drop_scavenge_root_nmethod(this); } - if (is_speculatively_disconnected()) { - CodeCache::remove_saved_code(this); - } - #ifdef SHARK ((SharkCompiler *) compiler())->free_compiled_method(insts_begin()); #endif // SHARK @@ -1908,21 +1934,10 @@ if (_method != NULL) f(_method); } - -// This method is called twice during GC -- once while -// tracing the "active" nmethods on thread stacks during -// the (strong) marking phase, and then again when walking -// the code cache contents during the weak roots processing -// phase. The two uses are distinguished by means of the -// 'do_strong_roots_only' flag, which is true in the first -// case. We want to walk the weak roots in the nmethod -// only in the second case. The weak roots in the nmethod -// are the oops in the ExceptionCache and the InlineCache -// oops. -void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) { +void nmethod::oops_do(OopClosure* f, bool allow_zombie) { // make sure the oops ready to receive visitors - assert(!is_zombie() && !is_unloaded(), - "should not call follow on zombie or unloaded nmethod"); + assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod"); + assert(!is_unloaded(), "should not call follow on unloaded nmethod"); // If the method is not entrant or zombie then a JMP is plastered over the // first few bytes. If an oop in the old code was there, that oop @@ -2051,7 +2066,7 @@ if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root"); tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")", _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm), - (intptr_t)(*p), (intptr_t)p); + (void *)(*p), (intptr_t)p); (*p)->print(); } #endif //PRODUCT @@ -2431,7 +2446,7 @@ _ok = false; } tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", - (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); + (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); } virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; @@ -2553,7 +2568,7 @@ _ok = false; } tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", - (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); + (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); (*p)->print(); } virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/nmethod.hpp --- a/src/share/vm/code/nmethod.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/nmethod.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -125,7 +125,6 @@ // To support simple linked-list chaining of nmethods: nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods - nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect static nmethod* volatile _oops_do_mark_nmethods; nmethod* volatile _oops_do_mark_link; @@ -172,7 +171,6 @@ // protected by CodeCache_lock bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) - bool _speculatively_disconnected; // Marked for potential unload bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper) bool _marked_for_deoptimization; // Used for stack deoptimization @@ -188,7 +186,7 @@ unsigned int _external_method:1; // Set for GPU methods // Protected by Patching_lock - unsigned char _state; // {alive, not_entrant, zombie, unloaded} + volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded} #ifdef ASSERT bool _oops_are_stale; // indicates that it's no longer safe to access oops section @@ -210,11 +208,18 @@ // not_entrant method removal. Each mark_sweep pass will update // this mark to current sweep invocation count if it is seen on the - // stack. An not_entrant method can be removed when there is no + // stack. An not_entrant method can be removed when there are no // more activations, i.e., when the _stack_traversal_mark is less than // current sweep traversal index. long _stack_traversal_mark; + // The _hotness_counter indicates the hotness of a method. The higher + // the value the hotter the method. The hotness counter of a nmethod is + // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method + // is active while stack scanning (mark_active_nmethods()). The hotness + // counter is decreased (by 1) while sweeping. + int _hotness_counter; + ExceptionCache *_exception_cache; PcDescCache _pc_desc_cache; @@ -279,7 +284,7 @@ ); // helper methods - void* operator new(size_t size, int nmethod_size); + void* operator new(size_t size, int nmethod_size) throw(); const char* reloc_string_for(u_char* begin, u_char* end); // Returns true if this thread changed the state of the nmethod or @@ -405,6 +410,10 @@ int total_size () const; + void dec_hotness_counter() { _hotness_counter--; } + void set_hotness_counter(int val) { _hotness_counter = val; } + int hotness_counter() const { return _hotness_counter; } + // Containment bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); } @@ -431,8 +440,8 @@ // alive. It is used when an uncommon trap happens. Returns true // if this thread changed the state of the nmethod or false if // another thread performed the transition. 
- bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); } - bool make_zombie() { return make_not_entrant_or_zombie(zombie); } + bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); } + bool make_zombie() { return make_not_entrant_or_zombie(zombie); } // used by jvmti to track if the unload event has been reported bool unload_reported() { return _unload_reported; } @@ -460,9 +469,6 @@ bool has_method_handle_invokes() const { return _has_method_handle_invokes; } void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } - bool is_speculatively_disconnected() const { return _speculatively_disconnected; } - void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; } - bool is_external_method() const { return _external_method; } void set_external_method(bool z) { _external_method = z; } @@ -525,9 +531,6 @@ nmethod* scavenge_root_link() const { return _scavenge_root_link; } void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; } - nmethod* saved_nmethod_link() const { return _saved_nmethod_link; } - void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; } - public: // Sweeper support @@ -597,7 +600,7 @@ void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f); void oops_do(OopClosure* f) { oops_do(f, false); } - void oops_do(OopClosure* f, bool do_strong_roots_only); + void oops_do(OopClosure* f, bool allow_zombie); bool detect_scavenge_root_oops(); void verify_scavenge_root_oops() PRODUCT_RETURN; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/relocInfo.hpp --- a/src/share/vm/code/relocInfo.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/relocInfo.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -677,7 +677,7 @@ } public: - void* operator new(size_t size, const RelocationHolder& holder) { + void* operator new(size_t size, const RelocationHolder& holder) throw() { if (size > sizeof(holder._relocbuf)) guarantee_size(); assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree"); return holder.reloc(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/vtableStubs.cpp --- a/src/share/vm/code/vtableStubs.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/vtableStubs.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,12 +46,9 @@ address VtableStub::_chunk_end = NULL; VMReg VtableStub::_receiver_location = VMRegImpl::Bad(); -static int num_vtable_chunks = 0; - -void* VtableStub::operator new(size_t size, int code_size) { +void* VtableStub::operator new(size_t size, int code_size) throw() { assert(size == sizeof(VtableStub), "mismatched size"); - num_vtable_chunks++; // compute real VtableStub size (rounded to nearest word) const int real_size = round_to(code_size + sizeof(VtableStub), wordSize); // malloc them in chunks to minimize header overhead @@ -60,7 +57,7 @@ const int bytes = chunk_factor * real_size + pd_code_alignment(); BufferBlob* blob = BufferBlob::create("vtable chunks", bytes); if (blob == NULL) { - vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks"); + return NULL; } _chunk = blob->content_begin(); _chunk_end = _chunk + bytes; @@ -111,7 +108,7 @@ } -address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) { +address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) { assert(vtable_index >= 0, "must be positive"); VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL; @@ -121,6 +118,12 @@ } else { s = create_itable_stub(vtable_index); } + + // Creation of vtable or itable can fail if there is not enough free space in the code cache. + if (s == NULL) { + return NULL; + } + enter(is_vtable_stub, vtable_index, s); if (PrintAdapterHandlers) { tty->print_cr("Decoding VtableStub %s[%d]@%d", diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/code/vtableStubs.hpp --- a/src/share/vm/code/vtableStubs.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/code/vtableStubs.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ bool _is_vtable_stub; // True if vtable stub, false, is itable stub /* code follows here */ // The vtableStub code - void* operator new(size_t size, int code_size); + void* operator new(size_t size, int code_size) throw(); VtableStub(bool is_vtable_stub, int index) : _next(NULL), _is_vtable_stub(is_vtable_stub), @@ -121,9 +121,11 @@ static VtableStub* lookup (bool is_vtable_stub, int vtable_index); static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s); static inline uint hash (bool is_vtable_stub, int vtable_index); + static address find_stub (bool is_vtable_stub, int vtable_index); public: - static address create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call + static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); } + static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); } static bool is_entry_point(address pc); // is pc a vtable stub entry point? static bool contains(address pc); // is pc within any stub? 
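Because VtableStub::operator new above now returns NULL on a full code cache instead of calling vm_exit_out_of_memory, callers of the renamed find_vtable_stub/find_itable_stub entry points just declared must tolerate a NULL result. A condensed sketch of that pattern; the helper name install_vtable_dispatch is hypothetical, the real caller being the compiledIC.cpp hunk earlier in this change:

// Sketch: a NULL stub address now signals a full code cache, not a
// fatal error, so every lookup is guarded.
static bool install_vtable_dispatch(CompiledIC* ic, int vtable_index) {
  address entry = VtableStubs::find_vtable_stub(vtable_index);
  if (entry == NULL) {
    return false;  // code cache full; caller backs out
  }
  InlineCacheBuffer::create_transition_stub(ic, NULL, entry);
  return true;
}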
static VtableStub* stub_containing(address pc); // stub containing pc or NULL diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/compiler/compileBroker.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -637,19 +637,36 @@ NMethodSweeper::possibly_sweep(); MutexLocker locker(lock()); - // Wait for an available CompileTask. + // If _first is NULL we have no more compile jobs. There are two reasons for + // having no compile jobs: First, we compiled everything we wanted. Second, + // we ran out of code cache so compilation has been disabled. In the latter + // case we perform code cache sweeps to free memory such that we can re-enable + // compilation. while (_first == NULL) { - // There is no work to be done right now. Wait. - if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) { - // During the emergency sweeping periods, wake up and sweep occasionally - bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000); - if (timedout) { + if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) { + // Wait a certain amount of time to possibly do another sweep. + // We must wait until stack scanning has happened so that we can + // transition a method's state from 'not_entrant' to 'zombie'. + long wait_time = NmethodSweepCheckInterval * 1000; + if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) { + // Only one thread at a time can do sweeping. Scale the + // wait time according to the number of compiler threads. + // As a result, the next sweep is likely to happen every 100ms + // with an arbitrary number of threads that do sweeping. + wait_time = 100 * CICompilerCount; + } + bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time); + if (timeout) { MutexUnlocker ul(lock()); - // When otherwise not busy, run nmethod sweeping NMethodSweeper::possibly_sweep(); } } else { - // During normal operation no need to wake up on timer + // If there are no compilation tasks and we can compile new jobs + // (i.e., there is enough free space in the code cache) there is + // no need to invoke the sweeper. As a result, the hotness of methods + // remains unchanged. This behavior is desired, since we want to keep + // the stable state, i.e., we do not want to evict methods from the + // code cache if it is unnecessary. 
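In other words, the idle branch simply blocks on the lock (the lock()->wait() call that resumes below), while the throttled branch waits with a timeout. That timeout policy, lifted into a stand-alone sketch; the helper name sweep_wait_millis is hypothetical, but the logic matches the hunk:

// A user-set NmethodSweepCheckInterval (in seconds) is honored as-is;
// otherwise the wait is scaled by the number of compiler threads so
// that some compiler thread attempts a sweep roughly every 100 ms.
static long sweep_wait_millis() {
  if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
    return 100 * CICompilerCount;
  }
  return NmethodSweepCheckInterval * 1000;
}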
lock()->wait(); } } @@ -1246,16 +1263,9 @@ return method_code; } } - if (method->is_not_compilable(comp_level)) return NULL; - - if (UseCodeCacheFlushing) { - nmethod* saved = CodeCache::reanimate_saved_code(method()); - if (saved != NULL) { - method->set_code(method, saved); - return saved; - } + if (method->is_not_compilable(comp_level)) { + return NULL; + } - } else { // osr compilation #ifndef TIERED @@ -1604,9 +1614,6 @@ if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { // the code cache is really full handle_full_code_cache(); - } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) { - // Attempt to start cleaning the code cache while there is still a little headroom - NMethodSweeper::handle_full_code_cache(false); } CompileTask* task = queue->get(); @@ -1737,7 +1744,7 @@ CodeCache::print_summary(&s, detailed); } ttyLocker ttyl; - tty->print_cr(s.as_string()); + tty->print(s.as_string()); } // ------------------------------------------------------------------ @@ -1962,7 +1969,11 @@ } #endif if (UseCodeCacheFlushing) { - NMethodSweeper::handle_full_code_cache(true); + // Since code cache is full, immediately stop new compiles + if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { + NMethodSweeper::log_sweep("disable_compiler"); + NMethodSweeper::possibly_sweep(); + } } else { UseCompiler = false; AlwaysCompileLoopMethods = false; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/compiler/oopMap.cpp --- a/src/share/vm/compiler/oopMap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/compiler/oopMap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -631,7 +631,7 @@ // Returns value of location as an int -intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); } +intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); } void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -230,7 +230,7 @@ // depends on this property. debug_only( FreeChunk* junk = NULL; - assert(UseCompressedKlassPointers || + assert(UseCompressedClassPointers || junk->prev_addr() == (void*)(oop(junk)->klass_addr()), "Offset of FreeChunk::_prev within FreeChunk must match" " that of OopDesc::_klass within OopDesc"); @@ -1407,7 +1407,7 @@ assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size"); OrderAccess::storestore(); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { // Copy gap missed by (aligned) header size calculation below obj->set_klass_gap(old->klass_gap()); } @@ -3460,7 +3460,9 @@ void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { assert_locked_or_safepoint(Heap_lock); size_t size = ReservedSpace::page_align_size_down(bytes); - if (size > 0) { + // Only shrink if a compaction was done so that all the free space + // in the generation is in a contiguous block at the end.
+ if (size > 0 && did_compact()) { shrink_by(size); } } @@ -5478,40 +5480,42 @@ HandleMark hm; SequentialSubTasksDone* pst = space->par_seq_tasks(); - assert(pst->valid(), "Uninitialized use?"); uint nth_task = 0; uint n_tasks = pst->n_tasks(); - HeapWord *start, *end; - while (!pst->is_task_claimed(/* reference */ nth_task)) { - // We claimed task # nth_task; compute its boundaries. - if (chunk_top == 0) { // no samples were taken - assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); - start = space->bottom(); - end = space->top(); - } else if (nth_task == 0) { - start = space->bottom(); - end = chunk_array[nth_task]; - } else if (nth_task < (uint)chunk_top) { - assert(nth_task >= 1, "Control point invariant"); - start = chunk_array[nth_task - 1]; - end = chunk_array[nth_task]; - } else { - assert(nth_task == (uint)chunk_top, "Control point invariant"); - start = chunk_array[chunk_top - 1]; - end = space->top(); - } - MemRegion mr(start, end); - // Verify that mr is in space - assert(mr.is_empty() || space->used_region().contains(mr), - "Should be in space"); - // Verify that "start" is an object boundary - assert(mr.is_empty() || oop(mr.start())->is_oop(), - "Should be an oop"); - space->par_oop_iterate(mr, cl); - } - pst->all_tasks_completed(); + if (n_tasks > 0) { + assert(pst->valid(), "Uninitialized use?"); + HeapWord *start, *end; + while (!pst->is_task_claimed(/* reference */ nth_task)) { + // We claimed task # nth_task; compute its boundaries. + if (chunk_top == 0) { // no samples were taken + assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); + start = space->bottom(); + end = space->top(); + } else if (nth_task == 0) { + start = space->bottom(); + end = chunk_array[nth_task]; + } else if (nth_task < (uint)chunk_top) { + assert(nth_task >= 1, "Control point invariant"); + start = chunk_array[nth_task - 1]; + end = chunk_array[nth_task]; + } else { + assert(nth_task == (uint)chunk_top, "Control point invariant"); + start = chunk_array[chunk_top - 1]; + end = space->top(); + } + MemRegion mr(start, end); + // Verify that mr is in space + assert(mr.is_empty() || space->used_region().contains(mr), + "Should be in space"); + // Verify that "start" is an object boundary + assert(mr.is_empty() || oop(mr.start())->is_oop(), + "Should be an oop"); + space->par_oop_iterate(mr, cl); + } + pst->all_tasks_completed(); + } } void @@ -5788,7 +5792,7 @@ DefNewGeneration* dng = (DefNewGeneration*)_young_gen; // Eden space - { + if (!dng->eden()->is_empty()) { SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks(); assert(!pst->valid(), "Clobbering existing data?"); // Each valid entry in [0, _eden_chunk_index) represents a task. 
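The reworked eden-space scanning code above moves every use of the SequentialSubTasksDone under an n_tasks > 0 guard, since pst->valid() need not hold when there are no tasks. The shape of the pattern, with the per-task boundary computation elided (pst, space and cl as in the hunk):

// Guarded task claiming: only touch the task state when tasks exist.
uint nth_task = 0;
uint n_tasks  = pst->n_tasks();
if (n_tasks > 0) {
  assert(pst->valid(), "Uninitialized use?");
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // ... compute MemRegion [start, end) for task nth_task,
    // verify it, and call space->par_oop_iterate(mr, cl) ...
  }
  pst->all_tasks_completed();
}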
@@ -8694,9 +8698,10 @@ assert(inFreeRange(), "Should only be called if currently in a free range."); HeapWord* const eob = ((HeapWord*)fc) + chunk_size; assert(_sp->used_region().contains(eob - 1), - err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")" + err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT + " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")" " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")", - _limit, _sp->bottom(), _sp->end(), fc, chunk_size)); + eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size)); if (eob >= _limit) { assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit"); if (CMSTraceSweeper) { @@ -9060,7 +9065,7 @@ return !stack->isEmpty(); } -#define BUSY (oop(0x1aff1aff)) +#define BUSY (cast_to_oop(0x1aff1aff)) // (MT-safe) Get a prefix of at most "num" from the list. // The overflow list is chained through the mark word of // each object in the list. We fetch the entire list, @@ -9093,7 +9098,7 @@ return false; } // Grab the entire list; we'll put back a suffix - oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); + oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); Thread* tid = Thread::current(); // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was // set to ParallelGCThreads. @@ -9108,7 +9113,7 @@ return false; } else if (_overflow_list != BUSY) { // Try and grab the prefix - prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); + prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); } } // If the list was found to be empty, or we spun long diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -481,9 +481,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) : _g1h(g1h), - _markBitMap1(MinObjAlignment - 1), - _markBitMap2(MinObjAlignment - 1), - + _markBitMap1(log2_intptr(MinObjAlignment)), + _markBitMap2(log2_intptr(MinObjAlignment)), _parallel_marking_threads(0), _max_parallel_marking_threads(0), _sleep_factor(0.0), @@ -2695,7 +2694,7 @@ if (print_it) { _out->print_cr(" "PTR_FORMAT"%s", - o, (over_tams) ? " >" : (marked) ? " M" : ""); + (void *)o, (over_tams) ? " >" : (marked) ? 
" M" : ""); PrintReachableOopClosure oopCl(_out, _vo, _all); o->oop_iterate_no_header(&oopCl); } @@ -4529,7 +4528,7 @@ _total_prev_live_bytes(0), _total_next_live_bytes(0), _hum_used_bytes(0), _hum_capacity_bytes(0), _hum_prev_live_bytes(0), _hum_next_live_bytes(0), - _total_remset_bytes(0) { + _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); MemRegion g1_committed = g1h->g1_committed(); MemRegion g1_reserved = g1h->g1_reserved(); @@ -4553,9 +4552,11 @@ G1PPRL_BYTE_H_FORMAT G1PPRL_BYTE_H_FORMAT G1PPRL_DOUBLE_H_FORMAT + G1PPRL_BYTE_H_FORMAT G1PPRL_BYTE_H_FORMAT, "type", "address-range", - "used", "prev-live", "next-live", "gc-eff", "remset"); + "used", "prev-live", "next-live", "gc-eff", + "remset", "code-roots"); _out->print_cr(G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT G1PPRL_ADDR_BASE_H_FORMAT @@ -4563,9 +4564,11 @@ G1PPRL_BYTE_H_FORMAT G1PPRL_BYTE_H_FORMAT G1PPRL_DOUBLE_H_FORMAT + G1PPRL_BYTE_H_FORMAT G1PPRL_BYTE_H_FORMAT, "", "", - "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)"); + "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", + "(bytes)", "(bytes)"); } // It takes as a parameter a reference to one of the _hum_* fields, it @@ -4608,6 +4611,8 @@ size_t next_live_bytes = r->next_live_bytes(); double gc_eff = r->gc_efficiency(); size_t remset_bytes = r->rem_set()->mem_size(); + size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); + if (r->used() == 0) { type = "FREE"; } else if (r->is_survivor()) { @@ -4642,6 +4647,7 @@ _total_prev_live_bytes += prev_live_bytes; _total_next_live_bytes += next_live_bytes; _total_remset_bytes += remset_bytes; + _total_strong_code_roots_bytes += strong_code_roots_bytes; // Print a line for this particular region. _out->print_cr(G1PPRL_LINE_PREFIX @@ -4651,9 +4657,11 @@ G1PPRL_BYTE_FORMAT G1PPRL_BYTE_FORMAT G1PPRL_DOUBLE_FORMAT + G1PPRL_BYTE_FORMAT G1PPRL_BYTE_FORMAT, type, bottom, end, - used_bytes, prev_live_bytes, next_live_bytes, gc_eff , remset_bytes); + used_bytes, prev_live_bytes, next_live_bytes, gc_eff, + remset_bytes, strong_code_roots_bytes); return false; } @@ -4669,7 +4677,8 @@ G1PPRL_SUM_MB_PERC_FORMAT("used") G1PPRL_SUM_MB_PERC_FORMAT("prev-live") G1PPRL_SUM_MB_PERC_FORMAT("next-live") - G1PPRL_SUM_MB_FORMAT("remset"), + G1PPRL_SUM_MB_FORMAT("remset") + G1PPRL_SUM_MB_FORMAT("code-roots"), bytes_to_mb(_total_capacity_bytes), bytes_to_mb(_total_used_bytes), perc(_total_used_bytes, _total_capacity_bytes), @@ -4677,6 +4686,7 @@ perc(_total_prev_live_bytes, _total_capacity_bytes), bytes_to_mb(_total_next_live_bytes), perc(_total_next_live_bytes, _total_capacity_bytes), - bytes_to_mb(_total_remset_bytes)); + bytes_to_mb(_total_remset_bytes), + bytes_to_mb(_total_strong_code_roots_bytes)); _out->cr(); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1257,6 +1257,9 @@ // Accumulator for the remembered set size size_t _total_remset_bytes; + // Accumulator for strong code roots memory size + size_t _total_strong_code_roots_bytes; + static double perc(size_t val, size_t total) { if (total == 0) { return 0.0; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ 
b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -81,7 +81,7 @@ size_t* marked_bytes_array, BitMap* task_card_bm) { G1CollectedHeap* g1h = _g1h; - CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set()); + CardTableModRefBS* ct_bs = g1h->g1_barrier_set(); HeapWord* start = mr.start(); HeapWord* end = mr.end(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1BiasedArray.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1BiasedArray.hpp" + +#ifndef PRODUCT +void G1BiasedMappedArrayBase::verify_index(idx_t index) const { + guarantee(_base != NULL, "Array not initialized"); + guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length())); +} + +void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const { + guarantee(_biased_base != NULL, "Array not initialized"); + guarantee(biased_index >= bias() && biased_index < (bias() + length()), + err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length())); +} + +void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const { + guarantee(_biased_base != NULL, "Array not initialized"); + guarantee(biased_index >= bias() && biased_index <= (bias() + length()), + err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length())); +} + +class TestMappedArray : public G1BiasedMappedArray<int> { +protected: + virtual int default_value() const { return 0xBAADBABE; } +public: + static void test_biasedarray() { + const size_t REGION_SIZE_IN_WORDS = 512; + const size_t NUM_REGIONS = 20; + HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero + + TestMappedArray array; + array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS, + REGION_SIZE_IN_WORDS * HeapWordSize); + // Check address calculation (bounds) + assert(array.bottom_address_mapped() == fake_heap, + err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped())); + assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS *
NUM_REGIONS), "must be"); + + int* bottom = array.address_mapped_to(fake_heap); + assert((void*)bottom == (void*) array.base(), "must be"); + int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS); + assert((void*)end == (void*)(array.base() + array.length()), "must be"); + // The entire array should contain default value elements + for (int* current = bottom; current < end; current++) { + assert(*current == array.default_value(), "must be"); + } + + // Test setting values in the table + + HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2); + HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1); + + // Set/get by address tests: invert some value; first retrieve one + int actual_value = array.get_by_index(NUM_REGIONS / 2); + array.set_by_index(NUM_REGIONS / 2, ~actual_value); + // Get the same value by address, should correspond to the start of the "region" + int value = array.get_by_address(region_start_address); + assert(value == ~actual_value, "must be"); + // Get the same value by address, at one HeapWord before the start + value = array.get_by_address(region_start_address - 1); + assert(value == array.default_value(), "must be"); + // Get the same value by address, at the end of the "region" + value = array.get_by_address(region_end_address); + assert(value == ~actual_value, "must be"); + // Make sure the next value maps to another index + value = array.get_by_address(region_end_address + 1); + assert(value == array.default_value(), "must be"); + + // Reset the value in the array + array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value); + + // The entire array should have the default value again + for (int* current = bottom; current < end; current++) { + assert(*current == array.default_value(), "must be"); + } + + // Set/get by index tests: invert some value + idx_t index = NUM_REGIONS / 2; + actual_value = array.get_by_index(index); + array.set_by_index(index, ~actual_value); + + value = array.get_by_index(index); + assert(value == ~actual_value, "must be"); + + value = array.get_by_index(index - 1); + assert(value == array.default_value(), "must be"); + + value = array.get_by_index(index + 1); + assert(value == array.default_value(), "must be"); + + array.set_by_index(0, 0); + value = array.get_by_index(0); + assert(value == 0, "must be"); + + array.set_by_index(array.length() - 1, 0); + value = array.get_by_index(array.length() - 1); + assert(value == 0, "must be"); + + array.set_by_index(index, 0); + + // The array should have three zeros, and default values otherwise + size_t num_zeros = 0; + for (int* current = bottom; current < end; current++) { + assert(*current == array.default_value() || *current == 0, "must be"); + if (*current == 0) { + num_zeros++; + } + } + assert(num_zeros == 3, "must be"); + } +}; + +void TestG1BiasedArray_test() { + TestMappedArray::test_biasedarray(); +} + +#endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1BiasedArray.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP + +#include "utilities/debug.hpp" +#include "memory/allocation.inline.hpp" + +// Implements the common base functionality for arrays that contain provisions +// for accessing their elements using a biased index. +// The element type is defined by instantiating the template. +class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; +public: + typedef size_t idx_t; +protected: + address _base; // the real base address + size_t _length; // the length of the array + address _biased_base; // base address biased by "bias" elements + size_t _bias; // the bias, i.e. the offset biased_base is located to the right in elements + uint _shift_by; // the number of bits to shift right when mapping to an index of the array. + +protected: + + G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL), + _bias(0), _shift_by(0) { } + + // Allocate a new array, generic version. + static address create_new_base_array(size_t length, size_t elem_size) { + assert(length > 0, "just checking"); + assert(elem_size > 0, "just checking"); + return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC); + } + + // Initialize the members of this class. The biased start address of this array + // is the bias (in elements) multiplied by the element size. + void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) { + assert(base != NULL, "just checking"); + assert(length > 0, "just checking"); + assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by)); + _base = base; + _length = length; + _biased_base = base - (bias * elem_size); + _bias = bias; + _shift_by = shift_by; + } + + // Allocate and initialize this array to cover the heap addresses in the range + // of [bottom, end).
+ void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) { + assert(mapping_granularity_in_bytes > 0, "just checking"); + assert(is_power_of_2(mapping_granularity_in_bytes), + err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes)); + assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0, + err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT, + mapping_granularity_in_bytes, bottom)); + assert((uintptr_t)end % mapping_granularity_in_bytes == 0, + err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT, + mapping_granularity_in_bytes, end)); + size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize); + idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes; + address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes); + initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes)); + } + + size_t bias() const { return _bias; } + uint shift_by() const { return _shift_by; } + + void verify_index(idx_t index) const PRODUCT_RETURN; + void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN; + void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN; + +public: + // Return the length of the array in elements. + size_t length() const { return _length; } +}; + +// Array that provides biased access and mapping from (valid) addresses in the +// heap into this array. +template <class T> +class G1BiasedMappedArray : public G1BiasedMappedArrayBase { +public: + typedef G1BiasedMappedArrayBase::idx_t idx_t; + + T* base() const { return (T*)G1BiasedMappedArrayBase::_base; } + // Return the element of the given array at the given index. Assume + // the index is valid. This is a convenience method that does sanity + // checking on the index. + T get_by_index(idx_t index) const { + verify_index(index); + return this->base()[index]; + } + + // Set the element of the given array at the given index to the + // given value. Assume the index is valid. This is a convenience + // method that does sanity checking on the index. + void set_by_index(idx_t index, T value) { + verify_index(index); + this->base()[index] = value; + } + + // The raw biased base pointer. + T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; } + + // Return the element of the given array that covers the given word in the + // heap. Assumes the index is valid. + T get_by_address(HeapWord* value) const { + idx_t biased_index = ((uintptr_t)value) >> this->shift_by(); + this->verify_biased_index(biased_index); + return biased_base()[biased_index]; + } + + // Set the value of the array entry that corresponds to the given address. + void set_by_address(HeapWord * address, T value) { + idx_t biased_index = ((uintptr_t)address) >> this->shift_by(); + this->verify_biased_index(biased_index); + biased_base()[biased_index] = value; + } + +protected: + // Returns the address of the element the given address maps to + T* address_mapped_to(HeapWord* address) { + idx_t biased_index = ((uintptr_t)address) >> this->shift_by(); + this->verify_biased_index_inclusive_end(biased_index); + return biased_base() + biased_index; + } + +public: + // Return the smallest address (inclusive) in the heap that this array covers.
+ HeapWord* bottom_address_mapped() const { + return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by()); + } + + // Return the highest address (exclusive) in the heap that this array covers. + HeapWord* end_address_mapped() const { + return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by()); + } + +protected: + virtual T default_value() const = 0; + // Set all elements of the given array to the given value. + void clear() { + T value = default_value(); + for (idx_t i = 0; i < length(); i++) { + set_by_index(i, value); + } + } +public: + G1BiasedMappedArray() {} + + // Allocate and initialize this array to cover the heap addresses in the range + // of [bottom, end). + void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) { + G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity); + this->clear(); + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CardCounts.cpp --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -33,8 +33,8 @@ void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { if (has_count_table()) { - check_card_num(from_card_num, - err_msg("from card num out of range: "SIZE_FORMAT, from_card_num)); + assert(from_card_num >= 0 && from_card_num < _committed_max_card_num, + err_msg("from card num out of range: "SIZE_FORMAT, from_card_num)); assert(from_card_num < to_card_num, err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT, from_card_num, to_card_num)); @@ -65,9 +65,7 @@ // threshold limit is no more than this. guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity"); - ModRefBarrierSet* bs = _g1h->mr_bs(); - guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); - _ct_bs = (CardTableModRefBS*)bs; + _ct_bs = _g1h->g1_barrier_set(); _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); // Allocate/Reserve the counts table diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CardCounts.hpp --- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -72,25 +72,21 @@ return has_reserved_count_table() && _committed_max_card_num > 0; } - void check_card_num(size_t card_num, const char* msg) { - assert(card_num >= 0 && card_num < _committed_max_card_num, msg); - } - size_t ptr_2_card_num(const jbyte* card_ptr) { assert(card_ptr >= _ct_bot, - err_msg("Inavalied card pointer: " + err_msg("Invalid card pointer: " "card_ptr: " PTR_FORMAT ", " "_ct_bot: " PTR_FORMAT, card_ptr, _ct_bot)); size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte)); - check_card_num(card_num, - err_msg("card pointer out of range: " PTR_FORMAT, card_ptr)); + assert(card_num >= 0 && card_num < _committed_max_card_num, + err_msg("card pointer out of range: " PTR_FORMAT, card_ptr)); return card_num; } jbyte* card_num_2_ptr(size_t card_num) { - check_card_num(card_num, - err_msg("card num out of range: "SIZE_FORMAT, card_num)); + assert(card_num >= 0 && card_num < _committed_max_card_num, + err_msg("card num out of range: "SIZE_FORMAT, card_num)); return (jbyte*) (_ct_bot + card_num); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- 
a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "gc_implementation/g1/bufferingOopClosure.hpp" #include "gc_implementation/g1/concurrentG1Refine.hpp" @@ -124,10 +125,8 @@ int _histo[256]; public: ClearLoggedCardTableEntryClosure() : - _calls(0) + _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) { - _g1h = G1CollectedHeap::heap(); - _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); for (int i = 0; i < 256; i++) _histo[i] = 0; } bool do_card_ptr(jbyte* card_ptr, int worker_i) { @@ -157,11 +156,8 @@ CardTableModRefBS* _ctbs; public: RedirtyLoggedCardTableEntryClosure() : - _calls(0) - { - _g1h = G1CollectedHeap::heap(); - _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); - } + _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {} + bool do_card_ptr(jbyte* card_ptr, int worker_i) { if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { _calls++; @@ -477,7 +473,7 @@ void G1CollectedHeap::check_ct_logs_at_safepoint() { DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); - CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); + CardTableModRefBS* ct_bs = g1_barrier_set(); // Count the dirty cards at the start. CountNonCleanMemRegionClosure count1(this); @@ -980,7 +976,8 @@ if (should_try_gc) { bool succeeded; - result = do_collection_pause(word_size, gc_count_before, &succeeded); + result = do_collection_pause(word_size, gc_count_before, &succeeded, + GCCause::_g1_inc_collection_pause); if (result != NULL) { assert(succeeded, "only way to get back a non-NULL result"); return result; @@ -1105,7 +1102,8 @@ // enough space for the allocation to succeed after the pause. bool succeeded; - result = do_collection_pause(word_size, gc_count_before, &succeeded); + result = do_collection_pause(word_size, gc_count_before, &succeeded, + GCCause::_g1_humongous_allocation); if (result != NULL) { assert(succeeded, "only way to get back a non-NULL result"); return result; @@ -1176,26 +1174,33 @@ ModRefBarrierSet* _mr_bs; public: PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) : - _g1h(g1h), _mr_bs(mr_bs) { } + _g1h(g1h), _mr_bs(mr_bs) {} + bool doHeapRegion(HeapRegion* r) { + HeapRegionRemSet* hrrs = r->rem_set(); + if (r->continuesHumongous()) { + // We'll assert that the strong code root list and RSet is empty + assert(hrrs->strong_code_roots_list_length() == 0, "sanity"); + assert(hrrs->occupied() == 0, "RSet should be empty"); return false; } + _g1h->reset_gc_time_stamps(r); - HeapRegionRemSet* hrrs = r->rem_set(); - if (hrrs != NULL) hrrs->clear(); + hrrs->clear(); // You might think here that we could clear just the cards // corresponding to the used region. But no: if we leave a dirty card // in a region we might allocate into, then it would prevent that card // from being enqueued, and cause it to be missed. // Re: the performance cost: we shouldn't be doing full GC anyway! 
_mr_bs->clear(MemRegion(r->bottom(), r->end())); + return false; } }; void G1CollectedHeap::clear_rsets_post_compaction() { - PostMCRemSetClearClosure rs_clear(this, mr_bs()); + PostMCRemSetClearClosure rs_clear(this, g1_barrier_set()); heap_region_iterate(&rs_clear); } @@ -1269,30 +1274,6 @@ heap_region_iterate(&cl); } -double G1CollectedHeap::verify(bool guard, const char* msg) { - double verify_time_ms = 0.0; - - if (guard && total_collections() >= VerifyGCStartAt) { - double verify_start = os::elapsedTime(); - HandleMark hm; // Discard invalid handles created during verification - prepare_for_verify(); - Universe::verify(VerifyOption_G1UsePrevMarking, msg); - verify_time_ms = (os::elapsedTime() - verify_start) * 1000; - } - - return verify_time_ms; -} - -void G1CollectedHeap::verify_before_gc() { - double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:"); - g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms); -} - -void G1CollectedHeap::verify_after_gc() { - double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:"); - g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms); -} - bool G1CollectedHeap::do_collection(bool explicit_gc, bool clear_all_soft_refs, size_t word_size) { @@ -1433,7 +1414,7 @@ // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); - MetaspaceAux::verify_metrics(); + MetaspaceAux::verify_metrics(); // Note: since we've just done a full GC, concurrent // marking is no longer active. Therefore we need not @@ -1504,6 +1485,9 @@ heap_region_iterate(&rebuild_rs); } + // Rebuild the strong code root lists for each region + rebuild_strong_code_roots(); + if (true) { // FIXME MetaspaceGC::compute_new_size(); } @@ -1788,7 +1772,6 @@ } bool G1CollectedHeap::expand(size_t expand_bytes) { - size_t old_mem_size = _g1_storage.committed_size(); size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_size_up(aligned_expand_bytes, HeapRegion::GrainBytes); @@ -1798,6 +1781,13 @@ ergo_format_byte("attempted expansion amount"), expand_bytes, aligned_expand_bytes); + if (_g1_storage.uncommitted_size() == 0) { + ergo_verbose0(ErgoHeapSizing, + "did not expand the heap", + ergo_format_reason("heap already fully expanded")); + return false; + } + // First commit the memory. HeapWord* old_end = (HeapWord*) _g1_storage.high(); bool successful = _g1_storage.expand_by(aligned_expand_bytes); @@ -1856,7 +1846,6 @@ } void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { - size_t old_mem_size = _g1_storage.committed_size(); size_t aligned_shrink_bytes = ReservedSpace::page_align_size_down(shrink_bytes); aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, @@ -2019,10 +2008,12 @@ size_t init_byte_size = collector_policy()->initial_heap_byte_size(); size_t max_byte_size = collector_policy()->max_heap_byte_size(); + size_t heap_alignment = collector_policy()->max_alignment(); // Ensure that the sizes are properly aligned. Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); + Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap"); _cg1r = new ConcurrentG1Refine(this); @@ -2039,12 +2030,8 @@ // If this happens then we could end up using a non-optimal // compressed oops mode. - // Since max_byte_size is aligned to the size of a heap region (checked - // above). 
- Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); - ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, - HeapRegion::GrainBytes); + heap_alignment); // It is important to do this in a way such that concurrent readers can't // temporarily think something is in the heap. (I've actually seen this @@ -2058,20 +2045,13 @@ // Create the gen rem set (and barrier set) for the entire reserved region. _rem_set = collector_policy()->create_rem_set(_reserved, 2); set_barrier_set(rem_set()->bs()); - if (barrier_set()->is_a(BarrierSet::ModRef)) { - _mr_bs = (ModRefBarrierSet*)_barrier_set; - } else { - vm_exit_during_initialization("G1 requires a mod ref bs."); + if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { + vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS"); return JNI_ENOMEM; } // Also create a G1 rem set. - if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { - _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); - } else { - vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); - return JNI_ENOMEM; - } + _g1_rem_set = new G1RemSet(this, g1_barrier_set()); // Carve out the G1 part of the heap. @@ -2082,8 +2062,10 @@ _g1_storage.initialize(g1_rs, 0); _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); _hrs.initialize((HeapWord*) _g1_reserved.start(), - (HeapWord*) _g1_reserved.end(), - _expansion_regions); + (HeapWord*) _g1_reserved.end()); + assert(_hrs.max_length() == _expansion_regions, + err_msg("max length: %u expansion regions: %u", + _hrs.max_length(), _expansion_regions)); // Do later initialization work for concurrent refinement. _cg1r->init(); @@ -2204,6 +2186,10 @@ return JNI_OK; } +size_t G1CollectedHeap::conservative_max_heap_alignment() { + return HeapRegion::max_region_size(); +} + void G1CollectedHeap::ref_processing_init() { // Reference processing in G1 currently works as follows: // @@ -2516,11 +2502,11 @@ void G1CollectedHeap::register_concurrent_cycle_end() { if (_concurrent_cycle_started) { - _gc_timer_cm->register_gc_end(os::elapsed_counter()); - if (_cm->has_aborted()) { _gc_tracer_cm->report_concurrent_mode_failure(); } + + _gc_timer_cm->register_gc_end(os::elapsed_counter()); _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); _concurrent_cycle_started = false; @@ -3119,6 +3105,145 @@ return NULL; // keep some compilers happy } +// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can +// pass it as the perm_blk to SharedHeap::process_strong_roots. +// When process_strong_roots stop calling perm_blk->younger_refs_iterate +// we can change this closure to extend the simpler OopClosure. +class VerifyRootsClosure: public OopsInGenClosure { +private: + G1CollectedHeap* _g1h; + VerifyOption _vo; + bool _failures; +public: + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. 
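The _vo comment above enumerates the three liveness sources verification can consult. A schematic of that three-way dispatch, with stand-in liveness fields rather than HotSpot's prev/next marking bitmaps and mark words:

#include <cstdio>

// Stand-in for the VerifyOption dispatch described above; the liveness
// sources are toy booleans, not G1's real data structures.
enum VerifyOption { UsePrevMarking, UseNextMarking, UseMarkWord };

struct Obj { bool prev_live; bool next_live; bool mark_word_live; };

bool is_dead(const Obj& o, VerifyOption vo) {
  switch (vo) {
    case UsePrevMarking: return !o.prev_live;      // stable between remarks
    case UseNextMarking: return !o.next_live;      // only valid at remark end
    case UseMarkWord:    return !o.mark_word_live; // full-GC marking
  }
  return false;
}

int main() {
  Obj o = { true, false, true };
  std::printf("dead under next marking: %d\n", is_dead(o, UseNextMarking));
  return 0;
}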
+ VerifyRootsClosure(VerifyOption vo) : + _g1h(G1CollectedHeap::heap()), + _vo(vo), + _failures(false) { } + + bool failures() { return _failures; } + + template void do_oop_nv(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (_g1h->is_obj_dead_cond(obj, _vo)) { + gclog_or_tty->print_cr("Root location "PTR_FORMAT" " + "points to dead obj "PTR_FORMAT, p, (void*) obj); + if (_vo == VerifyOption_G1UseMarkWord) { + gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark())); + } + obj->print_on(gclog_or_tty); + _failures = true; + } + } + } + + void do_oop(oop* p) { do_oop_nv(p); } + void do_oop(narrowOop* p) { do_oop_nv(p); } +}; + +class G1VerifyCodeRootOopClosure: public OopsInGenClosure { + G1CollectedHeap* _g1h; + OopClosure* _root_cl; + nmethod* _nm; + VerifyOption _vo; + bool _failures; + + template void do_oop_work(T* p) { + // First verify that this root is live + _root_cl->do_oop(p); + + if (!G1VerifyHeapRegionCodeRoots) { + // We're not verifying the code roots attached to heap region. + return; + } + + // Don't check the code roots during marking verification in a full GC + if (_vo == VerifyOption_G1UseMarkWord) { + return; + } + + // Now verify that the current nmethod (which contains p) is + // in the code root list of the heap region containing the + // object referenced by p. + + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + + // Now fetch the region containing the object + HeapRegion* hr = _g1h->heap_region_containing(obj); + HeapRegionRemSet* hrrs = hr->rem_set(); + // Verify that the strong code root list for this region + // contains the nmethod + if (!hrrs->strong_code_roots_list_contains(_nm)) { + gclog_or_tty->print_cr("Code root location "PTR_FORMAT" " + "from nmethod "PTR_FORMAT" not in strong " + "code roots for region ["PTR_FORMAT","PTR_FORMAT")", + p, _nm, hr->bottom(), hr->end()); + _failures = true; + } + } + } + +public: + G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo): + _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {} + + void do_oop(oop* p) { do_oop_work(p); } + void do_oop(narrowOop* p) { do_oop_work(p); } + + void set_nmethod(nmethod* nm) { _nm = nm; } + bool failures() { return _failures; } +}; + +class G1VerifyCodeRootBlobClosure: public CodeBlobClosure { + G1VerifyCodeRootOopClosure* _oop_cl; + +public: + G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl): + _oop_cl(oop_cl) {} + + void do_code_blob(CodeBlob* cb) { + nmethod* nm = cb->as_nmethod_or_null(); + if (nm != NULL) { + _oop_cl->set_nmethod(nm); + nm->oops_do(_oop_cl); + } + } +}; + +class YoungRefCounterClosure : public OopClosure { + G1CollectedHeap* _g1h; + int _count; + public: + YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {} + void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } } + void do_oop(narrowOop* p) { ShouldNotReachHere(); } + + int count() { return _count; } + void reset_count() { _count = 0; }; +}; + +class VerifyKlassClosure: public KlassClosure { + YoungRefCounterClosure _young_ref_counter_closure; + OopClosure *_oop_closure; + public: + VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {} + void do_klass(Klass* k) { + k->oops_do(_oop_closure); + + _young_ref_counter_closure.reset_count(); + 
k->oops_do(&_young_ref_counter_closure); + if (_young_ref_counter_closure.count() > 0) { + guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k)); + } + } +}; + class VerifyLivenessOopClosure: public OopClosure { G1CollectedHeap* _g1h; VerifyOption _vo; @@ -3252,75 +3377,7 @@ } }; -class YoungRefCounterClosure : public OopClosure { - G1CollectedHeap* _g1h; - int _count; - public: - YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {} - void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } } - void do_oop(narrowOop* p) { ShouldNotReachHere(); } - - int count() { return _count; } - void reset_count() { _count = 0; }; -}; - -class VerifyKlassClosure: public KlassClosure { - YoungRefCounterClosure _young_ref_counter_closure; - OopClosure *_oop_closure; - public: - VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {} - void do_klass(Klass* k) { - k->oops_do(_oop_closure); - - _young_ref_counter_closure.reset_count(); - k->oops_do(&_young_ref_counter_closure); - if (_young_ref_counter_closure.count() > 0) { - guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k)); - } - } -}; - -// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can -// pass it as the perm_blk to SharedHeap::process_strong_roots. -// When process_strong_roots stop calling perm_blk->younger_refs_iterate -// we can change this closure to extend the simpler OopClosure. -class VerifyRootsClosure: public OopsInGenClosure { -private: - G1CollectedHeap* _g1h; - VerifyOption _vo; - bool _failures; -public: - // _vo == UsePrevMarking -> use "prev" marking information, - // _vo == UseNextMarking -> use "next" marking information, - // _vo == UseMarkWord -> use mark word from object header. - VerifyRootsClosure(VerifyOption vo) : - _g1h(G1CollectedHeap::heap()), - _vo(vo), - _failures(false) { } - - bool failures() { return _failures; } - - template void do_oop_nv(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_g1h->is_obj_dead_cond(obj, _vo)) { - gclog_or_tty->print_cr("Root location "PTR_FORMAT" " - "points to dead obj "PTR_FORMAT, p, (void*) obj); - if (_vo == VerifyOption_G1UseMarkWord) { - gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark())); - } - obj->print_on(gclog_or_tty); - _failures = true; - } - } - } - - void do_oop(oop* p) { do_oop_nv(p); } - void do_oop(narrowOop* p) { do_oop_nv(p); } -}; - -// This is the task used for parallel heap verification. 
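VerifyKlassClosure above re-counts a Klass's references into the young generation and insists the klass be flagged as modified if any exist, since an unflagged klass would be skipped by young-generation root scanning. A toy version of the same invariant check (all names here are ours, not HotSpot's):

#include <cassert>
#include <vector>

// Anything holding a reference into the young generation must carry the
// "modified" flag a write barrier would have set, or a young collection
// could miss that reference.
struct ToyKlass {
  std::vector<bool> ref_is_young;  // one entry per embedded reference
  bool has_modified_oops;          // "dirty" flag maintained by a barrier
};

void verify_klass(const ToyKlass& k) {
  int young_refs = 0;
  for (bool y : k.ref_is_young) if (y) young_refs++;
  if (young_refs > 0) {
    assert(k.has_modified_oops && "has young refs but is not dirty");
  }
}

int main() {
  ToyKlass k;
  k.ref_is_young = { false, true };
  k.has_modified_oops = true;  // the barrier set the flag on the store
  verify_klass(k);
  return 0;
}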
+// This is the task used for parallel verification of the heap regions class G1ParVerifyTask: public AbstractGangTask { private: @@ -3354,20 +3411,15 @@ } }; -void G1CollectedHeap::verify(bool silent) { - verify(silent, VerifyOption_G1UsePrevMarking); -} - -void G1CollectedHeap::verify(bool silent, - VerifyOption vo) { +void G1CollectedHeap::verify(bool silent, VerifyOption vo) { if (SafepointSynchronize::is_at_safepoint()) { + assert(Thread::current()->is_VM_thread(), + "Expected to be executed serially by the VM thread at this point"); + if (!silent) { gclog_or_tty->print("Roots "); } VerifyRootsClosure rootsCl(vo); - - assert(Thread::current()->is_VM_thread(), - "Expected to be executed serially by the VM thread at this point"); - - CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); + G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); + G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); VerifyKlassClosure klassCl(this, &rootsCl); // We apply the relevant closures to all the oops in the @@ -3386,7 +3438,7 @@ &klassCl ); - bool failures = rootsCl.failures(); + bool failures = rootsCl.failures() || codeRootsCl.failures(); if (vo != VerifyOption_G1UseMarkWord) { // If we're verifying during a full GC then the region sets @@ -3455,6 +3507,34 @@ } } +void G1CollectedHeap::verify(bool silent) { + verify(silent, VerifyOption_G1UsePrevMarking); +} + +double G1CollectedHeap::verify(bool guard, const char* msg) { + double verify_time_ms = 0.0; + + if (guard && total_collections() >= VerifyGCStartAt) { + double verify_start = os::elapsedTime(); + HandleMark hm; // Discard invalid handles created during verification + prepare_for_verify(); + Universe::verify(VerifyOption_G1UsePrevMarking, msg); + verify_time_ms = (os::elapsedTime() - verify_start) * 1000; + } + + return verify_time_ms; +} + +void G1CollectedHeap::verify_before_gc() { + double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:"); + g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms); +} + +void G1CollectedHeap::verify_after_gc() { + double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:"); + g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms); +} + class PrintRegionClosure: public HeapRegionClosure { outputStream* _st; public: @@ -3604,6 +3684,11 @@ assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); // Fill TLAB's and such ensure_parsability(true); + + if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && + (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { + g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); + } } void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { @@ -3612,7 +3697,7 @@ (G1SummarizeRSetStatsPeriod > 0) && // we are at the end of the GC. Total collections has already been increased. ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { - g1_rem_set()->print_periodic_summary_info(); + g1_rem_set()->print_periodic_summary_info("After GC RS summary"); } // FIXME: what is this about? 
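The prologue/epilogue pair above samples remembered-set statistics every G1SummarizeRSetStatsPeriod-th GC: gc_prologue() tests total_collections() % period, while gc_epilogue() subtracts one first because the collection counter has already been incremented by the time it runs. A sketch confirming the two triggers select the same pauses:

#include <cassert>

int main() {
  const unsigned P = 3;  // stand-in for G1SummarizeRSetStatsPeriod
  for (unsigned gc = 0; gc < 9; gc++) {
    unsigned total_before = gc;      // counter value seen in gc_prologue
    unsigned total_after = gc + 1;   // counter value seen in gc_epilogue
    bool fire_before = (total_before % P == 0);
    bool fire_after = ((total_after - 1) % P == 0);
    assert(fire_before == fire_after);  // both hooks agree on which GCs to sample
  }
  return 0;
}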
@@ -3629,14 +3714,15 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, unsigned int gc_count_before, - bool* succeeded) { + bool* succeeded, + GCCause::Cause gc_cause) { assert_heap_not_locked_and_not_at_safepoint(); g1_policy()->record_stop_world_start(); VM_G1IncCollectionPause op(gc_count_before, word_size, false, /* should_initiate_conc_mark */ g1_policy()->max_pause_time_ms(), - GCCause::_g1_inc_collection_pause); + gc_cause); VMThread::execute(&op); HeapWord* result = op.result(); @@ -3876,8 +3962,9 @@ append_secondary_free_list_if_not_empty_with_lock(); } - assert(check_young_list_well_formed(), - "young list should be well formed"); + assert(check_young_list_well_formed(), "young list should be well formed"); + assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), + "sanity check"); // Don't dynamically change the number of GC threads this early. A value of // 0 is used to indicate serial work. When parallel work is done, @@ -4471,7 +4558,7 @@ : _g1h(g1h), _refs(g1h->task_queue(queue_num)), _dcq(&g1h->dirty_card_queue_set()), - _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), + _ct_bs(g1h->g1_barrier_set()), _g1_rem(g1h->g1_rem_set()), _hash_seed(17), _queue_num(queue_num), _term_attempts(0), @@ -4538,7 +4625,7 @@ assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); oop p = oopDesc::load_decode_heap_oop(ref); assert(_g1h->is_in_g1_reserved(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); return true; } @@ -4548,11 +4635,11 @@ // Must be in the collection set--it's already been copied. oop p = clear_partial_array_mask(ref); assert(_g1h->obj_in_cs(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); } else { oop p = oopDesc::load_decode_heap_oop(ref); assert(_g1h->is_in_g1_reserved(p), - err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); + err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p)); } return true; } @@ -4997,7 +5084,11 @@ G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); - int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + // Don't scan the scavengable methods in the code cache as part + // of strong root scanning. The code roots that point into a + // region in the collection set are scanned when we scan the + // region's RSet. + int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings; pss.start_strong_roots(); _g1h->g1_process_strong_roots(/* is scavenging */ true, @@ -5039,67 +5130,6 @@ // *** Common G1 Evacuation Stuff -// Closures that support the filtering of CodeBlobs scanned during -// external root scanning. - -// Closure applied to reference fields in code blobs (specifically nmethods) -// to determine whether an nmethod contains references that point into -// the collection set. Used as a predicate when walking code roots so -// that only nmethods that point into the collection set are added to the -// 'marked' list. 
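The "so" value above is a bitmask of root-scanning scopes; dropping the code-cache flag is what defers code-root scanning to the per-region RSet walk. A sketch of the flag pattern (the flag values here are illustrative, not SharedHeap's actual constants):

#include <cassert>

// Root scanning scope as a set of flags; "so & SO_CodeCache" asks whether
// the whole code cache is walked or code roots are left to per-region scans.
enum ScanningOption {
  SO_None = 0x0, SO_AllClasses = 0x1, SO_Strings = 0x2, SO_CodeCache = 0x4
};

int main() {
  int so = SO_AllClasses | SO_Strings;  // evacuation-pause scope
  assert(!(so & SO_CodeCache));         // code cache skipped here
  so |= SO_CodeCache;                   // e.g. a full verification pass
  assert(so & SO_CodeCache);
  return 0;
}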
- -class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure { - - class G1PointsIntoCSOopClosure : public OopClosure { - G1CollectedHeap* _g1; - bool _points_into_cs; - public: - G1PointsIntoCSOopClosure(G1CollectedHeap* g1) : - _g1(g1), _points_into_cs(false) { } - - bool points_into_cs() const { return _points_into_cs; } - - template - void do_oop_nv(T* p) { - if (!_points_into_cs) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop) && - _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) { - _points_into_cs = true; - } - } - } - - virtual void do_oop(oop* p) { do_oop_nv(p); } - virtual void do_oop(narrowOop* p) { do_oop_nv(p); } - }; - - G1CollectedHeap* _g1; - -public: - G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) : - CodeBlobToOopClosure(cl, true), _g1(g1) { } - - virtual void do_code_blob(CodeBlob* cb) { - nmethod* nm = cb->as_nmethod_or_null(); - if (nm != NULL && !(nm->test_oops_do_mark())) { - G1PointsIntoCSOopClosure predicate_cl(_g1); - nm->oops_do(&predicate_cl); - - if (predicate_cl.points_into_cs()) { - // At least one of the reference fields or the oop relocations - // in the nmethod points into the collection set. We have to - // 'mark' this nmethod. - // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob() - // or MarkingCodeBlobClosure::do_code_blob() change. - if (!nm->test_set_oops_do_mark()) { - do_newly_marked_nmethod(nm); - } - } - } - } -}; - // This method is run in a GC worker. void @@ -5117,22 +5147,15 @@ BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); - // Walk the code cache w/o buffering, because StarTask cannot handle - // unaligned oop locations. - G1FilteredCodeBlobToOopClosure eager_scan_cs_code_roots(this, scan_non_heap_roots); - - // Scan all code roots from stack - CodeBlobToOopClosure eager_scan_all_code_roots(scan_non_heap_roots, true); - CodeBlobToOopClosure* blobs = &eager_scan_cs_code_roots; - if (UseNewCode && g1_policy()->during_initial_mark_pause()) { - // during initial-mark we need to take care to follow all code roots - blobs = &eager_scan_all_code_roots; - } + assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow"); + // Walk the code cache/strong code roots w/o buffering, because StarTask + // cannot handle unaligned oop locations. + CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */); process_strong_roots(false, // no scoping; this is parallel code is_scavenging, so, &buf_scan_non_heap_roots, - blobs, + &eager_scan_code_roots, scan_klasses ); @@ -5172,9 +5195,22 @@ } g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); + // If this is an initial mark pause, and we're not scanning + // the entire code cache, we need to mark the oops in the + // strong code root lists for the regions that are not in + // the collection set. + // Note all threads participate in this set of root tasks. + double mark_strong_code_roots_ms = 0.0; + if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) { + double mark_strong_roots_start = os::elapsedTime(); + mark_strong_code_roots(worker_i); + mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0; + } + g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms); + // Now scan the complement of the collection set. 
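The initial-mark branch above wraps an optional phase in elapsed-time bookkeeping and records the duration even when the phase is skipped, so per-worker phase logs stay comparable. The same pattern, sketched with std::chrono in place of os::elapsedTime():

#include <cassert>
#include <chrono>

static double now_sec() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

double timed_phase(bool enabled, void (*phase)()) {
  double ms = 0.0;
  if (enabled) {
    double start = now_sec();
    phase();
    ms = (now_sec() - start) * 1000.0;  // seconds -> milliseconds
  }
  return ms;  // recorded even when 0.0, as with the mark time above
}

static void dummy_phase() {}

int main() {
  assert(timed_phase(false, dummy_phase) == 0.0);
  assert(timed_phase(true, dummy_phase) >= 0.0);
  return 0;
}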
if (scan_rs != NULL) { - g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); + g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); } _process_strong_tasks->all_tasks_completed(); } @@ -5792,9 +5828,6 @@ process_discovered_references(n_workers); // Weak root processing. - // Note: when JSR 292 is enabled and code blobs can contain - // non-perm oops then we will need to process the code blobs - // here too. { G1STWIsAliveClosure is_alive(this); G1KeepAliveClosure keep_alive(this); @@ -5810,6 +5843,17 @@ hot_card_cache->reset_hot_cache(); hot_card_cache->set_use_cache(true); + // Migrate the strong code roots attached to each region in + // the collection set. Ideally we would like to do this + // after we have finished the scanning/evacuation of the + // strong code roots for a particular heap region. + migrate_strong_code_roots(); + + if (g1_policy()->during_initial_mark_pause()) { + // Reset the claim values set during marking the strong code roots + reset_heap_region_claim_values(); + } + finalize_for_evac_failure(); if (evacuation_failed()) { @@ -5943,11 +5987,11 @@ } class G1ParCleanupCTTask : public AbstractGangTask { - CardTableModRefBS* _ct_bs; + G1SATBCardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; HeapRegion* volatile _su_head; public: - G1ParCleanupCTTask(CardTableModRefBS* ct_bs, + G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs, G1CollectedHeap* g1h) : AbstractGangTask("G1 Par Cleanup CT Task"), _ct_bs(ct_bs), _g1h(g1h) { } @@ -5970,9 +6014,9 @@ #ifndef PRODUCT class G1VerifyCardTableCleanup: public HeapRegionClosure { G1CollectedHeap* _g1h; - CardTableModRefBS* _ct_bs; + G1SATBCardTableModRefBS* _ct_bs; public: - G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) + G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs) : _g1h(g1h), _ct_bs(ct_bs) { } virtual bool doHeapRegion(HeapRegion* r) { if (r->is_survivor()) { @@ -5986,7 +6030,7 @@ void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { // All of the region should be clean. - CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); MemRegion mr(hr->bottom(), hr->end()); ct_bs->verify_not_dirty_region(mr); } @@ -5999,13 +6043,13 @@ // not dirty that area (one less thing to have to do while holding // a lock). So we can only verify that [bottom(),pre_dummy_top()] // is dirty. 
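migrate_strong_code_roots(), called from the evacuation path above, re-homes nmethod entries from collection-set regions to wherever their referenced objects now live. A conceptual model using ordinary containers (region ids and names are invented; this is not HotSpot's list implementation):

#include <cassert>
#include <map>
#include <set>
#include <string>

// Each region keeps the set of compiled methods that point into it; after
// evacuation, entries on collection-set regions are re-homed to the regions
// now holding the surviving objects.
typedef std::string Nmethod;
typedef std::map<int, std::set<Nmethod> > RootLists;  // region id -> roots

void migrate(RootLists& lists, int from_region, int to_region) {
  std::set<Nmethod>& src = lists[from_region];
  lists[to_region].insert(src.begin(), src.end());
  src.clear();  // the collection-set region ends up with an empty list
}

int main() {
  RootLists lists;
  lists[1].insert("compiled_foo");  // region 1 is in the collection set
  migrate(lists, 1, 7);             // its objects were evacuated to region 7
  assert(lists[1].empty());
  assert(lists[7].count("compiled_foo") == 1);
  return 0;
}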
- CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); MemRegion mr(hr->bottom(), hr->pre_dummy_top()); ct_bs->verify_dirty_region(mr); } void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { - CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { verify_dirty_region(hr); } @@ -6017,7 +6061,7 @@ #endif void G1CollectedHeap::cleanUpCardTable() { - CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); double start = os::elapsedTime(); { @@ -6606,3 +6650,208 @@ _humongous_set.verify_end(); _free_list.verify_end(); } + +// Optimized nmethod scanning + +class RegisterNMethodOopClosure: public OopClosure { + G1CollectedHeap* _g1h; + nmethod* _nm; + + template <class T> void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + HeapRegion* hr = _g1h->heap_region_containing(obj); + assert(!hr->isHumongous(), "code root in humongous region?"); + + // HeapRegion::add_strong_code_root() avoids adding duplicate + // entries but having duplicates is OK since we "mark" nmethods + // as visited when we scan the strong code root lists during the GC. + hr->add_strong_code_root(_nm); + assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?"); + } + } + +public: + RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : + _g1h(g1h), _nm(nm) {} + + void do_oop(oop* p) { do_oop_work(p); } + void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +class UnregisterNMethodOopClosure: public OopClosure { + G1CollectedHeap* _g1h; + nmethod* _nm; + + template <class T> void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + HeapRegion* hr = _g1h->heap_region_containing(obj); + assert(!hr->isHumongous(), "code root in humongous region?"); + hr->remove_strong_code_root(_nm); + assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?"); + } + } + +public: + UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : + _g1h(g1h), _nm(nm) {} + + void do_oop(oop* p) { do_oop_work(p); } + void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +void G1CollectedHeap::register_nmethod(nmethod* nm) { + CollectedHeap::register_nmethod(nm); + + guarantee(nm != NULL, "sanity"); + RegisterNMethodOopClosure reg_cl(this, nm); + nm->oops_do(&reg_cl); +} + +void G1CollectedHeap::unregister_nmethod(nmethod* nm) { + CollectedHeap::unregister_nmethod(nm); + + guarantee(nm != NULL, "sanity"); + UnregisterNMethodOopClosure reg_cl(this, nm); + nm->oops_do(&reg_cl, true); +} + +class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion *hr) { + assert(!hr->isHumongous(), "humongous region in collection set?"); + hr->migrate_strong_code_roots(); + return false; + } +}; + +void G1CollectedHeap::migrate_strong_code_roots() { + MigrateCodeRootsHeapRegionClosure cl; + double migrate_start = os::elapsedTime(); + collection_set_iterate(&cl); + double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0; + g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms); +} + +// Mark all the code roots that point into regions *not* in the +// collection set.
+// +// Note we do not want to use a "marking" CodeBlobToOopClosure while +// walking the code roots lists of regions not in the collection +// set. Suppose we have an nmethod (M) that points to objects in two +// separate regions - one in the collection set (R1) and one not (R2). +// Using a "marking" CodeBlobToOopClosure here would result in "marking" +// nmethod M when walking the code roots for R1. When we come to scan +// the code roots for R2, we would see that M is already marked and it +// would be skipped and the objects in R2 that are referenced from M +// would not be evacuated. + +class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure { + + class MarkStrongCodeRootOopClosure: public OopClosure { + ConcurrentMark* _cm; + HeapRegion* _hr; + uint _worker_id; + + template <class T> void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + // Only mark objects in the region (which is assumed + // to be not in the collection set). + if (_hr->is_in(obj)) { + _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); + } + } + } + + public: + MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) : + _cm(cm), _hr(hr), _worker_id(worker_id) { + assert(!_hr->in_collection_set(), "sanity"); + } + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } + }; + + MarkStrongCodeRootOopClosure _oop_cl; + +public: + MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id): + _oop_cl(cm, hr, worker_id) {} + + void do_code_blob(CodeBlob* cb) { + nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null(); + if (nm != NULL) { + nm->oops_do(&_oop_cl); + } + } +}; + +class MarkStrongCodeRootsHRClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + uint _worker_id; + +public: + MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) : + _g1h(g1h), _worker_id(worker_id) {} + + bool doHeapRegion(HeapRegion *hr) { + HeapRegionRemSet* hrrs = hr->rem_set(); + if (hr->isHumongous()) { + // Code roots should never be attached to a humongous region + assert(hrrs->strong_code_roots_list_length() == 0, "sanity"); + return false; + } + + if (hr->in_collection_set()) { + // Don't mark code roots into regions in the collection set here. + // They will be marked when we scan them. + return false; + } + + MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id); + hr->strong_code_roots_do(&cb_cl); + return false; + } +}; + +void G1CollectedHeap::mark_strong_code_roots(uint worker_id) { + MarkStrongCodeRootsHRClosure cl(this, worker_id); + if (G1CollectedHeap::use_parallel_gc_threads()) { + heap_region_par_iterate_chunked(&cl, + worker_id, + workers()->active_workers(), + HeapRegion::ParMarkRootClaimValue); + } else { + heap_region_iterate(&cl); + } +} + +class RebuildStrongCodeRootClosure: public CodeBlobClosure { + G1CollectedHeap* _g1h; + +public: + RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) : + _g1h(g1h) {} + + void do_code_blob(CodeBlob* cb) { + nmethod* nm = (cb != NULL) ? 
cb->as_nmethod_or_null() : NULL; + if (nm == NULL) { + return; + } + + if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) { + _g1h->register_nmethod(nm); + } + } +}; + +void G1CollectedHeap::rebuild_strong_code_roots() { + RebuildStrongCodeRootClosure blob_cl(this); + CodeCache::blobs_do(&blob_cl); +} diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -31,6 +31,7 @@ #include "gc_implementation/g1/g1HRPrinter.hpp" #include "gc_implementation/g1/g1MonitoringSupport.hpp" #include "gc_implementation/g1/g1RemSet.hpp" +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" @@ -46,6 +47,7 @@ // may combine concurrent marking with parallel, incremental compaction of // heap subsets that will yield large amounts of garbage. +// Forward declarations class HeapRegion; class HRRSCleanupTask; class GenerationSpec; @@ -69,6 +71,7 @@ class G1NewTracer; class G1OldTracer; class EvacuationFailedInfo; +class nmethod; typedef OverflowTaskQueue RefToScanQueue; typedef GenericTaskQueueSet RefToScanQueueSet; @@ -163,18 +166,6 @@ : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } }; -// The G1 STW is alive closure. -// An instance is embedded into the G1CH and used as the -// (optional) _is_alive_non_header closure in the STW -// reference processor. It is also extensively used during -// reference processing during STW evacuation pauses. -class G1STWIsAliveClosure: public BoolObjectClosure { - G1CollectedHeap* _g1; -public: - G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} - bool do_object_b(oop p); -}; - class SurvivorGCAllocRegion : public G1AllocRegion { protected: virtual HeapRegion* allocate_new_region(size_t word_size, bool force); @@ -193,6 +184,18 @@ : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } }; +// The G1 STW is alive closure. +// An instance is embedded into the G1CH and used as the +// (optional) _is_alive_non_header closure in the STW +// reference processor. It is also extensively used during +// reference processing during STW evacuation pauses. +class G1STWIsAliveClosure: public BoolObjectClosure { + G1CollectedHeap* _g1; +public: + G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} + bool do_object_b(oop p); +}; + class RefineCardTableEntryClosure; class G1CollectedHeap : public SharedHeap { @@ -707,7 +710,7 @@ if (_g1_committed.contains((HeapWord*) obj)) { // no need to subtract the bottom of the heap from obj, // _in_cset_fast_test is biased - uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes; + uintx index = cast_from_oop(obj) >> HeapRegion::LogOfHRGrainBytes; bool ret = _in_cset_fast_test[index]; // let's make sure the result is consistent with what the slower // test returns @@ -780,9 +783,10 @@ // it has to be read while holding the Heap_lock. Currently, both // methods that call do_collection_pause() release the Heap_lock // before the call, so it's easy to read gc_count_before just before. 
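To make the comment preceding MarkStrongCodeRootCodeBlobClosure concrete: a globally "marking" walk over per-region code-root lists loses references, because the first region to visit an nmethod hides it from every later region. A toy reproduction of that hazard (all names here are illustrative, not HotSpot's):

#include <cassert>
#include <cstddef>
#include <set>
#include <string>
#include <vector>

// nmethod M has roots into region R1 (collection set) and R2 (not in it).
// If R1's walk "marks" M globally, R2's walk skips M and R2's objects
// referenced from M are never visited.
struct Nm { std::string name; std::vector<int> regions_referenced; };

int refs_seen(int region, const std::vector<Nm>& code_roots,
              std::set<std::string>* marked) {
  int seen = 0;
  for (const Nm& m : code_roots) {
    if (marked != NULL) {                   // "marking" walk
      if (marked->count(m.name)) continue;  // already visited: skipped!
      marked->insert(m.name);
    }
    for (int r : m.regions_referenced)
      if (r == region) seen++;
  }
  return seen;
}

int main() {
  Nm m;
  m.name = "M";
  m.regions_referenced = {1, 2};        // points into both R1 and R2
  std::vector<Nm> r1_list = {m}, r2_list = {m};

  std::set<std::string> marked;
  refs_seen(1, r1_list, &marked);               // R1's walk marks M...
  assert(refs_seen(2, r2_list, &marked) == 0);  // ...so R2's walk misses it
  assert(refs_seen(2, r2_list, NULL) == 1);     // a non-marking walk is safe
  return 0;
}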
- HeapWord* do_collection_pause(size_t word_size, - unsigned int gc_count_before, - bool* succeeded); + HeapWord* do_collection_pause(size_t word_size, + unsigned int gc_count_before, + bool* succeeded, + GCCause::Cause gc_cause); // The guts of the incremental collection pause, executed by the vm // thread. It returns false if it is unable to do the collection due @@ -794,8 +798,6 @@ // The g1 remembered set of the heap. G1RemSet* _g1_rem_set; - // And it's mod ref barrier set, used to track updates for the above. - ModRefBarrierSet* _mr_bs; // A set of cards that cover the objects for which the Rsets should be updated // concurrently after the collection. @@ -1095,6 +1097,9 @@ // specified by the policy object. jint initialize(); + // Return the (conservative) maximum heap alignment for any G1 heap + static size_t conservative_max_heap_alignment(); + // Initialize weak reference processing. virtual void ref_processing_init(); @@ -1127,7 +1132,6 @@ // The rem set and barrier set. G1RemSet* g1_rem_set() const { return _g1_rem_set; } - ModRefBarrierSet* mr_bs() const { return _mr_bs; } unsigned get_gc_time_stamp() { return _gc_time_stamp; @@ -1346,6 +1350,10 @@ virtual bool is_in_closed_subset(const void* p) const; + G1SATBCardTableModRefBS* g1_barrier_set() { + return (G1SATBCardTableModRefBS*) barrier_set(); + } + // This resets the card table to all zeros. It is used after // a collection pause which used the card table to claim cards. void cleanUpCardTable(); @@ -1555,42 +1563,6 @@ virtual jlong millis_since_last_gc(); - // Perform any cleanup actions necessary before allowing a verification. - virtual void prepare_for_verify(); - - // Perform verification. - - // vo == UsePrevMarking -> use "prev" marking information, - // vo == UseNextMarking -> use "next" marking information - // vo == UseMarkWord -> use the mark word in the object header - // - // NOTE: Only the "prev" marking information is guaranteed to be - // consistent most of the time, so most calls to this should use - // vo == UsePrevMarking. - // Currently, there is only one case where this is called with - // vo == UseNextMarking, which is to verify the "next" marking - // information at the end of remark. - // Currently there is only one place where this is called with - // vo == UseMarkWord, which is to verify the marking during a - // full GC. - void verify(bool silent, VerifyOption vo); - - // Override; it uses the "prev" marking information - virtual void verify(bool silent); - - virtual void print_on(outputStream* st) const; - virtual void print_extended_on(outputStream* st) const; - virtual void print_on_error(outputStream* st) const; - - virtual void print_gc_threads_on(outputStream* st) const; - virtual void gc_threads_do(ThreadClosure* tc) const; - - // Override - void print_tracing_info() const; - - // The following two methods are helpful for debugging RSet issues. - void print_cset_rsets() PRODUCT_RETURN; - void print_all_rsets() PRODUCT_RETURN; // Convenience function to be used in situations where the heap type can be // asserted to be this type. 
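The in_cset_fast_test lookup above turns an object address into a region index with a single shift; the table itself is biased by the heap bottom so no subtraction is needed on the hot path. A sketch with the subtraction kept explicit (region size, heap base, and table contents are made up):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned shift = 20;                         // log2 of region bytes
  const uintptr_t heap_bottom = uintptr_t(1) << 30;  // pretend heap start
  bool in_cset[4] = { false, true, false, false };   // per-region flags

  uintptr_t obj = heap_bottom + (uintptr_t(1) << shift) + 128;  // in region 1
  // The real table's base pointer is pre-adjusted ("biased") by
  // heap_bottom >> shift, which makes this subtraction disappear.
  size_t index = (obj >> shift) - (heap_bottom >> shift);
  assert(in_cset[index]);
  return 0;
}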
@@ -1667,13 +1639,86 @@ else return is_obj_ill(obj, hr); } + bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); + HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); + bool is_marked(oop obj, VerifyOption vo); + const char* top_at_mark_start_str(VerifyOption vo); + + ConcurrentMark* concurrent_mark() const { return _cm; } + + // Refinement + + ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } + + // The dirty cards region list is used to record a subset of regions + // whose cards need clearing. The list is populated during the + // remembered set scanning and drained during the card table + // cleanup. Although the methods are reentrant, population/draining + // phases must not overlap. For synchronization purposes the last + // element on the list points to itself. + HeapRegion* _dirty_cards_region_list; + void push_dirty_cards_region(HeapRegion* hr); + HeapRegion* pop_dirty_cards_region(); + + // Optimized nmethod scanning support routines + + // Register the given nmethod with the G1 heap + virtual void register_nmethod(nmethod* nm); + + // Unregister the given nmethod from the G1 heap + virtual void unregister_nmethod(nmethod* nm); + + // Migrate the nmethods in the code root lists of the regions + // in the collection set to regions in to-space. In the event + // of an evacuation failure, nmethods that reference objects + // that were not successfully evacuated are not migrated. + void migrate_strong_code_roots(); + + // During an initial mark pause, mark all the code roots that + // point into regions *not* in the collection set. + void mark_strong_code_roots(uint worker_id); + + // Rebuild the strong code root lists for each region + // after a full GC + void rebuild_strong_code_roots(); + + // Verification + + // The following is just to alert the verification code + // that a full collection has occurred and that the + // remembered sets are no longer up to date. + bool _full_collection; + void set_full_collection() { _full_collection = true;} + void clear_full_collection() {_full_collection = false;} + bool full_collection() {return _full_collection;} + + // Perform any cleanup actions necessary before allowing a verification. + virtual void prepare_for_verify(); + + // Perform verification. + + // vo == UsePrevMarking -> use "prev" marking information, + // vo == UseNextMarking -> use "next" marking information + // vo == UseMarkWord -> use the mark word in the object header + // + // NOTE: Only the "prev" marking information is guaranteed to be + // consistent most of the time, so most calls to this should use + // vo == UsePrevMarking. + // Currently, there is only one case where this is called with + // vo == UseNextMarking, which is to verify the "next" marking + // information at the end of remark. + // Currently there is only one place where this is called with + // vo == UseMarkWord, which is to verify the marking during a + // full GC. + void verify(bool silent, VerifyOption vo); + + // Override; it uses the "prev" marking information + virtual void verify(bool silent); + // The methods below are here for convenience and dispatch the // appropriate method depending on value of the given VerifyOption - // parameter. 
The values for that parameter, and their meanings, + // are the same as those above. bool is_obj_dead_cond(const oop obj, const HeapRegion* hr, @@ -1698,31 +1743,21 @@ return false; // keep some compilers happy } - bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); - HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); - bool is_marked(oop obj, VerifyOption vo); - const char* top_at_mark_start_str(VerifyOption vo); + // Printing - // The following is just to alert the verification code - // that a full collection has occurred and that the - // remembered sets are no longer up to date. - bool _full_collection; - void set_full_collection() { _full_collection = true;} - void clear_full_collection() {_full_collection = false;} - bool full_collection() {return _full_collection;} + virtual void print_on(outputStream* st) const; + virtual void print_extended_on(outputStream* st) const; + virtual void print_on_error(outputStream* st) const; - ConcurrentMark* concurrent_mark() const { return _cm; } - ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } + virtual void print_gc_threads_on(outputStream* st) const; + virtual void gc_threads_do(ThreadClosure* tc) const; - // The dirty cards region list is used to record a subset of regions - // whose cards need clearing. The list if populated during the - // remembered set scanning and drained during the card table - // cleanup. Although the methods are reentrant, population/draining - // phases must not overlap. For synchronization purposes the last - // element on the list points to itself. - HeapRegion* _dirty_cards_region_list; - void push_dirty_cards_region(HeapRegion* hr); - HeapRegion* pop_dirty_cards_region(); + // Override + void print_tracing_info() const; + + // The following two methods are helpful for debugging RSet issues. + void print_cset_rsets() PRODUCT_RETURN; + void print_all_rsets() PRODUCT_RETURN; public: void stop_conc_gc_threads(); @@ -1848,7 +1883,7 @@ G1CollectedHeap* _g1h; RefToScanQueue* _refs; DirtyCardQueue _dcq; - CardTableModRefBS* _ct_bs; + G1SATBCardTableModRefBS* _ct_bs; G1RemSet* _g1_rem; G1ParGCAllocBufferContainer _surviving_alloc_buffer; @@ -1887,7 +1922,7 @@ void add_to_undo_waste(size_t waste) { _undo_waste += waste; } DirtyCardQueue& dirty_card_queue() { return _dcq; } - CardTableModRefBS* ctbs() { return _ct_bs; } + G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } template void immediate_rs_update(HeapRegion* from, T* p, int tid) { if (!from->is_survivor()) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -134,7 +134,7 @@ assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); MemRegion mr(start, end); - ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); + g1_barrier_set()->dirty(mr); } inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -168,7 +168,15 @@ // Set up the region size and associated fields. 
Given that the // policy is created before the heap, we have to set this up here, // so it's done as soon as possible. - HeapRegion::setup_heap_region_size(Arguments::min_heap_size()); + + // It would have been natural to pass initial_heap_byte_size() and + // max_heap_byte_size() to setup_heap_region_size() but those have + // not been set up at this point since they should be aligned with + // the region size. So, there is a circular dependency here. We base + // the region size on the heap size, but the heap size should be + // aligned with the region size. To get around this we use the + // unaligned values for the heap. + HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); HeapRegionRemSet::setup_remset_size(); G1ErgoVerbose::initialize(); @@ -313,7 +321,8 @@ void G1CollectorPolicy::initialize_flags() { set_min_alignment(HeapRegion::GrainBytes); size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); - set_max_alignment(MAX2(card_table_alignment, min_alignment())); + size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); + set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size)); if (SurvivorRatio < 1) { vm_exit_during_initialization("Invalid survivor ratio specified"); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1EvacFailure.hpp --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -41,11 +41,11 @@ private: G1CollectedHeap* _g1; DirtyCardQueue *_dcq; - CardTableModRefBS* _ct_bs; + G1SATBCardTableModRefBS* _ct_bs; public: UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : - _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} + _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {} virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -161,6 +161,8 @@ _last_update_rs_times_ms(_max_gc_threads, "%.1lf"), _last_update_rs_processed_buffers(_max_gc_threads, "%d"), _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"), + _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"), + _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"), _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"), _last_termination_times_ms(_max_gc_threads, "%.1lf"), _last_termination_attempts(_max_gc_threads, SIZE_FORMAT), @@ -182,6 +184,8 @@ _last_update_rs_times_ms.reset(); _last_update_rs_processed_buffers.reset(); _last_scan_rs_times_ms.reset(); + _last_strong_code_root_scan_times_ms.reset(); + _last_strong_code_root_mark_times_ms.reset(); _last_obj_copy_times_ms.reset(); _last_termination_times_ms.reset(); _last_termination_attempts.reset(); @@ -197,6 +201,8 @@ _last_update_rs_times_ms.verify(); _last_update_rs_processed_buffers.verify(); _last_scan_rs_times_ms.verify(); + _last_strong_code_root_scan_times_ms.verify(); + _last_strong_code_root_mark_times_ms.verify(); _last_obj_copy_times_ms.verify(); _last_termination_times_ms.verify(); _last_termination_attempts.verify(); @@ -210,6 +216,8 @@ _last_satb_filtering_times_ms.get(i) + _last_update_rs_times_ms.get(i) + _last_scan_rs_times_ms.get(i) + + 
_last_strong_code_root_scan_times_ms.get(i) + + _last_strong_code_root_mark_times_ms.get(i) + _last_obj_copy_times_ms.get(i) + _last_termination_times_ms.get(i); @@ -239,6 +247,9 @@ // Now subtract the time taken to fix up roots in generated code misc_time_ms += _cur_collection_code_root_fixup_time_ms; + // Strong code root migration time + misc_time_ms += _cur_strong_code_root_migration_time_ms; + // Subtract the time taken to clean the card table from the // current value of "other time" misc_time_ms += _cur_clear_ct_time_ms; @@ -257,9 +268,13 @@ if (_last_satb_filtering_times_ms.sum() > 0.0) { _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)"); } + if (_last_strong_code_root_mark_times_ms.sum() > 0.0) { + _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)"); + } _last_update_rs_times_ms.print(2, "Update RS (ms)"); _last_update_rs_processed_buffers.print(3, "Processed Buffers"); _last_scan_rs_times_ms.print(2, "Scan RS (ms)"); + _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)"); _last_obj_copy_times_ms.print(2, "Object Copy (ms)"); _last_termination_times_ms.print(2, "Termination (ms)"); if (G1Log::finest()) { @@ -273,12 +288,17 @@ if (_last_satb_filtering_times_ms.sum() > 0.0) { _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)"); } + if (_last_strong_code_root_mark_times_ms.sum() > 0.0) { + _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)"); + } _last_update_rs_times_ms.print(1, "Update RS (ms)"); _last_update_rs_processed_buffers.print(2, "Processed Buffers"); _last_scan_rs_times_ms.print(1, "Scan RS (ms)"); + _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)"); _last_obj_copy_times_ms.print(1, "Object Copy (ms)"); } print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms); + print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms); print_stats(1, "Clear CT", _cur_clear_ct_time_ms); double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms(); print_stats(1, "Other", misc_time_ms); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -119,6 +119,8 @@ WorkerDataArray _last_update_rs_times_ms; WorkerDataArray _last_update_rs_processed_buffers; WorkerDataArray _last_scan_rs_times_ms; + WorkerDataArray _last_strong_code_root_scan_times_ms; + WorkerDataArray _last_strong_code_root_mark_times_ms; WorkerDataArray _last_obj_copy_times_ms; WorkerDataArray _last_termination_times_ms; WorkerDataArray _last_termination_attempts; @@ -128,6 +130,7 @@ double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; + double _cur_strong_code_root_migration_time_ms; double _cur_clear_ct_time_ms; double _cur_ref_proc_time_ms; @@ -179,6 +182,14 @@ _last_scan_rs_times_ms.set(worker_i, ms); } + void record_strong_code_root_scan_time(uint worker_i, double ms) { + _last_strong_code_root_scan_times_ms.set(worker_i, ms); + } + + void record_strong_code_root_mark_time(uint worker_i, double ms) { + _last_strong_code_root_mark_times_ms.set(worker_i, ms); + } + void record_obj_copy_time(uint worker_i, double ms) { _last_obj_copy_times_ms.set(worker_i, ms); } @@ -208,6 +219,10 @@ _cur_collection_code_root_fixup_time_ms = ms; } + void record_strong_code_root_migration_time(double ms) { + _cur_strong_code_root_migration_time_ms 
= ms; + } + void record_ref_proc_time(double ms) { _cur_ref_proc_time_ms = ms; } @@ -294,6 +309,14 @@ return _last_scan_rs_times_ms.average(); } + double average_last_strong_code_root_scan_time(){ + return _last_strong_code_root_scan_times_ms.average(); + } + + double average_last_strong_code_root_mark_time(){ + return _last_strong_code_root_mark_times_ms.average(); + } + double average_last_obj_copy_time() { return _last_obj_copy_times_ms.average(); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -220,7 +220,7 @@ public: G1PrepareCompactClosure(CompactibleSpace* cs) : _g1h(G1CollectedHeap::heap()), - _mrbs(G1CollectedHeap::heap()->mr_bs()), + _mrbs(_g1h->g1_barrier_set()), _cp(NULL, cs, cs->initialize_threshold()), _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -262,6 +262,7 @@ old_collection_counters()->update_all(); young_collection_counters()->update_all(); MetaspaceCounters::update_performance_counters(); + CompressedClassSpaceCounters::update_performance_counters(); } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1OopClosures.hpp --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -91,12 +91,12 @@ } template <class T> inline T* set_partial_array_mask(T obj) { - assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); - return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK); + assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!"); + return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK); } template <class T> inline oop clear_partial_array_mask(T* ref) { - return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); + return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); } class G1ParScanPartialArrayClosure : public G1ParClosureSuper { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -83,7 +83,9 @@ for (uint i = 0; i < n_workers(); i++) { _cset_rs_update_cl[i] = NULL; } - _prev_period_summary.initialize(this, n_workers()); + if (G1SummarizeRSetStats) { + _prev_period_summary.initialize(this); + } } G1RemSet::~G1RemSet() { @@ -104,15 +106,25 @@ class ScanRSClosure : public HeapRegionClosure { size_t _cards_done, _cards; G1CollectedHeap* _g1h; + OopsInHeapRegionClosure* _oc; + CodeBlobToOopClosure* _code_root_cl; + G1BlockOffsetSharedArray* _bot_shared; - CardTableModRefBS *_ct_bs; - int _worker_i; - int _block_size; - bool _try_claimed; + G1SATBCardTableModRefBS *_ct_bs; + + double _strong_code_root_scan_time_sec; + int _worker_i; + int _block_size; + bool _try_claimed; + public: - ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) : + ScanRSClosure(OopsInHeapRegionClosure* oc, + CodeBlobToOopClosure* code_root_cl, + int worker_i) : _oc(oc), + _code_root_cl(code_root_cl), + _strong_code_root_scan_time_sec(0.0), _cards(0), _cards_done(0), _worker_i(worker_i), @@ -120,7 +132,7 @@ { _g1h = G1CollectedHeap::heap(); _bot_shared = _g1h->bot_shared(); - _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set()); + _ct_bs = _g1h->g1_barrier_set(); _block_size = MAX2(G1RSetScanBlockSize, 1); } @@ -160,6 +172,12 @@ card_start, card_start + G1BlockOffsetSharedArray::N_words); } + void scan_strong_code_roots(HeapRegion* r) { + double scan_start = os::elapsedTime(); + r->strong_code_roots_do(_code_root_cl); + _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start); + } + bool doHeapRegion(HeapRegion* r) { assert(r->in_collection_set(), "should only be called on elements of CS."); HeapRegionRemSet* hrrs = r->rem_set(); @@ -173,6 +191,7 @@ // _try_claimed || r->claim_iter() // is true: either we're supposed to work on claimed-but-not-complete // regions, or we successfully claimed the region. 
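set_partial_array_mask()/clear_partial_array_mask() above tag a spare low bit of a pointer-sized task-queue entry so that a partially scanned array can be told apart from a plain object reference; alignment guarantees the bit is otherwise zero. A standalone sketch of that low-bit tagging (the mask value and names here are illustrative):

#include <cassert>
#include <cstdint>

const uintptr_t kPartialMask = 0x2;  // spare bit below the alignment

uintptr_t set_mask(void* obj) {
  assert((((uintptr_t)obj & kPartialMask) == 0) && "Information loss!");
  return (uintptr_t)obj | kPartialMask;
}

void* clear_mask(uintptr_t ref) {
  return (void*)(ref & ~kPartialMask);
}

int main() {
  int dummy_obj[4];  // sufficiently aligned stand-in object
  uintptr_t tagged = set_mask(dummy_obj);
  assert(tagged & kPartialMask);                   // recognizably "partial"
  assert(clear_mask(tagged) == (void*)dummy_obj);  // original recovered
  return 0;
}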
+ HeapRegionRemSetIterator iter(hrrs); size_t card_index; @@ -205,30 +224,43 @@ } } if (!_try_claimed) { + // Scan the strong code root list attached to the current region + scan_strong_code_roots(r); + hrrs->set_iter_complete(); } return false; } + + double strong_code_root_scan_time_sec() { + return _strong_code_root_scan_time_sec; + } + size_t cards_done() { return _cards_done;} size_t cards_looked_up() { return _cards;} }; -void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) { +void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, + CodeBlobToOopClosure* code_root_cl, + int worker_i) { double rs_time_start = os::elapsedTime(); HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i); - ScanRSClosure scanRScl(oc, worker_i); + ScanRSClosure scanRScl(oc, code_root_cl, worker_i); _g1->collection_set_iterate_from(startRegion, &scanRScl); scanRScl.set_try_claimed(); _g1->collection_set_iterate_from(startRegion, &scanRScl); - double scan_rs_time_sec = os::elapsedTime() - rs_time_start; + double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) + - scanRScl.strong_code_root_scan_time_sec(); - assert( _cards_scanned != NULL, "invariant" ); + assert(_cards_scanned != NULL, "invariant"); _cards_scanned[worker_i] = scanRScl.cards_done(); _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); + _g1p->phase_times()->record_strong_code_root_scan_time(worker_i, + scanRScl.strong_code_root_scan_time_sec() * 1000.0); } // Closure used for updating RSets and recording references that @@ -288,7 +320,8 @@ } void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc, - int worker_i) { + CodeBlobToOopClosure* code_root_cl, + int worker_i) { #if CARD_REPEAT_HISTO ct_freq_update_histo_and_reset(); #endif @@ -328,7 +361,7 @@ _g1p->phase_times()->record_update_rs_time(worker_i, 0.0); } if (G1UseParallelRSetScanning || (worker_i == 0)) { - scanRS(oc, worker_i); + scanRS(oc, code_root_cl, worker_i); } else { _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0); } @@ -474,12 +507,7 @@ ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) : _g1h(G1CollectedHeap::heap()), _region_bm(region_bm), _card_bm(card_bm), - _ctbs(NULL) - { - ModRefBarrierSet* bs = _g1h->mr_bs(); - guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); - _ctbs = (CardTableModRefBS*)bs; - } + _ctbs(_g1h->g1_barrier_set()) {} bool doHeapRegion(HeapRegion* r) { if (!r->continuesHumongous()) { @@ -700,19 +728,19 @@ return has_refs_into_cset; } -void G1RemSet::print_periodic_summary_info() { +void G1RemSet::print_periodic_summary_info(const char* header) { G1RemSetSummary current; - current.initialize(this, n_workers()); + current.initialize(this); _prev_period_summary.subtract_from(&current); - print_summary_info(&_prev_period_summary); + print_summary_info(&_prev_period_summary, header); _prev_period_summary.set(&current); } void G1RemSet::print_summary_info() { G1RemSetSummary current; - current.initialize(this, n_workers()); + current.initialize(this); print_summary_info(&current, " Cumulative RS summary"); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1RemSet.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -81,14 +81,23 @@ G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); ~G1RemSet(); - // Invoke "blk->do_oop" on all pointers into the CS in objects in regions - // outside the CS (having invoked "blk->set_region" to set the
"from" - // region correctly beforehand.) The "worker_i" param is for the - // parallel case where the number of the worker thread calling this - // function can be helpful in partitioning the work to be done. It - // should be the same as the "i" passed to the calling thread's - // work(i) function. In the sequential case this param will be ingored. - void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i); + // Invoke "blk->do_oop" on all pointers into the collection set + // from objects in regions outside the collection set (having + // invoked "blk->set_region" to set the "from" region correctly + // beforehand.) + // + // Invoke code_root_cl->do_code_blob on the unmarked nmethods + // on the strong code roots list for each region in the + // collection set. + // + // The "worker_i" param is for the parallel case where the id + // of the worker thread calling this function can be helpful in + // partitioning the work to be done. It should be the same as + // the "i" passed to the calling thread's work(i) function. + // In the sequential case this param will be ignored. + void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, + CodeBlobToOopClosure* code_root_cl, + int worker_i); // Prepare for and cleanup after an oops_into_collection_set_do // call. Must call each of these once before and after (in sequential @@ -98,7 +107,10 @@ void prepare_for_oops_into_collection_set_do(); void cleanup_after_oops_into_collection_set_do(); - void scanRS(OopsInHeapRegionClosure* oc, int worker_i); + void scanRS(OopsInHeapRegionClosure* oc, + CodeBlobToOopClosure* code_root_cl, + int worker_i); + void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i); CardTableModRefBS* ct_bs() { return _ct_bs; } @@ -133,7 +145,7 @@ virtual void print_summary_info(); // Print accumulated summary info from the last time called. - virtual void print_periodic_summary_info(); + virtual void print_periodic_summary_info(const char* header); // Prepare remembered set for verification. 
virtual void prepare_for_verify(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -77,12 +77,12 @@ return _rs_threads_vtimes[thread]; } -void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) { +void G1RemSetSummary::initialize(G1RemSet* remset) { assert(_rs_threads_vtimes == NULL, "just checking"); assert(remset != NULL, "just checking"); _remset = remset; - _num_vtimes = num_workers; + _num_vtimes = ConcurrentG1Refine::thread_num(); _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC); memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes); @@ -125,54 +125,216 @@ _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime; } -class HRRSStatsIter: public HeapRegionClosure { - size_t _occupied; - size_t _total_mem_sz; - size_t _max_mem_sz; - HeapRegion* _max_mem_sz_region; -public: - HRRSStatsIter() : - _occupied(0), - _total_mem_sz(0), - _max_mem_sz(0), - _max_mem_sz_region(NULL) - {} - - bool doHeapRegion(HeapRegion* r) { - size_t mem_sz = r->rem_set()->mem_size(); - if (mem_sz > _max_mem_sz) { - _max_mem_sz = mem_sz; - _max_mem_sz_region = r; - } - _total_mem_sz += mem_sz; - size_t occ = r->rem_set()->occupied(); - _occupied += occ; - return false; - } - size_t total_mem_sz() { return _total_mem_sz; } - size_t max_mem_sz() { return _max_mem_sz; } - size_t occupied() { return _occupied; } - HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; } -}; - -double calc_percentage(size_t numerator, size_t denominator) { +static double percent_of(size_t numerator, size_t denominator) { if (denominator != 0) { - return (double)numerator / denominator * 100.0; + return (double)numerator / denominator * 100.0f; } else { return 0.0f; } } +static size_t round_to_K(size_t value) { + return value / K; +} + +class RegionTypeCounter VALUE_OBJ_CLASS_SPEC { +private: + const char* _name; + + size_t _rs_mem_size; + size_t _cards_occupied; + size_t _amount; + + size_t _code_root_mem_size; + size_t _code_root_elems; + + double rs_mem_size_percent_of(size_t total) { + return percent_of(_rs_mem_size, total); + } + + double cards_occupied_percent_of(size_t total) { + return percent_of(_cards_occupied, total); + } + + double code_root_mem_size_percent_of(size_t total) { + return percent_of(_code_root_mem_size, total); + } + + double code_root_elems_percent_of(size_t total) { + return percent_of(_code_root_elems, total); + } + + size_t amount() const { return _amount; } + +public: + + RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0), + _amount(0), _code_root_mem_size(0), _code_root_elems(0) { } + + void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size, + size_t code_root_elems) { + _rs_mem_size += rs_mem_size; + _cards_occupied += cards_occupied; + _code_root_mem_size += code_root_mem_size; + _code_root_elems += code_root_elems; + _amount++; + } + + size_t rs_mem_size() const { return _rs_mem_size; } + size_t cards_occupied() const { return _cards_occupied; } + + size_t code_root_mem_size() const { return _code_root_mem_size; } + size_t code_root_elems() const { return _code_root_elems; } + + void print_rs_mem_info_on(outputStream * out, size_t total) { + out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), 
amount(), _name);
+  }
+
+  void print_cards_occupied_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_elems_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+  }
+};
+
+
+class HRRSStatsIter: public HeapRegionClosure {
+private:
+  RegionTypeCounter _young;
+  RegionTypeCounter _humongous;
+  RegionTypeCounter _free;
+  RegionTypeCounter _old;
+  RegionTypeCounter _all;
+
+  size_t _max_rs_mem_sz;
+  HeapRegion* _max_rs_mem_sz_region;
+
+  size_t total_rs_mem_sz() const { return _all.rs_mem_size(); }
+  size_t total_cards_occupied() const { return _all.cards_occupied(); }
+
+  size_t max_rs_mem_sz() const { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() const { return _max_rs_mem_sz_region; }
+
+  size_t _max_code_root_mem_sz;
+  HeapRegion* _max_code_root_mem_sz_region;
+
+  size_t total_code_root_mem_sz() const { return _all.code_root_mem_size(); }
+  size_t total_code_root_elems() const { return _all.code_root_elems(); }
+
+  size_t max_code_root_mem_sz() const { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
+
+public:
+  HRRSStatsIter() : _all("All"), _young("Young"), _humongous("Humongous"),
+    _free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
+    _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
+  {}
+
+  bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+
+    // HeapRegionRemSet::mem_size() includes the
+    // size of the strong code roots
+    size_t rs_mem_sz = hrrs->mem_size();
+    if (rs_mem_sz > _max_rs_mem_sz) {
+      _max_rs_mem_sz = rs_mem_sz;
+      _max_rs_mem_sz_region = r;
+    }
+    size_t occupied_cards = hrrs->occupied();
+    size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
+    if (code_root_mem_sz > max_code_root_mem_sz()) {
+      // Record both the new maximum and the region holding it.
+      _max_code_root_mem_sz = code_root_mem_sz;
+      _max_code_root_mem_sz_region = r;
+    }
+    size_t code_root_elems = hrrs->strong_code_roots_list_length();
+
+    RegionTypeCounter* current = NULL;
+    if (r->is_young()) {
+      current = &_young;
+    } else if (r->isHumongous()) {
+      current = &_humongous;
+    } else if (r->is_empty()) {
+      current = &_free;
+    } else {
+      current = &_old;
+    }
+    current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+    _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+
+    return false;
+  }
+
+  void print_summary_on(outputStream* out) {
+    RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
+
+    out->print_cr("\n Current rem set statistics");
+    out->print_cr("  Total per region rem sets sizes = "SIZE_FORMAT"K."
+ " Max = "SIZE_FORMAT"K.", + round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz())); + for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + (*current)->print_rs_mem_info_on(out, total_rs_mem_sz()); + } + + out->print_cr(" Static structures = "SIZE_FORMAT"K," + " free_lists = "SIZE_FORMAT"K.", + round_to_K(HeapRegionRemSet::static_mem_size()), + round_to_K(HeapRegionRemSet::fl_mem_size())); + + out->print_cr(" "SIZE_FORMAT" occupied cards represented.", + total_cards_occupied()); + for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + (*current)->print_cards_occupied_info_on(out, total_cards_occupied()); + } + + // Largest sized rem set region statistics + HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set(); + out->print_cr(" Region with largest rem set = "HR_FORMAT", " + "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.", + HR_FORMAT_PARAMS(max_rs_mem_sz_region()), + round_to_K(rem_set->mem_size()), + round_to_K(rem_set->occupied())); + + // Strong code root statistics + HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set(); + out->print_cr(" Total heap region code root sets sizes = "SIZE_FORMAT"K." + " Max = "SIZE_FORMAT"K.", + round_to_K(total_code_root_mem_sz()), + round_to_K(max_code_root_rem_set->strong_code_roots_mem_size())); + for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz()); + } + + out->print_cr(" "SIZE_FORMAT" code roots represented.", + total_code_root_elems()); + for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + (*current)->print_code_root_elems_info_on(out, total_code_root_elems()); + } + + out->print_cr(" Region with largest amount of code roots = "HR_FORMAT", " + "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".", + HR_FORMAT_PARAMS(max_code_root_mem_sz_region()), + round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()), + round_to_K(max_code_root_rem_set->strong_code_roots_list_length())); + } +}; + void G1RemSetSummary::print_on(outputStream* out) { - out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards", + out->print_cr("\n Recent concurrent refinement statistics"); + out->print_cr(" Processed "SIZE_FORMAT" cards", num_concurrent_refined_cards()); out->print_cr(" Of %d completed buffers:", num_processed_buf_total()); out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.", num_processed_buf_total(), - calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total())); + percent_of(num_processed_buf_rs_threads(), num_processed_buf_total())); out->print_cr(" %8d (%5.1f%%) by mutator threads.", num_processed_buf_mutator(), - calc_percentage(num_processed_buf_mutator(), num_processed_buf_total())); + percent_of(num_processed_buf_mutator(), num_processed_buf_total())); + out->print_cr(" Did %d coarsenings.", num_coarsenings()); out->print_cr(" Concurrent RS threads times (s)"); out->print(" "); for (uint i = 0; i < _num_vtimes; i++) { @@ -184,22 +346,5 @@ HRRSStatsIter blk; G1CollectedHeap::heap()->heap_region_iterate(&blk); - out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K." 
- " Max = "SIZE_FORMAT"K.", - blk.total_mem_sz()/K, blk.max_mem_sz()/K); - out->print_cr(" Static structures = "SIZE_FORMAT"K," - " free_lists = "SIZE_FORMAT"K.", - HeapRegionRemSet::static_mem_size() / K, - HeapRegionRemSet::fl_mem_size() / K); - out->print_cr(" "SIZE_FORMAT" occupied cards represented.", - blk.occupied()); - HeapRegion* max_mem_sz_region = blk.max_mem_sz_region(); - HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set(); - out->print_cr(" Max size region = "HR_FORMAT", " - "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.", - HR_FORMAT_PARAMS(max_mem_sz_region), - (rem_set->mem_size() + K - 1)/K, - (rem_set->occupied() + K - 1)/K); - - out->print_cr(" Did %d coarsenings.", num_coarsenings()); + blk.print_summary_on(out); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -84,7 +84,7 @@ void subtract_from(G1RemSetSummary* other); // initialize and get the first sampling - void initialize(G1RemSet* remset, uint num_workers); + void initialize(G1RemSet* remset); void print_on(outputStream* out); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,27 @@ } } +bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) { + jbyte val = _byte_map[card_index]; + // It's already processed + if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { + return false; + } + // Cached bit can be installed either on a clean card or on a claimed card. + jbyte new_val = val; + if (val == clean_card_val()) { + new_val = (jbyte)deferred_card_val(); + } else { + if (val & claimed_card_val()) { + new_val = val | (jbyte)deferred_card_val(); + } + } + if (new_val != val) { + Atomic::cmpxchg(new_val, &_byte_map[card_index], val); + } + return true; +} + G1SATBCardTableLoggingModRefBS:: G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions) : @@ -95,7 +116,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field, oop new_val) { uintptr_t field_uint = (uintptr_t)field; - uintptr_t new_val_uint = (uintptr_t)new_val; + uintptr_t new_val_uint = cast_from_oop(new_val); uintptr_t comb = field_uint ^ new_val_uint; comb = comb >> HeapRegion::LogOfHRGrainBytes; if (comb == 0) return; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -89,6 +89,42 @@ write_ref_array_pre_work(dst, count); } } + +/* + Claimed and deferred bits are used together in G1 during the evacuation + pause. These bits can have the following state transitions: + 1. The claimed bit can be put over any other card state. 
Except that
+   the "dirty -> dirty and claimed" transition is checked for in
+   G1 code and is not used.
+   2. The deferred bit can be set only if the previous state of the card
+   was either clean or claimed. mark_card_deferred() is wait-free.
+   We do not care whether the operation is successful, because if
+   it is not, the only result is a duplicate entry in the update
+   buffer caused by the "cache miss". So it's not worth spinning.
+ */
+
+  bool is_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
+  }
+
+  void set_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    if (val == clean_card_val()) {
+      val = (jbyte)claimed_card_val();
+    } else {
+      val |= (jbyte)claimed_card_val();
+    }
+    _byte_map[card_index] = val;
+  }
+
+  bool mark_card_deferred(size_t card_index);
+
+  bool is_card_deferred(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
+  }
+
 };

 // Adds card-table logging to the post-barrier.
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/g1_globals.hpp
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -319,7 +319,10 @@
                                                                             \
   diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
           "If true, perform verification of each heap region's "           \
-          "remembered set when verifying the heap during a full GC.")
+          "remembered set when verifying the heap during a full GC.")      \
+                                                                            \
+  diagnostic(bool, G1VerifyHeapRegionCodeRoots, false,                      \
+          "Verify the code root lists attached to each heap region.")

 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Oct 11 10:38:03 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
 */

 #include "precompiled.hpp"
+#include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -50,144 +51,6 @@
                                  OopClosure* oc) :
   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

-class VerifyLiveClosure: public OopClosure {
-private:
-  G1CollectedHeap* _g1h;
-  CardTableModRefBS* _bs;
-  oop _containing_obj;
-  bool _failures;
-  int _n_failures;
-  VerifyOption _vo;
-public:
-  // _vo == UsePrevMarking -> use "prev" marking information,
-  // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord -> use mark word from object header.
- VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : - _g1h(g1h), _bs(NULL), _containing_obj(NULL), - _failures(false), _n_failures(0), _vo(vo) - { - BarrierSet* bs = _g1h->barrier_set(); - if (bs->is_a(BarrierSet::CardTableModRef)) - _bs = (CardTableModRefBS*)bs; - } - - void set_containing_obj(oop obj) { - _containing_obj = obj; - } - - bool failures() { return _failures; } - int n_failures() { return _n_failures; } - - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } - - void print_object(outputStream* out, oop obj) { -#ifdef PRODUCT - Klass* k = obj->klass(); - const char* class_name = InstanceKlass::cast(k)->external_name(); - out->print_cr("class name %s", class_name); -#else // PRODUCT - obj->print_on(out); -#endif // PRODUCT - } - - template - void do_oop_work(T* p) { - assert(_containing_obj != NULL, "Precondition"); - assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), - "Precondition"); - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - bool failed = false; - if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { - MutexLockerEx x(ParGCRareEvent_lock, - Mutex::_no_safepoint_check_flag); - - if (!_failures) { - gclog_or_tty->print_cr(""); - gclog_or_tty->print_cr("----------"); - } - if (!_g1h->is_in_closed_subset(obj)) { - HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); - gclog_or_tty->print_cr("Field "PTR_FORMAT - " of live obj "PTR_FORMAT" in region " - "["PTR_FORMAT", "PTR_FORMAT")", - p, (void*) _containing_obj, - from->bottom(), from->end()); - print_object(gclog_or_tty, _containing_obj); - gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap", - (void*) obj); - } else { - HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); - HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); - gclog_or_tty->print_cr("Field "PTR_FORMAT - " of live obj "PTR_FORMAT" in region " - "["PTR_FORMAT", "PTR_FORMAT")", - p, (void*) _containing_obj, - from->bottom(), from->end()); - print_object(gclog_or_tty, _containing_obj); - gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region " - "["PTR_FORMAT", "PTR_FORMAT")", - (void*) obj, to->bottom(), to->end()); - print_object(gclog_or_tty, obj); - } - gclog_or_tty->print_cr("----------"); - gclog_or_tty->flush(); - _failures = true; - failed = true; - _n_failures++; - } - - if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) { - HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); - HeapRegion* to = _g1h->heap_region_containing(obj); - if (from != NULL && to != NULL && - from != to && - !to->isHumongous()) { - jbyte cv_obj = *_bs->byte_for_const(_containing_obj); - jbyte cv_field = *_bs->byte_for_const(p); - const jbyte dirty = CardTableModRefBS::dirty_card_val(); - - bool is_bad = !(from->is_young() - || to->rem_set()->contains_reference(p) - || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed - (_containing_obj->is_objArray() ? 
- cv_field == dirty - : cv_obj == dirty || cv_field == dirty)); - if (is_bad) { - MutexLockerEx x(ParGCRareEvent_lock, - Mutex::_no_safepoint_check_flag); - - if (!_failures) { - gclog_or_tty->print_cr(""); - gclog_or_tty->print_cr("----------"); - } - gclog_or_tty->print_cr("Missing rem set entry:"); - gclog_or_tty->print_cr("Field "PTR_FORMAT" " - "of obj "PTR_FORMAT", " - "in region "HR_FORMAT, - p, (void*) _containing_obj, - HR_FORMAT_PARAMS(from)); - _containing_obj->print_on(gclog_or_tty); - gclog_or_tty->print_cr("points to obj "PTR_FORMAT" " - "in region "HR_FORMAT, - (void*) obj, - HR_FORMAT_PARAMS(to)); - obj->print_on(gclog_or_tty); - gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", - cv_obj, cv_field); - gclog_or_tty->print_cr("----------"); - gclog_or_tty->flush(); - _failures = true; - if (!failed) _n_failures++; - } - } - } - } - } -}; - template HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, HeapRegion* hr, @@ -286,18 +149,15 @@ // many regions in the heap (based on the min heap size). #define TARGET_REGION_NUMBER 2048 -void HeapRegion::setup_heap_region_size(uintx min_heap_size) { - // region_size in bytes +size_t HeapRegion::max_region_size() { + return (size_t)MAX_REGION_SIZE; +} + +void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) { uintx region_size = G1HeapRegionSize; if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { - // We base the automatic calculation on the min heap size. This - // can be problematic if the spread between min and max is quite - // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on - // the max size, the region size might be way too large for the - // min size. Either way, some users might have to set the region - // size manually for some -Xms / -Xmx combos. - - region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER, + size_t average_heap_size = (initial_heap_size + max_heap_size) / 2; + region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER, (uintx) MIN_REGION_SIZE); } @@ -368,7 +228,7 @@ if (!par) { // If this is parallel, this will be done later. HeapRegionRemSet* hrrs = rem_set(); - if (hrrs != NULL) hrrs->clear(); + hrrs->clear(); _claimed = InitialClaimValue; } zero_marked_bytes(); @@ -505,6 +365,7 @@ _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0), _predicted_bytes_to_copy(0) { + _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); _orig_end = mr.end(); // Note that initialize() will set the start of the unmarked area of the // region. 
@@ -512,8 +373,6 @@
   set_top(bottom());
   set_saved_mark();

-  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
-
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
@@ -733,6 +592,160 @@
   return NULL;
 }

+// Code roots support
+
+void HeapRegion::add_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->add_strong_code_root(nm);
+}
+
+void HeapRegion::remove_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->remove_strong_code_root(nm);
+}
+
+void HeapRegion::migrate_strong_code_roots() {
+  assert(in_collection_set(), "only collection set regions");
+  assert(!isHumongous(), "not humongous regions");
+
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->migrate_strong_code_roots();
+}
+
+void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->strong_code_roots_do(blk);
+}
+
+class VerifyStrongCodeRootOopClosure: public OopClosure {
+  const HeapRegion* _hr;
+  nmethod* _nm;
+  bool _failures;
+  bool _has_oops_in_region;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+      // Note: not all the oops embedded in the nmethod are in the
+      // current region. We only look at those which are.
+      if (_hr->is_in(obj)) {
+        // Object is in the region. Check that it is below top.
+        if (_hr->top() <= (HeapWord*)obj) {
+          // Object is above top
+          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
+                                 "top "PTR_FORMAT,
+                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
+          _failures = true;
+          return;
+        }
+        // Nmethod has at least one oop in the current region
+        _has_oops_in_region = true;
+      }
+    }
+  }
+
+public:
+  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
+    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p) { do_oop_work(p); }
+
+  bool failures() { return _failures; }
+  bool has_oops_in_region() { return _has_oops_in_region; }
+};
+
+class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+  const HeapRegion* _hr;
+  bool _failures;
+public:
+  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
+    _hr(hr), _failures(false) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      // Verify that the nmethod is live
+      if (!nm->is_alive()) {
+        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
+                               PTR_FORMAT" in its strong code roots",
+                               _hr->bottom(), _hr->end(), nm);
+        _failures = true;
+      } else {
+        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
+        nm->oops_do(&oop_cl);
+        if (!oop_cl.has_oops_in_region()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
+                                 PTR_FORMAT" in its strong code roots "
+                                 "with no pointers into region",
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        } else if (oop_cl.failures()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
+                                 "failures for nmethod "PTR_FORMAT,
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        }
+      }
+    }
+  }
+
+  bool failures() { return _failures; }
+};
+
+void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
+  if (!G1VerifyHeapRegionCodeRoots) {
+    // We're not verifying code roots.
+    return;
+  }
+  if (vo == VerifyOption_G1UseMarkWord) {
+    // Marking verification during a full GC is performed after class
+    // unloading, code cache unloading, etc., so the strong code roots
+    // attached to each heap region are in an inconsistent state. They won't
+    // be consistent until the strong code roots are rebuilt after the
+    // actual GC. Skip verifying the strong code roots at this
+    // time.
+    assert(VerifyDuringGC, "only way to get here");
+    return;
+  }
+
+  HeapRegionRemSet* hrrs = rem_set();
+  int strong_code_roots_length = hrrs->strong_code_roots_list_length();
+
+  // If this region is empty then there should be no entries
+  // on its strong code root list.
+  if (is_empty()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  // An H-region should have an empty strong code root list.
+  if (isHumongous()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
+  strong_code_roots_do(&cb_cl);
+
+  if (cb_cl.failures()) {
+    *failures = true;
+  }
+}
+
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
   if (isHumongous()) {
@@ -761,10 +774,143 @@
   G1OffsetTableContigSpace::print_on(st);
 }

-void HeapRegion::verify() const {
-  bool dummy = false;
-  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
-}
+class VerifyLiveClosure: public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  CardTableModRefBS* _bs;
+  oop _containing_obj;
+  bool _failures;
+  int _n_failures;
+  VerifyOption _vo;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord -> use mark word from object header.
+ VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : + _g1h(g1h), _bs(NULL), _containing_obj(NULL), + _failures(false), _n_failures(0), _vo(vo) + { + BarrierSet* bs = _g1h->barrier_set(); + if (bs->is_a(BarrierSet::CardTableModRef)) + _bs = (CardTableModRefBS*)bs; + } + + void set_containing_obj(oop obj) { + _containing_obj = obj; + } + + bool failures() { return _failures; } + int n_failures() { return _n_failures; } + + virtual void do_oop(narrowOop* p) { do_oop_work(p); } + virtual void do_oop( oop* p) { do_oop_work(p); } + + void print_object(outputStream* out, oop obj) { +#ifdef PRODUCT + Klass* k = obj->klass(); + const char* class_name = InstanceKlass::cast(k)->external_name(); + out->print_cr("class name %s", class_name); +#else // PRODUCT + obj->print_on(out); +#endif // PRODUCT + } + + template + void do_oop_work(T* p) { + assert(_containing_obj != NULL, "Precondition"); + assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), + "Precondition"); + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + bool failed = false; + if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { + MutexLockerEx x(ParGCRareEvent_lock, + Mutex::_no_safepoint_check_flag); + + if (!_failures) { + gclog_or_tty->print_cr(""); + gclog_or_tty->print_cr("----------"); + } + if (!_g1h->is_in_closed_subset(obj)) { + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of live obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + p, (void*) _containing_obj, + from->bottom(), from->end()); + print_object(gclog_or_tty, _containing_obj); + gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap", + (void*) obj); + } else { + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); + HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of live obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + p, (void*) _containing_obj, + from->bottom(), from->end()); + print_object(gclog_or_tty, _containing_obj); + gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + (void*) obj, to->bottom(), to->end()); + print_object(gclog_or_tty, obj); + } + gclog_or_tty->print_cr("----------"); + gclog_or_tty->flush(); + _failures = true; + failed = true; + _n_failures++; + } + + if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) { + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); + HeapRegion* to = _g1h->heap_region_containing(obj); + if (from != NULL && to != NULL && + from != to && + !to->isHumongous()) { + jbyte cv_obj = *_bs->byte_for_const(_containing_obj); + jbyte cv_field = *_bs->byte_for_const(p); + const jbyte dirty = CardTableModRefBS::dirty_card_val(); + + bool is_bad = !(from->is_young() + || to->rem_set()->contains_reference(p) + || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed + (_containing_obj->is_objArray() ? 
+                                    cv_field == dirty
+                                  : cv_obj == dirty || cv_field == dirty));
+          if (is_bad) {
+            MutexLockerEx x(ParGCRareEvent_lock,
+                            Mutex::_no_safepoint_check_flag);
+
+            if (!_failures) {
+              gclog_or_tty->print_cr("");
+              gclog_or_tty->print_cr("----------");
+            }
+            gclog_or_tty->print_cr("Missing rem set entry:");
+            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                                   "of obj "PTR_FORMAT", "
+                                   "in region "HR_FORMAT,
+                                   p, (void*) _containing_obj,
+                                   HR_FORMAT_PARAMS(from));
+            _containing_obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                                   "in region "HR_FORMAT,
+                                   (void*) obj,
+                                   HR_FORMAT_PARAMS(to));
+            obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
+                                   cv_obj, cv_field);
+            gclog_or_tty->print_cr("----------");
+            gclog_or_tty->flush();
+            _failures = true;
+            if (!failed) _n_failures++;
+          }
+        }
+      }
+    }
+  }
+};

 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
@@ -805,12 +951,12 @@
       Klass* klass = obj->klass();
       if (!klass->is_metaspace_object()) {
         gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                               "not metadata", klass, obj);
+                               "not metadata", klass, (void *)obj);
         *failures = true;
         return;
       } else if (!klass->is_klass()) {
         gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                               "not a klass", klass, obj);
+                               "not a klass", klass, (void *)obj);
         *failures = true;
         return;
       } else {
@@ -825,7 +971,7 @@
       }
     }
   } else {
-    gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
+    gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
     *failures = true;
     return;
   }
@@ -904,6 +1050,13 @@
     *failures = true;
     return;
   }
+
+  verify_strong_code_roots(vo, failures);
+}
+
+void HeapRegion::verify() const {
+  bool dummy = false;
+  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }

 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegion.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Oct 11 10:38:03 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@
 class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;
+class nmethod;

 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \
@@ -354,13 +355,15 @@
     ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }

+  static size_t max_region_size();
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
- static void setup_heap_region_size(uintx min_heap_size); + static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size); enum ClaimValues { InitialClaimValue = 0, @@ -371,7 +374,8 @@ RebuildRSClaimValue = 5, ParEvacFailureClaimValue = 6, AggregateCountClaimValue = 7, - VerifyCountClaimValue = 8 + VerifyCountClaimValue = 8, + ParMarkRootClaimValue = 9 }; inline HeapWord* par_allocate_no_bot_updates(size_t word_size) { @@ -796,6 +800,25 @@ virtual void reset_after_compaction(); + // Routines for managing a list of code roots (attached to the + // this region's RSet) that point into this heap region. + void add_strong_code_root(nmethod* nm); + void remove_strong_code_root(nmethod* nm); + + // During a collection, migrate the successfully evacuated + // strong code roots that referenced into this region to the + // new regions that they now point into. Unsuccessfully + // evacuated code roots are not migrated. + void migrate_strong_code_roots(); + + // Applies blk->do_code_blob() to each of the entries in + // the strong code roots list for this region + void strong_code_roots_do(CodeBlobClosure* blk) const; + + // Verify that the entries on the strong code root list for this + // region are live and include at least one pointer into this region. + void verify_strong_code_roots(VerifyOption vo, bool* failures) const; + void print() const; void print_on(outputStream* st) const; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -33,6 +33,7 @@ #include "oops/oop.inline.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" class PerRegionTable: public CHeapObj { friend class OtherRegionsTable; @@ -90,8 +91,8 @@ gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", from, UseCompressedOops - ? oopDesc::load_decode_heap_oop((narrowOop*)from) - : oopDesc::load_decode_heap_oop((oop*)from)); + ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from) + : (void *)oopDesc::load_decode_heap_oop((oop*)from)); } HeapRegion* loc_hr = hr(); @@ -402,8 +403,8 @@ gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", from, UseCompressedOops - ? oopDesc::load_decode_heap_oop((narrowOop*)from) - : oopDesc::load_decode_heap_oop((oop*)from)); + ? 
(void *)oopDesc::load_decode_heap_oop((narrowOop*)from) + : (void *)oopDesc::load_decode_heap_oop((oop*)from)); } int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); @@ -849,7 +850,7 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr) - : _bosa(bosa), _other_regions(hr) { + : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) { reset_for_par_iteration(); } @@ -908,6 +909,12 @@ } void HeapRegionRemSet::clear() { + if (_strong_code_roots_list != NULL) { + delete _strong_code_roots_list; + } + _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC) + GrowableArray(10, 0, NULL, true); + _other_regions.clear(); assert(occupied() == 0, "Should be clear."); reset_for_par_iteration(); @@ -925,6 +932,121 @@ _other_regions.scrub(ctbs, region_bm, card_bm); } + +// Code roots support + +void HeapRegionRemSet::add_strong_code_root(nmethod* nm) { + assert(nm != NULL, "sanity"); + // Search for the code blob from the RHS to avoid + // duplicate entries as much as possible + if (_strong_code_roots_list->find_from_end(nm) < 0) { + // Code blob isn't already in the list + _strong_code_roots_list->push(nm); + } +} + +void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) { + assert(nm != NULL, "sanity"); + int idx = _strong_code_roots_list->find(nm); + if (idx >= 0) { + _strong_code_roots_list->remove_at(idx); + } + // Check that there were no duplicates + guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found"); +} + +class NMethodMigrationOopClosure : public OopClosure { + G1CollectedHeap* _g1h; + HeapRegion* _from; + nmethod* _nm; + + uint _num_self_forwarded; + + template void do_oop_work(T* p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); + if (_from->is_in(obj)) { + // Reference still points into the source region. + // Since roots are immediately evacuated this means that + // we must have self forwarded the object + assert(obj->is_forwarded(), + err_msg("code roots should be immediately evacuated. " + "Ref: "PTR_FORMAT", " + "Obj: "PTR_FORMAT", " + "Region: "HR_FORMAT, + p, (void*) obj, HR_FORMAT_PARAMS(_from))); + assert(obj->forwardee() == obj, + err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj)); + + // The object has been self forwarded. + // Note, if we're during an initial mark pause, there is + // no need to explicitly mark object. It will be marked + // during the regular evacuation failure handling code. 
+ _num_self_forwarded++; + } else { + // The reference points into a promotion or to-space region + HeapRegion* to = _g1h->heap_region_containing(obj); + to->rem_set()->add_strong_code_root(_nm); + } + } + } + +public: + NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm): + _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {} + + void do_oop(narrowOop* p) { do_oop_work(p); } + void do_oop(oop* p) { do_oop_work(p); } + + uint retain() { return _num_self_forwarded > 0; } +}; + +void HeapRegionRemSet::migrate_strong_code_roots() { + assert(hr()->in_collection_set(), "only collection set regions"); + assert(!hr()->isHumongous(), "not humongous regions"); + + ResourceMark rm; + + // List of code blobs to retain for this region + GrowableArray to_be_retained(10); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + while (_strong_code_roots_list->is_nonempty()) { + nmethod *nm = _strong_code_roots_list->pop(); + if (nm != NULL) { + NMethodMigrationOopClosure oop_cl(g1h, hr(), nm); + nm->oops_do(&oop_cl); + if (oop_cl.retain()) { + to_be_retained.push(nm); + } + } + } + + // Now push any code roots we need to retain + assert(to_be_retained.is_empty() || hr()->evacuation_failed(), + "Retained nmethod list must be empty or " + "evacuation of this region failed"); + + while (to_be_retained.is_nonempty()) { + nmethod* nm = to_be_retained.pop(); + assert(nm != NULL, "sanity"); + add_strong_code_root(nm); + } +} + +void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const { + for (int i = 0; i < _strong_code_roots_list->length(); i += 1) { + nmethod* nm = _strong_code_roots_list->at(i); + blk->do_code_blob(nm); + } +} + +size_t HeapRegionRemSet::strong_code_roots_mem_size() { + return sizeof(GrowableArray) + + _strong_code_roots_list->max_length() * sizeof(nmethod*); +} + //-------------------- Iteration -------------------- HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -37,6 +37,7 @@ class HeapRegionRemSetIterator; class PerRegionTable; class SparsePRT; +class nmethod; // Essentially a wrapper around SparsePRTCleanupTask. See // sparsePRT.hpp for more details. @@ -191,6 +192,10 @@ G1BlockOffsetSharedArray* _bosa; G1BlockOffsetSharedArray* bosa() const { return _bosa; } + // A list of code blobs (nmethods) whose code contains pointers into + // the region that owns this RSet. + GrowableArray* _strong_code_roots_list; + OtherRegionsTable _other_regions; enum ParIterState { Unclaimed, Claimed, Complete }; @@ -282,11 +287,13 @@ } // The actual # of bytes this hr_remset takes up. + // Note also includes the strong code root set. size_t mem_size() { return _other_regions.mem_size() // This correction is necessary because the above includes the second // part. - + sizeof(this) - sizeof(OtherRegionsTable); + + (sizeof(this) - sizeof(OtherRegionsTable)) + + strong_code_roots_mem_size(); } // Returns the memory occupancy of all static data structures associated @@ -304,6 +311,37 @@ bool contains_reference(OopOrNarrowOopStar from) const { return _other_regions.contains_reference(from); } + + // Routines for managing the list of code roots that point into + // the heap region that owns this RSet. 
+ void add_strong_code_root(nmethod* nm); + void remove_strong_code_root(nmethod* nm); + + // During a collection, migrate the successfully evacuated strong + // code roots that referenced into the region that owns this RSet + // to the RSets of the new regions that they now point into. + // Unsuccessfully evacuated code roots are not migrated. + void migrate_strong_code_roots(); + + // Applies blk->do_code_blob() to each of the entries in + // the strong code roots list + void strong_code_roots_do(CodeBlobClosure* blk) const; + + // Returns the number of elements in the strong code roots list + int strong_code_roots_list_length() { + return _strong_code_roots_list->length(); + } + + // Returns true if the strong code roots contains the given + // nmethod. + bool strong_code_roots_list_contains(nmethod* nm) { + return _strong_code_roots_list->contains(nm); + } + + // Returns the amount of memory, in bytes, currently + // consumed by the strong code roots. + size_t strong_code_roots_mem_size(); + void print() const; // Called during a stop-world phase to perform any deferred cleanups. diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegionSeq.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -71,27 +71,16 @@ // Public -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end, - uint max_length) { +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) { assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0, "bottom should be heap region aligned"); assert((uintptr_t) end % HeapRegion::GrainBytes == 0, "end should be heap region aligned"); - _length = 0; - _heap_bottom = bottom; - _heap_end = end; - _region_shift = HeapRegion::LogOfHRGrainBytes; _next_search_index = 0; _allocated_length = 0; - _max_length = max_length; - _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC); - memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*)); - _regions_biased = _regions - ((uintx) bottom >> _region_shift); - - assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)], - "bottom should be included in the region with index 0"); + _regions.initialize(bottom, end, HeapRegion::GrainBytes); } MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, @@ -101,15 +90,15 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); HeapWord* next_bottom = old_end; - assert(_heap_bottom <= next_bottom, "invariant"); + assert(heap_bottom() <= next_bottom, "invariant"); while (next_bottom < new_end) { - assert(next_bottom < _heap_end, "invariant"); + assert(next_bottom < heap_end(), "invariant"); uint index = length(); - assert(index < _max_length, "otherwise we cannot expand further"); + assert(index < max_length(), "otherwise we cannot expand further"); if (index == 0) { // We have not allocated any regions so far - assert(next_bottom == _heap_bottom, "invariant"); + assert(next_bottom == heap_bottom(), "invariant"); } else { // next_bottom should match the end of the last/previous region assert(next_bottom == at(index - 1)->end(), "invariant"); @@ -122,8 +111,8 @@ // allocation failed, we bail out and return what we have done so far return MemRegion(old_end, next_bottom); } - assert(_regions[index] == NULL, "invariant"); - _regions[index] = new_hr; + assert(_regions.get_by_index(index) == NULL, "invariant"); + _regions.set_by_index(index, new_hr); increment_allocated_length(); } // Have to increment the length first, otherwise 
we will get an @@ -228,26 +217,26 @@ #ifndef PRODUCT void HeapRegionSeq::verify_optional() { - guarantee(_length <= _allocated_length, + guarantee(length() <= _allocated_length, err_msg("invariant: _length: %u _allocated_length: %u", - _length, _allocated_length)); - guarantee(_allocated_length <= _max_length, + length(), _allocated_length)); + guarantee(_allocated_length <= max_length(), err_msg("invariant: _allocated_length: %u _max_length: %u", - _allocated_length, _max_length)); - guarantee(_next_search_index <= _length, + _allocated_length, max_length())); + guarantee(_next_search_index <= length(), err_msg("invariant: _next_search_index: %u _length: %u", - _next_search_index, _length)); + _next_search_index, length())); - HeapWord* prev_end = _heap_bottom; + HeapWord* prev_end = heap_bottom(); for (uint i = 0; i < _allocated_length; i += 1) { - HeapRegion* hr = _regions[i]; + HeapRegion* hr = _regions.get_by_index(i); guarantee(hr != NULL, err_msg("invariant: i: %u", i)); guarantee(hr->bottom() == prev_end, err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, i, HR_FORMAT_PARAMS(hr), prev_end)); guarantee(hr->hrs_index() == i, err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index())); - if (i < _length) { + if (i < length()) { // Asserts will fire if i is >= _length HeapWord* addr = hr->bottom(); guarantee(addr_to_region(addr) == hr, "sanity"); @@ -265,8 +254,8 @@ prev_end = hr->end(); } } - for (uint i = _allocated_length; i < _max_length; i += 1) { - guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i)); + for (uint i = _allocated_length; i < max_length(); i += 1) { + guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i)); } } #endif // PRODUCT diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegionSeq.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,10 +25,17 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP +#include "gc_implementation/g1/g1BiasedArray.hpp" + class HeapRegion; class HeapRegionClosure; class FreeRegionList; +class G1HeapRegionTable : public G1BiasedMappedArray { + protected: + virtual HeapRegion* default_value() const { return NULL; } +}; + // This class keeps track of the region metadata (i.e., HeapRegion // instances). They are kept in the _regions array in address // order. A region's index in the array corresponds to its index in @@ -44,35 +51,21 @@ // // We keep track of three lengths: // -// * _length (returned by length()) is the number of currently +// * _committed_length (returned by length()) is the number of currently // committed regions. // * _allocated_length (not exposed outside this class) is the // number of regions for which we have HeapRegions. -// * _max_length (returned by max_length()) is the maximum number of -// regions the heap can have. +// * max_length() returns the maximum number of regions the heap can have. // -// and maintain that: _length <= _allocated_length <= _max_length +// and maintain that: _committed_length <= _allocated_length <= max_length() class HeapRegionSeq: public CHeapObj { friend class VMStructs; - // The array that holds the HeapRegions. - HeapRegion** _regions; - - // Version of _regions biased to address 0 - HeapRegion** _regions_biased; + G1HeapRegionTable _regions; // The number of regions committed in the heap. 
-  uint _length;
-
-  // The address of the first reserved word in the heap.
-  HeapWord* _heap_bottom;
-
-  // The address of the last reserved word in the heap - 1.
-  HeapWord* _heap_end;
-
-  // The log of the region byte size.
-  uint _region_shift;
+  uint _committed_length;

   // A hint for which index to start searching from for humongous
   // allocations.
@@ -81,37 +74,33 @@
   // The number of regions for which we have allocated HeapRegions for.
   uint _allocated_length;

-  // The maximum number of regions in the heap.
-  uint _max_length;
-
   // Find a contiguous set of empty regions of length num, starting
   // from the given index.
   uint find_contiguous_from(uint from, uint num);

-  // Map a heap address to a biased region index. Assume that the
-  // address is valid.
-  inline uintx addr_to_index_biased(HeapWord* addr) const;
-
   void increment_allocated_length() {
-    assert(_allocated_length < _max_length, "pre-condition");
+    assert(_allocated_length < max_length(), "pre-condition");
     _allocated_length++;
   }

   void increment_length() {
-    assert(_length < _max_length, "pre-condition");
-    _length++;
+    assert(length() < max_length(), "pre-condition");
+    _committed_length++;
   }

   void decrement_length() {
-    assert(_length > 0, "pre-condition");
-    _length--;
+    assert(length() > 0, "pre-condition");
+    _committed_length--;
   }

+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
 public:
   // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() { }
+  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }

-  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+  void initialize(HeapWord* bottom, HeapWord* end);

   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -126,10 +115,10 @@
   inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;

   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _length; }
+  uint length() const { return _committed_length; }

   // Return the maximum number of regions in the heap.
-  uint max_length() const { return _max_length; }
+  uint max_length() const { return (uint)_regions.length(); }

   // Expand the sequence to reflect that the heap has grown from
   // old_end to new_end.
Either create new HeapRegions, or re-use diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -28,28 +28,16 @@ #include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp" -inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const { - assert(_heap_bottom <= addr && addr < _heap_end, - err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, - addr, _heap_bottom, _heap_end)); - uintx index = (uintx) addr >> _region_shift; - return index; -} - inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const { - assert(_heap_bottom <= addr && addr < _heap_end, - err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, - addr, _heap_bottom, _heap_end)); - uintx index_biased = addr_to_index_biased(addr); - HeapRegion* hr = _regions_biased[index_biased]; + HeapRegion* hr = _regions.get_by_address(addr); assert(hr != NULL, "invariant"); return hr; } inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const { - if (addr != NULL && addr < _heap_end) { - assert(addr >= _heap_bottom, - err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom)); + if (addr != NULL && addr < heap_end()) { + assert(addr >= heap_bottom(), + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom())); return addr_to_region_unsafe(addr); } return NULL; @@ -57,7 +45,7 @@ inline HeapRegion* HeapRegionSeq::at(uint index) const { assert(index < length(), "pre-condition"); - HeapRegion* hr = _regions[index]; + HeapRegion* hr = _regions.get_by_index(index); assert(hr != NULL, "sanity"); assert(hr->hrs_index() == index, "sanity"); return hr; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/ptrQueue.hpp --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -38,6 +38,7 @@ class PtrQueueSet; class PtrQueue VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; protected: // The ptr queue set to which this queue belongs. 
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/vmStructs_g1.hpp --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -31,10 +31,17 @@ #define VM_STRUCTS_G1(nonstatic_field, static_field) \ \ - static_field(HeapRegion, GrainBytes, size_t) \ + static_field(HeapRegion, GrainBytes, size_t) \ + static_field(HeapRegion, LogOfHRGrainBytes, int) \ \ - nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \ - nonstatic_field(HeapRegionSeq, _length, uint) \ + nonstatic_field(G1HeapRegionTable, _base, address) \ + nonstatic_field(G1HeapRegionTable, _length, size_t) \ + nonstatic_field(G1HeapRegionTable, _biased_base, address) \ + nonstatic_field(G1HeapRegionTable, _bias, size_t) \ + nonstatic_field(G1HeapRegionTable, _shift_by, uint) \ + \ + nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \ + nonstatic_field(HeapRegionSeq, _committed_length, uint) \ \ nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \ nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \ @@ -57,6 +64,8 @@ #define VM_TYPES_G1(declare_type, declare_toplevel_type) \ \ + declare_toplevel_type(G1HeapRegionTable) \ + \ declare_type(G1CollectedHeap, SharedHeap) \ \ declare_type(HeapRegion, ContiguousSpace) \ diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/g1/vm_operations_g1.cpp --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -70,9 +70,6 @@ guarantee(target_pause_time_ms > 0.0, err_msg("target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms)); - guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause, - "we can only request an allocation if the GC cause is for " - "an incremental GC pause"); _gc_cause = gc_cause; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1103,7 +1103,7 @@ } } -static const oop ClaimedForwardPtr = oop(0x4); +static const oop ClaimedForwardPtr = cast_to_oop(0x4); // Because of concurrency, there are times where an object for which // "is_forwarded()" is true contains an "interim" forwarding pointer @@ -1226,7 +1226,7 @@ if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", is_in_reserved(new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), old, new_obj, new_obj->size()); + new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size()); } #endif @@ -1347,7 +1347,7 @@ if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", is_in_reserved(new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), old, new_obj, new_obj->size()); + new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size()); } #endif @@ -1436,7 +1436,7 @@ // (although some performance comparisons would be useful since // single global lists have their own performance disadvantages // as we were made painfully aware not long ago, see 6786503). 
-#define BUSY (oop(0x1aff1aff)) +#define BUSY (cast_to_oop(0x1aff1aff)) void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { assert(is_in_reserved(from_space_obj), "Should be from this generation"); if (ParGCUseLocalOverflow) { @@ -1512,7 +1512,7 @@ if (_overflow_list == NULL) return false; // Otherwise, there was something there; try claiming the list. - oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); + oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); // Trim off a prefix of at most objsFromOverflow items Thread* tid = Thread::current(); size_t spin_count = (size_t)ParallelGCThreads; @@ -1526,7 +1526,7 @@ return false; } else if (_overflow_list != BUSY) { // try and grab the prefix - prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); + prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); } } if (prefix == NULL || prefix == BUSY) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp --- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,7 @@ Space* sp = gch->space_containing(p); oop obj = oop(sp->block_start(p)); assert((HeapWord*)obj < (HeapWord*)p, "Error"); - tty->print_cr("Object: " PTR_FORMAT, obj); + tty->print_cr("Object: " PTR_FORMAT, (void *)obj); tty->print_cr("-------"); obj->print(); tty->print_cr("-----"); @@ -110,7 +110,7 @@ if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", "forwarded ", - new_obj->klass()->internal_name(), p, obj, new_obj, new_obj->size()); + new_obj->klass()->internal_name(), p, (void *)obj, (void *)new_obj, new_obj->size()); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,10 +40,8 @@ void initialize_flags() { // Do basic sizing work - this->TwoGenerationCollectorPolicy::initialize_flags(); + TwoGenerationCollectorPolicy::initialize_flags(); - // If the user hasn't explicitly set the number of worker - // threads, set the count. 
assert(UseSerialGC || !FLAG_IS_DEFAULT(ParallelGCThreads) || (ParallelGCThreads > 0), @@ -68,9 +66,6 @@ size_t min_old_gen_size() { return _min_gen1_size; } size_t old_gen_size() { return _initial_gen1_size; } size_t max_old_gen_size() { return _max_gen1_size; } - - size_t metaspace_size() { return MetaspaceSize; } - size_t max_metaspace_size() { return MaxMetaspaceSize; } }; #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -216,6 +216,7 @@ young_gen()->update_counters(); old_gen()->update_counters(); MetaspaceCounters::update_performance_counters(); + CompressedClassSpaceCounters::update_performance_counters(); } size_t ParallelScavengeHeap::capacity() const { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -86,6 +86,11 @@ set_alignment(_old_gen_alignment, intra_heap_alignment()); } + // Return the (conservative) maximum heap alignment + static size_t conservative_max_heap_alignment() { + return intra_heap_alignment(); + } + // For use by VM operations enum CollectionType { Scavenge, @@ -122,7 +127,7 @@ // The alignment used for eden and survivors within the young gen // and for boundary between young gen and old gen. 
- size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; } + static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; } size_t capacity() const; size_t used() const; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -53,7 +53,6 @@ // Forward decls class elapsedTimer; -class GenerationSizer; class PSAdaptiveSizePolicy : public AdaptiveSizePolicy { friend class PSGCAdaptivePolicyCounters; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -26,7 +26,6 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" -#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -27,7 +27,6 @@ #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" -#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" #include "gc_implementation/parallelScavenge/pcTasks.hpp" #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -333,7 +333,7 @@ gclog_or_tty->print_cr("{%s %s 0x%x (%d)}", "promotion-failure", obj->klass()->internal_name(), - obj, obj->size()); + (void *)obj, obj->size()); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Thu Oct 10 18:26:22 2013 +0200 +++ 
b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -126,7 +126,7 @@ oop* mask_chunked_array_oop(oop obj) { assert(!is_oop_masked((oop*) obj), "invariant"); - oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK); + oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK); assert(is_oop_masked(ret), "invariant"); return ret; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -225,7 +225,7 @@ if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), o, new_obj, new_obj->size()); + new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size()); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -27,7 +27,6 @@ #include "code/codeCache.hpp" #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" -#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp" #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,7 +81,7 @@ if (TraceScavenge && o->is_forwarded()) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", "forwarding", - new_obj->klass()->internal_name(), o, new_obj, new_obj->size()); + new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size()); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/shared/allocationStats.hpp --- a/src/share/vm/gc_implementation/shared/allocationStats.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -26,11 +26,9 @@ #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS -#include "gc_implementation/shared/gcUtil.hpp" #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" -#endif // INCLUDE_ALL_GCS +#include "gc_implementation/shared/gcUtil.hpp" class AllocationStats VALUE_OBJ_CLASS_SPEC { // A duration threshold (in ms) used to filter diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/shared/gcTraceSend.cpp --- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" +#include "runtime/os.hpp" #include "trace/tracing.hpp" #include "trace/traceBackend.hpp" #if INCLUDE_ALL_GCS @@ -54,11 +55,12 @@ } void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { - EventGCReferenceStatistics e; + EventGCReferenceStatistics e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); e.set_type((u1)type); e.set_count(count); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -105,20 +107,22 @@ } void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { - EventPromotionFailed e; + EventPromotionFailed e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); e.set_data(to_trace_struct(pf_info)); e.set_thread(pf_info.thread()->thread_id()); + e.set_endtime(os::elapsed_counter()); e.commit(); } } // Common to CMS and G1 void OldGCTracer::send_concurrent_mode_failure_event() { - EventConcurrentModeFailure e; + EventConcurrentModeFailure e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -136,7 +140,7 @@ } void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { - EventEvacuationInfo e; + EventEvacuationInfo e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); e.set_cSetRegions(info->collectionset_regions()); @@ -147,15 +151,17 @@ e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); e.set_bytesCopied(info->bytes_copied()); e.set_regionsFreed(info->regions_freed()); + e.set_endtime(os::elapsed_counter()); e.commit(); } } void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { - EventEvacuationFailed e; + EventEvacuationFailed e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); e.set_data(to_trace_struct(ef_info)); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -189,12 +195,13 @@ void visit(const GCHeapSummary* heap_summary) const { const VirtualSpaceSummary& heap_space = heap_summary->heap(); - EventGCHeapSummary e; + EventGCHeapSummary e(UNTIMED); if (e.should_commit()) { e.set_gcId(_id); e.set_when((u1)_when); e.set_heapSpace(to_trace_struct(heap_space)); e.set_heapUsed(heap_summary->used()); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -209,7 +216,7 @@ const SpaceSummary& from_space = ps_heap_summary->from(); const SpaceSummary& to_space = ps_heap_summary->to(); - EventPSHeapSummary e; + EventPSHeapSummary e(UNTIMED); if (e.should_commit()) { e.set_gcId(_id); e.set_when((u1)_when); @@ -220,6 +227,7 @@ 
e.set_edenSpace(to_trace_struct(ps_heap_summary->eden())); e.set_fromSpace(to_trace_struct(ps_heap_summary->from())); e.set_toSpace(to_trace_struct(ps_heap_summary->to())); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -241,13 +249,14 @@ } void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { - EventMetaspaceSummary e; + EventMetaspaceSummary e(UNTIMED); if (e.should_commit()) { e.set_gcId(_shared_gc_info.id()); e.set_when((u1) when); e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); e.set_dataSpace(to_trace_struct(meta_space_summary.data_space())); e.set_classSpace(to_trace_struct(meta_space_summary.class_space())); + e.set_endtime(os::elapsed_counter()); e.commit(); } } @@ -282,8 +291,6 @@ default: /* Ignore sending this phase */ break; } } - -#undef send_phase }; void GCTracer::send_phase_events(TimePartitions* time_partitions) const { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/shared/gcUtil.hpp --- a/src/share/vm/gc_implementation/shared/gcUtil.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,9 +144,9 @@ _padded_avg(0.0), _deviation(0.0), _padding(padding) {} // Placement support - void* operator new(size_t ignored, void* p) { return p; } + void* operator new(size_t ignored, void* p) throw() { return p; } // Allocator - void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); } + void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); } // Accessor float padded_average() const { return _padded_avg; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_implementation/shared/hSpaceCounters.hpp --- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -26,11 +26,9 @@ #define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS #include "gc_implementation/shared/generationCounters.hpp" #include "memory/generation.hpp" #include "runtime/perfData.hpp" -#endif // INCLUDE_ALL_GCS // A HSpaceCounter is a holder class for performance counters // that track a collections (logical spaces) in a heap; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_interface/collectedHeap.cpp --- a/src/share/vm/gc_interface/collectedHeap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -87,15 +87,15 @@ const MetaspaceSizes meta_space( MetaspaceAux::allocated_capacity_bytes(), MetaspaceAux::allocated_used_bytes(), - MetaspaceAux::reserved_in_bytes()); + MetaspaceAux::reserved_bytes()); const MetaspaceSizes data_space( MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType), MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType), - MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType)); + MetaspaceAux::reserved_bytes(Metaspace::NonClassType)); const MetaspaceSizes class_space( MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType), MetaspaceAux::allocated_used_bytes(Metaspace::ClassType), - MetaspaceAux::reserved_in_bytes(Metaspace::ClassType)); + MetaspaceAux::reserved_bytes(Metaspace::ClassType)); return MetaspaceSummary(meta_space, data_space, class_space); } @@ -118,6 +118,14 @@ } } +void CollectedHeap::register_nmethod(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); +} + +void CollectedHeap::unregister_nmethod(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); +} + void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) { const GCHeapSummary& heap_summary = create_heap_summary(); const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -49,6 +49,7 @@ class Thread; class ThreadClosure; class VirtualSpaceSummary; +class nmethod; class GCMessage : public FormatBuffer<1024> { public: @@ -605,6 +606,11 @@ void print_heap_before_gc(); void print_heap_after_gc(); + // Registering and unregistering an nmethod (compiled code) with the heap. + // Override with specific mechanism for each specialized heap type. 
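// CollectedHeap's default implementations of the two hooks declared below
// (see the collectedHeap.cpp hunk above) only assert that the CodeCache_lock
// is held; a collector that must scan oops embedded in compiled code can
// override them. A hypothetical subclass sketch, where MyTrackingHeap and
// _nmethod_roots are assumed names, not anything in this patch:
//
//   class MyTrackingHeap : public CollectedHeap {
//     virtual void register_nmethod(nmethod* nm) {
//       assert_locked_or_safepoint(CodeCache_lock);
//       _nmethod_roots.add(nm);      // remember code that may hold oops
//     }
//     virtual void unregister_nmethod(nmethod* nm) {
//       assert_locked_or_safepoint(CodeCache_lock);
//       _nmethod_roots.remove(nm);   // drop it when the nmethod dies
//     }
//   };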
+ virtual void register_nmethod(nmethod* nm); + virtual void unregister_nmethod(nmethod* nm); + void trace_heap_before_gc(GCTracer* gc_tracer); void trace_heap_after_gc(GCTracer* gc_tracer); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/interpreter/bytecodeTracer.cpp --- a/src/share/vm/interpreter/bytecodeTracer.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -215,7 +215,7 @@ st->print_cr(" %s", buf); } } else { - st->print_cr(" " PTR_FORMAT, (intptr_t) value); + st->print_cr(" " PTR_FORMAT, (void *)value); } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/interpreter/interpreterRuntime.cpp --- a/src/share/vm/interpreter/interpreterRuntime.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -511,15 +511,15 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode)) // resolve field - FieldAccessInfo info; + fieldDescriptor info; constantPoolHandle pool(thread, method(thread)->constants()); bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_putstatic); bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic); { JvmtiHideSingleStepping jhss(thread); - LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode), - bytecode, false, CHECK); + LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode), + bytecode, CHECK); } // end JvmtiHideSingleStepping // check if link resolution caused cpCache to be updated @@ -539,7 +539,7 @@ // class is intitialized. This is required so that access to the static // field will call the initialization function every time until the class // is completely initialized ala. in 2.17.5 in JVM Specification. - InstanceKlass *klass = InstanceKlass::cast(info.klass()()); + InstanceKlass* klass = InstanceKlass::cast(info.field_holder()); bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) && !klass->is_initialized()); Bytecodes::Code get_code = (Bytecodes::Code)0; @@ -554,9 +554,9 @@ cache_entry(thread)->set_field( get_code, put_code, - info.klass(), - info.field_index(), - info.field_offset(), + info.field_holder(), + info.index(), + info.offset(), state, info.access_flags().is_final(), info.access_flags().is_volatile(), @@ -701,29 +701,55 @@ if (already_resolved(thread)) return; if (bytecode == Bytecodes::_invokeinterface) { - if (TraceItables && Verbose) { ResourceMark rm(thread); tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string()); } + } +#ifdef ASSERT + if (bytecode == Bytecodes::_invokeinterface) { if (info.resolved_method()->method_holder() == SystemDictionary::Object_klass()) { // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec - // (see also cpCacheOop.cpp for details) + // (see also CallInfo::set_interface for details) + assert(info.call_kind() == CallInfo::vtable_call || + info.call_kind() == CallInfo::direct_call, ""); methodHandle rm = info.resolved_method(); assert(rm->is_final() || info.has_vtable_index(), "should have been set already"); - cache_entry(thread)->set_method(bytecode, rm, info.vtable_index()); + } else if (!info.resolved_method()->has_itable_index()) { + // Resolved something like CharSequence.toString. Use vtable not itable. 
+ assert(info.call_kind() != CallInfo::itable_call, ""); } else { // Setup itable entry - int index = klassItable::compute_itable_index(info.resolved_method()()); - cache_entry(thread)->set_interface_call(info.resolved_method(), index); + assert(info.call_kind() == CallInfo::itable_call, ""); + int index = info.resolved_method()->itable_index(); + assert(info.itable_index() == index, ""); } } else { - cache_entry(thread)->set_method( + assert(info.call_kind() == CallInfo::direct_call || + info.call_kind() == CallInfo::vtable_call, ""); + } +#endif + switch (info.call_kind()) { + case CallInfo::direct_call: + cache_entry(thread)->set_direct_call( + bytecode, + info.resolved_method()); + break; + case CallInfo::vtable_call: + cache_entry(thread)->set_vtable_call( bytecode, info.resolved_method(), info.vtable_index()); + break; + case CallInfo::itable_call: + cache_entry(thread)->set_itable_call( + bytecode, + info.resolved_method(), + info.itable_index()); + break; + default: ShouldNotReachHere(); } } IRT_END diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/interpreter/linkResolver.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,4 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,19 +45,6 @@ #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" -//------------------------------------------------------------------------------------------------------------------------ -// Implementation of FieldAccessInfo - -void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset, -BasicType field_type, AccessFlags access_flags) { - _klass = klass; - _name = name; - _field_index = field_index; - _field_offset = field_offset; - _field_type = field_type; - _access_flags = access_flags; -} - //------------------------------------------------------------------------------------------------------------------------ // Implementation of CallInfo @@ -66,26 +52,25 @@ void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) { int vtable_index = Method::nonvirtual_vtable_index; - set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); + set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK); } -void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) { +void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) { // This is only called for interface methods. If the resolved_method // comes from java/lang/Object, it can be the subject of a virtual call, so // we should pick the vtable index from the resolved method. - // Other than that case, there is no valid vtable index to specify. 
- int vtable_index = Method::invalid_vtable_index; - if (resolved_method->method_holder() == SystemDictionary::Object_klass()) { - assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check"); - vtable_index = resolved_method->vtable_index(); - } - set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK); + // In that case, the caller must call set_virtual instead of set_interface. + assert(resolved_method->method_holder()->is_interface(), ""); + assert(itable_index == resolved_method()->itable_index(), ""); + set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK); } void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index"); - set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK); + assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), ""); + CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call); + set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK); assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call"); } @@ -98,20 +83,29 @@ resolved_method->is_compiled_lambda_form(), "linkMethod must return one of these"); int vtable_index = Method::nonvirtual_vtable_index; - assert(resolved_method->vtable_index() == vtable_index, ""); - set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); + assert(!resolved_method->has_vtable_index(), ""); + set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK); _resolved_appendix = resolved_appendix; _resolved_method_type = resolved_method_type; } -void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { +void CallInfo::set_common(KlassHandle resolved_klass, + KlassHandle selected_klass, + methodHandle resolved_method, + methodHandle selected_method, + CallKind kind, + int index, + TRAPS) { assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond"); _resolved_klass = resolved_klass; _selected_klass = selected_klass; _resolved_method = resolved_method; _selected_method = selected_method; - _vtable_index = vtable_index; + _call_kind = kind; + _call_index = index; _resolved_appendix = Handle(); + DEBUG_ONLY(verify()); // verify before making side effects + if (CompilationPolicy::must_be_compiled(selected_method)) { // This path is unusual, mostly used by the '-Xcomp' stress test mode. 
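Across these linkResolver.cpp hunks the patch replaces CallInfo's single vtable_index slot with a (call kind, index) pair; consumers then branch on the kind, as the interpreterRuntime.cpp hunk earlier in this changeset does when filling the constant-pool cache via set_direct_call, set_vtable_call, and set_itable_call. A minimal standalone sketch of such a consumer-side dispatch, where the enum mirrors CallInfo::CallKind and the fill_* callees are hypothetical stand-ins for the cache-entry setters:

#include <cstdio>

enum CallKind { direct_call, vtable_call, itable_call, unknown_kind = -1 };

static void fill_direct(int index) { std::printf("direct call (index %d unused)\n", index); }
static void fill_vtable(int index) { std::printf("vtable index %d\n", index); }
static void fill_itable(int index) { std::printf("itable index %d\n", index); }

static void link_call_site(CallKind kind, int index) {
  switch (kind) {
    case direct_call: fill_direct(index); break; // statically bound target
    case vtable_call: fill_vtable(index); break; // dispatch through receiver's vtable
    case itable_call: fill_itable(index); break; // dispatch through the interface's itable
    default:          /* unknown_kind: not resolved */ break;
  }
}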
@@ -138,6 +132,65 @@ } } +// utility query for unreflecting a method +CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) { + Klass* resolved_method_holder = resolved_method->method_holder(); + if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st + resolved_klass = resolved_method_holder; + } + _resolved_klass = resolved_klass; + _selected_klass = resolved_klass; + _resolved_method = resolved_method; + _selected_method = resolved_method; + // classify: + CallKind kind = CallInfo::unknown_kind; + int index = resolved_method->vtable_index(); + if (resolved_method->can_be_statically_bound()) { + kind = CallInfo::direct_call; + } else if (!resolved_method_holder->is_interface()) { + // Could be an Object method inherited into an interface, but still a vtable call. + kind = CallInfo::vtable_call; + } else if (!resolved_klass->is_interface()) { + // A miranda method. Compute the vtable index. + ResourceMark rm; + klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable(); + index = vt->index_of_miranda(resolved_method->name(), + resolved_method->signature()); + kind = CallInfo::vtable_call; + } else { + // A regular interface call. + kind = CallInfo::itable_call; + index = resolved_method->itable_index(); + } + assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index)); + _call_kind = kind; + _call_index = index; + _resolved_appendix = Handle(); + DEBUG_ONLY(verify()); +} + +#ifdef ASSERT +void CallInfo::verify() { + switch (call_kind()) { // the meaning and allowed value of index depends on kind + case CallInfo::direct_call: + if (_call_index == Method::nonvirtual_vtable_index) break; + // else fall through to check vtable index: + case CallInfo::vtable_call: + assert(resolved_klass()->verify_vtable_index(_call_index), ""); + break; + case CallInfo::itable_call: + assert(resolved_method()->method_holder()->verify_itable_index(_call_index), ""); + break; + case CallInfo::unknown_kind: + assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set"); + break; + default: + fatal(err_msg_res("Unexpected call kind %d", call_kind())); + } +} +#endif //ASSERT + + //------------------------------------------------------------------------------------------------------------------------ // Klass resolution @@ -163,13 +216,6 @@ result = KlassHandle(THREAD, result_oop); } -void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) { - Klass* result_oop = - ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK); - result = KlassHandle(THREAD, result_oop); -} - - //------------------------------------------------------------------------------------------------------------------------ // Method resolution // @@ -360,7 +406,12 @@ void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass, Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) { - + // This method is used only + // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call), + // and + // (2) in Bytecode_invoke::static_target + // It appears to fail when applied to an invokeinterface call site. + // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points. 
// resolve klass if (code == Bytecodes::_invokedynamic) { resolved_klass = SystemDictionary::MethodHandle_klass(); @@ -521,6 +572,16 @@ } if (check_access) { + // JDK8 adds non-public interface methods, and accessability check requirement + assert(current_klass.not_null() , "current_klass should not be null"); + + // check if method can be accessed by the referring class + check_method_accessability(current_klass, + resolved_klass, + KlassHandle(THREAD, resolved_method->method_holder()), + resolved_method, + CHECK); + HandleMark hm(THREAD); Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader()); Handle class_loader (THREAD, resolved_method->method_holder()->class_loader()); @@ -552,6 +613,20 @@ } } } + + if (TraceItables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", + (current_klass.is_null() ? "" : current_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + resolved_method->method_holder()->internal_name() + ); + resolved_method->access_flags().print_on(tty); + tty->cr(); + } } //------------------------------------------------------------------------------------------------------------------------ @@ -580,45 +655,49 @@ } } -void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) { - resolve_field(result, pool, index, byte, check_only, true, CHECK); +void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) { + // Load these early in case the resolve of the containing klass fails + Symbol* field = pool->name_ref_at(index); + Symbol* sig = pool->signature_ref_at(index); + + // resolve specified klass + KlassHandle resolved_klass; + resolve_klass(resolved_klass, pool, index, CHECK); + + KlassHandle current_klass(THREAD, pool->pool_holder()); + resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK); } -void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) { +void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig, + KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class, + TRAPS) { assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic || - byte == Bytecodes::_getfield || byte == Bytecodes::_putfield, "bad bytecode"); + byte == Bytecodes::_getfield || byte == Bytecodes::_putfield || + (byte == Bytecodes::_nop && !check_access), "bad field access bytecode"); bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic); bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic); - // resolve specified klass - KlassHandle resolved_klass; - if (update_pool) { - resolve_klass(resolved_klass, pool, index, CHECK); - } else { - resolve_klass_no_update(resolved_klass, pool, index, CHECK); - } - // Load these early in case the resolve of the containing klass fails - Symbol* field = pool->name_ref_at(index); - Symbol* sig = pool->signature_ref_at(index); // Check if there's a resolved klass containing the field - if( resolved_klass.is_null() ) { + if 
(resolved_klass.is_null()) { ResourceMark rm(THREAD); THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string()); } // Resolve instance field - fieldDescriptor fd; // find_field initializes fd if found KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd)); // check if field exists; i.e., if a klass containing the field def has been selected - if (sel_klass.is_null()){ + if (sel_klass.is_null()) { ResourceMark rm(THREAD); THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string()); } + if (!check_access) + // Access checking may be turned off when calling from within the VM. + return; + // check access - KlassHandle ref_klass(THREAD, pool->pool_holder()); - check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK); + check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK); // check for errors if (is_static != fd.is_static()) { @@ -629,7 +708,7 @@ } // Final fields can only be accessed from its own class. - if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { + if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) { THROW(vmSymbols::java_lang_IllegalAccessError()); } @@ -639,19 +718,18 @@ // // note 2: we don't want to force initialization if we are just checking // if the field access is legal; e.g., during compilation - if (is_static && !check_only) { + if (is_static && initialize_class) { sel_klass->initialize(CHECK); } - { + if (sel_klass() != current_klass()) { HandleMark hm(THREAD); - Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader()); + Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader()); Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader()); - Symbol* signature_ref = pool->signature_ref_at(index); { ResourceMark rm(THREAD); Symbol* failed_type_symbol = - SystemDictionary::check_signature_loaders(signature_ref, + SystemDictionary::check_signature_loaders(sig, ref_loader, sel_loader, false, CHECK); @@ -677,9 +755,6 @@ // return information. note that the klass is set to the actual klass containing the // field, otherwise access of static fields in superclasses will not work. - KlassHandle holder (THREAD, fd.field_holder()); - Symbol* name = fd.name(); - result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags()); } @@ -743,26 +818,12 @@ Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS) { - if (resolved_klass->is_interface() && current_klass() != NULL) { - // If the target class is a direct interface, treat this as a "super" - // default call. - // - // If the current method is an overpass that happens to call a direct - // super-interface's method, then we'll end up rerunning the default method - // analysis even though we don't need to, but that's ok since it will end - // up with the same answer. 
- InstanceKlass* ik = InstanceKlass::cast(current_klass()); - Array<Klass*>* interfaces = ik->local_interfaces(); - int num_interfaces = interfaces->length(); - for (int index = 0; index < num_interfaces; index++) { - if (interfaces->at(index) == resolved_klass()) { - Method* method = DefaultMethods::find_super_default(current_klass(), - resolved_klass(), method_name, method_signature, CHECK); - resolved_method = methodHandle(THREAD, method); - return; - } - } - } + // Invokespecial is called for multiple special reasons: + // + // local private method invocation, for classes and interfaces + // superclass.method, which can also resolve to a default method + // and the selected method is recalculated relative to the direct superclass + // superinterface.method, which explicitly does not check shadowing resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); @@ -792,6 +853,26 @@ resolved_method->signature())); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); } + if (TraceItables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", + (current_klass.is_null() ? "" : current_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + resolved_method->method_holder()->internal_name() + ); + resolved_method->access_flags().print_on(tty); + if (resolved_method->method_holder()->is_interface() && + !resolved_method->is_abstract()) { + tty->print("default"); + } + if (resolved_method->is_overpass()) { + tty->print("overpass"); + } + tty->cr(); + } } // throws runtime exceptions @@ -799,23 +880,24 @@ KlassHandle current_klass, bool check_access, TRAPS) { // resolved method is selected method unless we have an old-style lookup + // for a superclass method + // Invokespecial for a superinterface, resolved method is selected method, + // no checks for shadowing methodHandle sel_method(THREAD, resolved_method()); // check if this is an old-style super call and do a new lookup if so { KlassHandle method_klass = KlassHandle(THREAD, resolved_method->method_holder()); - const bool direct_calling_default_method = - resolved_klass() != NULL && resolved_method() != NULL && - resolved_klass->is_interface() && !resolved_method->is_abstract(); - - if (!direct_calling_default_method && - check_access && + if (check_access && // a) check if ACC_SUPER flag is set for the current class (current_klass->is_super() || !AllowNonVirtualCalls) && - // b) check if the method class is a superclass of the current class (superclass relation is not reflexive!) - current_klass->is_subtype_of(method_klass()) && - current_klass() != method_klass() && + // b) check if the class of the resolved_klass is a superclass + // (not supertype in order to exclude interface classes) of the current class. + // This check is not performed for super.invoke for interface methods + // in super interfaces.
+ current_klass->is_subclass_of(resolved_klass()) && + current_klass() != resolved_klass() && // c) check if the method is not resolved_method->name() != vmSymbols::object_initializer_name()) { // Lookup super method @@ -853,6 +935,23 @@ sel_method->signature())); } + if (TraceItables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokespecial selected method: resolved-class:%s, method:%s, method_holder:%s, access_flags: ", + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + sel_method->name(), + sel_method->signature()), + sel_method->method_holder()->internal_name() + ); + sel_method->access_flags().print_on(tty); + if (sel_method->method_holder()->is_interface() && + !sel_method->is_abstract()) { + tty->print("default"); + } + tty->cr(); + } + // setup result result.set_static(resolved_klass, sel_method, CHECK); } @@ -875,6 +974,18 @@ assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier"); assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier"); + // check if private interface method + if (resolved_klass->is_interface() && resolved_method->is_private()) { + ResourceMark rm(THREAD); + char buf[200]; + jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokevirtual: method %s, caller-class:%s", + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + (current_klass.is_null() ? "" : current_klass->internal_name())); + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + // check if not static if (resolved_method->is_static()) { ResourceMark rm(THREAD); @@ -884,6 +995,27 @@ resolved_method->signature())); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); } + + if (PrintVtables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", + (current_klass.is_null() ? "" : current_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + resolved_method->method_holder()->internal_name() + ); + resolved_method->access_flags().print_on(tty); + if (resolved_method->method_holder()->is_interface() && + !resolved_method->is_abstract()) { + tty->print("default"); + } + if (resolved_method->is_overpass()) { + tty->print("overpass"); + } + tty->cr(); + } } // throws runtime exceptions @@ -907,10 +1039,6 @@ } // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s - // has not been rewritten, and the vtable initialized. - assert(resolved_method->method_holder()->is_linked(), "must be linked"); - - // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s // has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since // a missing receiver might result in a bogus lookup. 
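// Once the code below has settled on a non-negative vtable_index, runtime
// selection is a single indexed load from the receiver's vtable, roughly
// along the lines of (reusing the accessors the CallInfo constructor hunk
// above uses; exact spelling approximate, not quoted from this patch):
//
//   klassVtable* vt = InstanceKlass::cast(recv_klass())->vtable();
//   Method* selected = vt->method_at(vtable_index);
//
// That is why the surrounding asserts insist the method holder is linked:
// vtables are only built and populated during class linking.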
assert(resolved_method->method_holder()->is_linked(), "must be linked"); @@ -920,6 +1048,7 @@ vtable_index = vtable_index_of_miranda_method(resolved_klass, resolved_method->name(), resolved_method->signature(), CHECK); + assert(vtable_index >= 0 , "we should have valid vtable index at this point"); InstanceKlass* inst = InstanceKlass::cast(recv_klass()); @@ -927,6 +1056,7 @@ } else { // at this point we are sure that resolved_method is virtual and not // a miranda method; therefore, it must have a valid vtable index. + assert(!resolved_method->has_itable_index(), ""); vtable_index = resolved_method->vtable_index(); // We could get a negative vtable_index for final methods, // because as an optimization they are they are never put in the vtable, @@ -962,6 +1092,27 @@ selected_method->signature())); } + if (PrintVtables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokevirtual selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, vtable_index:%d, access_flags: ", + (recv_klass.is_null() ? "" : recv_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + selected_method->method_holder()->internal_name(), + vtable_index + ); + selected_method->access_flags().print_on(tty); + if (selected_method->method_holder()->is_interface() && + !selected_method->is_abstract()) { + tty->print("default"); + } + if (resolved_method->is_overpass()) { + tty->print("overpass"); + } + tty->cr(); + } // setup result result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK); } @@ -992,6 +1143,17 @@ THROW(vmSymbols::java_lang_NullPointerException()); } + // check if private interface method + if (resolved_klass->is_interface() && resolved_method->is_private()) { + ResourceMark rm(THREAD); + char buf[200]; + jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokeinterface: method %s", + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature())); + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + // check if receiver klass implements the resolved interface if (!recv_klass->is_subtype_of(resolved_klass())) { ResourceMark rm(THREAD); @@ -1006,6 +1168,12 @@ lookup_instance_method_in_klasses(sel_method, recv_klass, resolved_method->name(), resolved_method->signature(), CHECK); + if (sel_method.is_null() && !check_null_and_abstract) { + // In theory this is a harmless placeholder value, but + // in practice leaving in null affects the nsk default method tests. + // This needs further study. + sel_method = resolved_method; + } // check if method exists if (sel_method.is_null()) { ResourceMark rm(THREAD); @@ -1015,28 +1183,15 @@ resolved_method->signature())); } // check access - if (sel_method->method_holder()->is_interface()) { - // Method holder is an interface. Throw Illegal Access Error if sel_method - // is neither public nor private. - if (!(sel_method->is_public() || sel_method->is_private())) { - ResourceMark rm(THREAD); - THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), - Method::name_and_sig_as_C_string(recv_klass(), - sel_method->name(), - sel_method->signature())); - } + // Throw Illegal Access Error if sel_method is not public. 
+ if (!sel_method->is_public()) { + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), + Method::name_and_sig_as_C_string(recv_klass(), + sel_method->name(), + sel_method->signature())); } - else { - // Method holder is a class. Throw Illegal Access Error if sel_method - // is not public. - if (!sel_method->is_public()) { - ResourceMark rm(THREAD); - THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), - Method::name_and_sig_as_C_string(recv_klass(), - sel_method->name(), - sel_method->signature())); - } - } + // check if abstract if (check_null_and_abstract && sel_method->is_abstract()) { ResourceMark rm(THREAD); @@ -1046,7 +1201,35 @@ sel_method->signature())); } // setup result - result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK); + if (!resolved_method->has_itable_index()) { + int vtable_index = resolved_method->vtable_index(); + assert(vtable_index == sel_method->vtable_index(), "sanity check"); + result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK); + return; + } + int itable_index = resolved_method()->itable_index(); + + if (TraceItables && Verbose) { + ResourceMark rm(THREAD); + tty->print("invokeinterface selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, access_flags: ", + (recv_klass.is_null() ? "" : recv_klass->internal_name()), + (resolved_klass.is_null() ? "" : resolved_klass->internal_name()), + Method::name_and_sig_as_C_string(resolved_klass(), + resolved_method->name(), + resolved_method->signature()), + sel_method->method_holder()->internal_name() + ); + sel_method->access_flags().print_on(tty); + if (sel_method->method_holder()->is_interface() && + !sel_method->is_abstract()) { + tty->print("default"); + } + if (resolved_method->is_overpass()) { + tty->print("overpass"); + } + tty->cr(); + } + result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK); } @@ -1293,7 +1476,8 @@ } if (TraceMethodHandles) { - tty->print_cr("resolve_invokedynamic #%d %s %s", + ResourceMark rm(THREAD); + tty->print_cr("resolve_invokedynamic #%d %s %s", ConstantPool::decode_invokedynamic_index(index), method_name->as_C_string(), method_signature->as_C_string()); tty->print(" BSM info: "); bootstrap_specifier->print(); @@ -1320,7 +1504,7 @@ THREAD); if (HAS_PENDING_EXCEPTION) { if (TraceMethodHandles) { - tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION); + tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, (void *)PENDING_EXCEPTION); PENDING_EXCEPTION->print(); } if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) { @@ -1342,9 +1526,16 @@ //------------------------------------------------------------------------------------------------------------------------ #ifndef PRODUCT -void FieldAccessInfo::print() { +void CallInfo::print() { ResourceMark rm; - tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset()); + const char* kindstr = "unknown"; + switch (_call_kind) { + case direct_call: kindstr = "direct"; break; + case vtable_call: kindstr = "vtable"; break; + case itable_call: kindstr = "itable"; break; + } + tty->print_cr("Call %s@%d %s", kindstr, _call_index, + _resolved_method.is_null() ? 
"(none)" : _resolved_method->name_and_sig_as_C_string()); } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/interpreter/linkResolver.hpp --- a/src/share/vm/interpreter/linkResolver.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/interpreter/linkResolver.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,63 +30,54 @@ // All the necessary definitions for run-time link resolution. -// LinkInfo & its subclasses provide all the information gathered -// for a particular link after resolving it. A link is any reference +// CallInfo provides all the information gathered for a particular +// linked call site after resolving it. A link is any reference // made from within the bytecodes of a method to an object outside of // that method. If the info is invalid, the link has not been resolved // successfully. -class LinkInfo VALUE_OBJ_CLASS_SPEC { -}; - - -// Link information for getfield/putfield & getstatic/putstatic bytecodes. - -class FieldAccessInfo: public LinkInfo { - protected: - KlassHandle _klass; - Symbol* _name; - AccessFlags _access_flags; - int _field_index; // original index in the klass - int _field_offset; - BasicType _field_type; - +class CallInfo VALUE_OBJ_CLASS_SPEC { public: - void set(KlassHandle klass, Symbol* name, int field_index, int field_offset, - BasicType field_type, AccessFlags access_flags); - KlassHandle klass() const { return _klass; } - Symbol* name() const { return _name; } - int field_index() const { return _field_index; } - int field_offset() const { return _field_offset; } - BasicType field_type() const { return _field_type; } - AccessFlags access_flags() const { return _access_flags; } - - // debugging - void print() PRODUCT_RETURN; -}; - - -// Link information for all calls. - -class CallInfo: public LinkInfo { + // Ways that a method call might be selected (or not) based on receiver type. 
+ // Note that an invokevirtual instruction might be linked with no_dispatch, + // and an invokeinterface instruction might be linked with any of the three options + enum CallKind { + direct_call, // jump into resolved_method (must be concrete) + vtable_call, // select recv.klass.method_at_vtable(index) + itable_call, // select recv.klass.method_at_itable(resolved_method.holder, index) + unknown_kind = -1 + }; private: - KlassHandle _resolved_klass; // static receiver klass + KlassHandle _resolved_klass; // static receiver klass, resolved from a symbolic reference KlassHandle _selected_klass; // dynamic receiver class (same as static, or subklass) methodHandle _resolved_method; // static target method methodHandle _selected_method; // dynamic (actual) target method - int _vtable_index; // vtable index of selected method + CallKind _call_kind; // kind of call (static(=bytecode static/special + + // others inferred), vtable, itable) + int _call_index; // vtable or itable index of selected class method (if any) Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites) void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); - void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); + void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index , TRAPS); void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS); - void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); + void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS); friend class LinkResolver; public: + CallInfo() { +#ifndef PRODUCT + _call_kind = CallInfo::unknown_kind; + _call_index = Method::garbage_vtable_index; +#endif //PRODUCT + } + + // utility to extract an effective CallInfo from a method and an optional receiver limit + // does not queue the method for compilation + CallInfo(Method* resolved_method, Klass* resolved_klass = NULL); + KlassHandle resolved_klass() const { return _resolved_klass; } KlassHandle selected_klass() const { return _selected_klass; } methodHandle resolved_method() const { return _resolved_method; } @@ -95,21 +86,43 @@ Handle resolved_method_type() const { return _resolved_method_type; } BasicType result_type() const { return selected_method()->result_type(); } - bool has_vtable_index() const { return _vtable_index >= 0; } - bool is_statically_bound() const { return _vtable_index == Method::nonvirtual_vtable_index; } + CallKind call_kind() const { return _call_kind; } + int call_index() const { return _call_index; } int vtable_index() const { // Even for interface calls the vtable index could be non-negative. // See CallInfo::set_interface. assert(has_vtable_index() || is_statically_bound(), ""); - return _vtable_index; + assert(call_kind() == vtable_call || call_kind() == direct_call, ""); + // The returned value is < 0 if the call is statically bound. 
+ // But, the returned value may be >= 0 even if the kind is direct_call. + // It is up to the caller to decide which way to go. + return _call_index; } + int itable_index() const { + assert(call_kind() == itable_call, ""); + // The returned value is always >= 0, a valid itable index. + return _call_index; + } + + // debugging +#ifdef ASSERT + bool has_vtable_index() const { return _call_index >= 0 && _call_kind != CallInfo::itable_call; } + bool is_statically_bound() const { return _call_index == Method::nonvirtual_vtable_index; } +#endif //ASSERT + void verify() PRODUCT_RETURN; + void print() PRODUCT_RETURN; }; +// Link information for getfield/putfield & getstatic/putstatic bytecodes +// is represented using a fieldDescriptor. // The LinkResolver is used to resolve constant-pool references at run-time. // It does all necessary link-time checks & throws exceptions if necessary. class LinkResolver: AllStatic { + friend class klassVtable; + friend class klassItable; + private: static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); @@ -120,7 +133,6 @@ static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); - static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS); @@ -148,9 +160,16 @@ Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS); // runtime/static resolving for fields - static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS); - // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution. - static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS); + static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); + static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature, + KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS); + + // source of access_kind codes: + static Bytecodes::Code field_access_kind(bool is_static, bool is_put) { + return (is_static + ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic) + : (is_put ? Bytecodes::_putfield : Bytecodes::_getfield )); + } // runtime resolving: // resolved_klass = specified class (i.e., static receiver class) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/libadt/port.hpp --- a/src/share/vm/libadt/port.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/libadt/port.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
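[Editor's sketch] The CallInfo rewrite above replaces the old bare _vtable_index with a (CallKind, index) pair, so a consumer must consult the kind before interpreting the index. A minimal standalone sketch of that contract; the names ResolvedCall, kind, and index are invented for this example and do not exist in HotSpot:

    // Illustrative only: mirrors the CallInfo accessors' asserts, not HotSpot code.
    #include <cassert>
    #include <cstdio>

    enum CallKind { direct_call, vtable_call, itable_call, unknown_kind = -1 };

    struct ResolvedCall {
      CallKind kind;
      int index;  // vtable or itable slot; < 0 when statically bound

      int vtable_index() const {
        // Like CallInfo::vtable_index(): only valid for vtable/direct calls,
        // and may still be negative for a statically bound call.
        assert(kind == vtable_call || kind == direct_call);
        return index;
      }
      int itable_index() const {
        // Like CallInfo::itable_index(): always a valid (>= 0) itable slot.
        assert(kind == itable_call && index >= 0);
        return index;
      }
    };

    int main() {
      ResolvedCall virt  = { vtable_call, 5 };
      ResolvedCall iface = { itable_call, 2 };
      std::printf("vtable slot %d, itable slot %d\n",
                  virt.vtable_index(), iface.itable_index());
      return 0;
    }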
* * This code is free software; you can redistribute it and/or modify it @@ -163,8 +163,8 @@ extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size); extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size); extern char *safe_strdup (const char *file, unsigned line, const char *src); -inline void *operator new( size_t size ) { return malloc(size); } -inline void operator delete( void *ptr ) { free(ptr); } +inline void *operator new( size_t size ) throw() { return malloc(size); } +inline void operator delete( void *ptr ) { free(ptr); } #endif //----------------------------------------------------------------------------- diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/allocation.cpp --- a/src/share/vm/memory/allocation.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/allocation.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -49,19 +49,19 @@ # include "os_bsd.inline.hpp" #endif -void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; } -void StackObj::operator delete(void* p) { ShouldNotCallThis(); } -void* StackObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; } -void StackObj::operator delete [](void* p) { ShouldNotCallThis(); } +void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; } +void StackObj::operator delete(void* p) { ShouldNotCallThis(); } +void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; } +void StackObj::operator delete [](void* p) { ShouldNotCallThis(); } -void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; } -void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); } -void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; } -void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); } +void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; } +void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); } +void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; } +void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); } void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, bool read_only, - MetaspaceObj::Type type, TRAPS) { + MetaspaceObj::Type type, TRAPS) throw() { // Klass has it's own operator new return Metaspace::allocate(loader_data, word_size, read_only, type, CHECK_NULL); @@ -80,7 +80,7 @@ st->print(" {"INTPTR_FORMAT"}", this); } -void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) { +void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() { address res; switch (type) { case C_HEAP: @@ -97,12 +97,12 @@ return res; } -void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) { +void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() { return (address) operator new(size, type, flags); } void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant, - allocation_type type, MEMFLAGS flags) { + allocation_type type, MEMFLAGS flags) throw() { //should only call this with std::nothrow, use other operator new() otherwise address res; switch (type) { @@ -121,7 +121,7 @@ } void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant, - allocation_type type, MEMFLAGS flags) { + allocation_type type, MEMFLAGS flags) throw() { return (address)operator 
new(size, nothrow_constant, type, flags); } @@ -370,7 +370,7 @@ //-------------------------------------------------------------------------------------- // Chunk implementation -void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) { +void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() { // requested_size is equal to sizeof(Chunk) but in order for the arena // allocations to come out aligned as expected the size must be aligned // to expected arena alignment. @@ -478,18 +478,18 @@ NOT_PRODUCT(Atomic::dec(&_instance_count);) } -void* Arena::operator new(size_t size) { +void* Arena::operator new(size_t size) throw() { assert(false, "Use dynamic memory type binding"); return NULL; } -void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) { +void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() { assert(false, "Use dynamic memory type binding"); return NULL; } // dynamic memory type binding -void* Arena::operator new(size_t size, MEMFLAGS flags) { +void* Arena::operator new(size_t size, MEMFLAGS flags) throw() { #ifdef ASSERT void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); @@ -499,7 +499,7 @@ #endif } -void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) { +void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() { #ifdef ASSERT void* p = os::malloc(size, flags|otArena, CALLER_PC); if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); @@ -688,22 +688,22 @@ // define ALLOW_OPERATOR_NEW_USAGE for platform on which global operator new allowed. 
// #ifndef ALLOW_OPERATOR_NEW_USAGE -void* operator new(size_t size){ +void* operator new(size_t size) throw() { assert(false, "Should not call global operator new"); return 0; } -void* operator new [](size_t size){ +void* operator new [](size_t size) throw() { assert(false, "Should not call global operator new[]"); return 0; } -void* operator new(size_t size, const std::nothrow_t& nothrow_constant){ +void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { assert(false, "Should not call global operator new"); return 0; } -void* operator new [](size_t size, std::nothrow_t& nothrow_constant){ +void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() { assert(false, "Should not call global operator new[]"); return 0; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/allocation.hpp --- a/src/share/vm/memory/allocation.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/allocation.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -204,12 +204,12 @@ template class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { public: - _NOINLINE_ void* operator new(size_t size, address caller_pc = 0); + _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw(); _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant, - address caller_pc = 0); - _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0); + address caller_pc = 0) throw(); + _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw(); _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, - address caller_pc = 0); + address caller_pc = 0) throw(); void operator delete(void* p); void operator delete [] (void* p); }; @@ -219,9 +219,9 @@ class StackObj ALLOCATION_SUPER_CLASS_SPEC { private: - void* operator new(size_t size); + void* operator new(size_t size) throw(); void operator delete(void* p); - void* operator new [](size_t size); + void* operator new [](size_t size) throw(); void operator delete [](void* p); }; @@ -245,9 +245,9 @@ // class _ValueObj { private: - void* operator new(size_t size); + void* operator new(size_t size) throw(); void operator delete(void* p); - void* operator new [](size_t size); + void* operator new [](size_t size) throw(); void operator delete [](void* p); }; @@ -316,7 +316,7 @@ void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, bool read_only, - Type type, Thread* thread); + Type type, Thread* thread) throw(); // can't use TRAPS from this header file. 
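[Editor's sketch] The throw() qualifiers added throughout these hunks are not cosmetic. In C++98, an operator new without an exception specification is assumed to throw std::bad_alloc on failure and never return NULL, so a compiler may drop the caller's NULL check; HotSpot's allocators can return NULL, and the empty throw() spec makes that contract explicit. A minimal sketch of the idea, using a hypothetical PoolObj class rather than any HotSpot type:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct PoolObj {
      // Declared throw() so a NULL result is part of the contract; without it,
      // the new-expression is entitled to assume the pointer is non-null.
      void* operator new(std::size_t size) throw() { return std::malloc(size); }
      void  operator delete(void* p)               { std::free(p); }
      int value;
    };

    int main() {
      PoolObj* p = new PoolObj;  // non-throwing form: result may be NULL
      if (p == NULL) return 1;   // this check is only reliable because of throw()
      p->value = 42;
      std::printf("%d\n", p->value);
      delete p;
      return 0;
    }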
void operator delete(void* p) { ShouldNotCallThis(); } }; @@ -339,7 +339,7 @@ Chunk* _next; // Next Chunk in list const size_t _len; // Size of this Chunk public: - void* operator new(size_t size, AllocFailType alloc_failmode, size_t length); + void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw(); void operator delete(void* p); Chunk(size_t length); @@ -422,12 +422,12 @@ char* hwm() const { return _hwm; } // new operators - void* operator new (size_t size); - void* operator new (size_t size, const std::nothrow_t& nothrow_constant); + void* operator new (size_t size) throw(); + void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw(); // dynamic memory type tagging - void* operator new(size_t size, MEMFLAGS flags); - void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags); + void* operator new(size_t size, MEMFLAGS flags) throw(); + void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw(); void operator delete(void* p); // Fast allocate in the arena. Common case is: pointer test + increment. @@ -583,44 +583,44 @@ #endif // ASSERT public: - void* operator new(size_t size, allocation_type type, MEMFLAGS flags); - void* operator new [](size_t size, allocation_type type, MEMFLAGS flags); + void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw(); + void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw(); void* operator new(size_t size, const std::nothrow_t& nothrow_constant, - allocation_type type, MEMFLAGS flags); + allocation_type type, MEMFLAGS flags) throw(); void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, - allocation_type type, MEMFLAGS flags); + allocation_type type, MEMFLAGS flags) throw(); - void* operator new(size_t size, Arena *arena) { + void* operator new(size_t size, Arena *arena) throw() { address res = (address)arena->Amalloc(size); DEBUG_ONLY(set_allocation_type(res, ARENA);) return res; } - void* operator new [](size_t size, Arena *arena) { + void* operator new [](size_t size, Arena *arena) throw() { address res = (address)arena->Amalloc(size); DEBUG_ONLY(set_allocation_type(res, ARENA);) return res; } - void* operator new(size_t size) { + void* operator new(size_t size) throw() { address res = (address)resource_allocate_bytes(size); DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) return res; } - void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { + void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) return res; } - void* operator new [](size_t size) { + void* operator new [](size_t size) throw() { address res = (address)resource_allocate_bytes(size); DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) return res; } - void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) { + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() { address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) return res; @@ -666,7 +666,7 @@ NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1) #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\ - (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) + (type*) AllocateHeap((size) * 
sizeof(type), memflags, pc, allocfail) #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) @@ -675,16 +675,16 @@ (type*) (AllocateHeap((size) * sizeof(type), memflags)) #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\ - NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL) + NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL) #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ - NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL) + NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL) #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ - (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags)) + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags)) #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ - (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) #define FREE_C_HEAP_ARRAY(type, old, memflags) \ FreeHeap((char*)(old), memflags) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/allocation.inline.hpp --- a/src/share/vm/memory/allocation.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/allocation.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -85,7 +85,7 @@ template void* CHeapObj::operator new(size_t size, - address caller_pc){ + address caller_pc) throw() { void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); @@ -94,7 +94,7 @@ } template void* CHeapObj::operator new (size_t size, - const std::nothrow_t& nothrow_constant, address caller_pc) { + const std::nothrow_t& nothrow_constant, address caller_pc) throw() { void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC), AllocFailStrategy::RETURN_NULL); #ifdef ASSERT @@ -104,12 +104,12 @@ } template void* CHeapObj::operator new [](size_t size, - address caller_pc){ + address caller_pc) throw() { return CHeapObj::operator new(size, caller_pc); } template void* CHeapObj::operator new [](size_t size, - const std::nothrow_t& nothrow_constant, address caller_pc) { + const std::nothrow_t& nothrow_constant, address caller_pc) throw() { return CHeapObj::operator new(size, nothrow_constant, caller_pc); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/binaryTreeDictionary.cpp --- a/src/share/vm/memory/binaryTreeDictionary.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
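[Editor's sketch] The extra parentheses added to NEW_C_HEAP_ARRAY3, REALLOC_C_HEAP_ARRAY and friends above guard against a classic macro pitfall: an unparenthesized argument binds to the neighboring operator instead of being evaluated as a whole. A self-contained illustration with a hypothetical byte-count macro (not HotSpot's), assuming a typical 4-byte int:

    #include <cstdio>

    // The only difference is the parentheses around the size argument.
    #define ALLOC_BYTES_UNSAFE(type, size) (size * sizeof(type))
    #define ALLOC_BYTES(type, size)        ((size) * sizeof(type))

    int main() {
      unsigned long n = 3;
      // The caller intends (n + 1) elements, i.e. 16 bytes for 4-byte ints.
      // Unsafe form expands to n + 1 * sizeof(int) = 7 bytes: a short allocation.
      std::printf("unsafe: %lu\n", (unsigned long) ALLOC_BYTES_UNSAFE(int, n + 1));
      std::printf("fixed:  %lu\n", (unsigned long) ALLOC_BYTES(int, n + 1));
      return 0;
    }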
* * This code is free software; you can redistribute it and/or modify it @@ -33,10 +33,10 @@ #include "runtime/globals.hpp" #include "utilities/ostream.hpp" #include "utilities/macros.hpp" +#include "gc_implementation/shared/spaceDecorator.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp" #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" -#include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" #endif // INCLUDE_ALL_GCS diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/cardTableModRefBS.cpp --- a/src/share/vm/memory/cardTableModRefBS.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -423,60 +423,6 @@ inline_write_ref_field(field, newVal); } -/* - Claimed and deferred bits are used together in G1 during the evacuation - pause. These bits can have the following state transitions: - 1. The claimed bit can be put over any other card state. Except that - the "dirty -> dirty and claimed" transition is checked for in - G1 code and is not used. - 2. Deferred bit can be set only if the previous state of the card - was either clean or claimed. mark_card_deferred() is wait-free. - We do not care if the operation is be successful because if - it does not it will only result in duplicate entry in the update - buffer because of the "cache-miss". So it's not worth spinning. - */ - - -bool CardTableModRefBS::claim_card(size_t card_index) { - jbyte val = _byte_map[card_index]; - assert(val != dirty_card_val(), "Shouldn't claim a dirty card"); - while (val == clean_card_val() || - (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) { - jbyte new_val = val; - if (val == clean_card_val()) { - new_val = (jbyte)claimed_card_val(); - } else { - new_val = val | (jbyte)claimed_card_val(); - } - jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val); - if (res == val) { - return true; - } - val = res; - } - return false; -} - -bool CardTableModRefBS::mark_card_deferred(size_t card_index) { - jbyte val = _byte_map[card_index]; - // It's already processed - if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { - return false; - } - // Cached bit can be installed either on a clean card or on a claimed card. 
- jbyte new_val = val; - if (val == clean_card_val()) { - new_val = (jbyte)deferred_card_val(); - } else { - if (val & claimed_card_val()) { - new_val = val | (jbyte)deferred_card_val(); - } - } - if (new_val != val) { - Atomic::cmpxchg(new_val, &_byte_map[card_index], val); - } - return true; -} void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/cardTableModRefBS.hpp --- a/src/share/vm/memory/cardTableModRefBS.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -339,34 +339,10 @@ _byte_map[card_index] = dirty_card_val(); } - bool is_card_claimed(size_t card_index) { - jbyte val = _byte_map[card_index]; - return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val(); - } - - void set_card_claimed(size_t card_index) { - jbyte val = _byte_map[card_index]; - if (val == clean_card_val()) { - val = (jbyte)claimed_card_val(); - } else { - val |= (jbyte)claimed_card_val(); - } - _byte_map[card_index] = val; - } - - bool claim_card(size_t card_index); - bool is_card_clean(size_t card_index) { return _byte_map[card_index] == clean_card_val(); } - bool is_card_deferred(size_t card_index) { - jbyte val = _byte_map[card_index]; - return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); - } - - bool mark_card_deferred(size_t card_index); - // Card marking array base (adjusted for heap low boundary) // This would be the 0th element of _byte_map, if the heap started at 0x0. // But since the heap starts at some higher address, this points to somewhere diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/collectorPolicy.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -47,6 +47,11 @@ // CollectorPolicy methods. +// Align down. If the aligning result in 0, return 'alignment'. +static size_t restricted_align_down(size_t size, size_t alignment) { + return MAX2(alignment, align_size_down_(size, alignment)); +} + void CollectorPolicy::initialize_flags() { assert(max_alignment() >= min_alignment(), err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, @@ -59,18 +64,26 @@ vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); } - if (MetaspaceSize > MaxMetaspaceSize) { - MaxMetaspaceSize = MetaspaceSize; - } - MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment())); - // Don't increase Metaspace size limit above specified. - MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment()); + // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will + // override if MaxMetaspaceSize was set on the command line or not. + // This information is needed later to conform to the specification of the + // java.lang.management.MemoryUsage API. + // + // Ideally, we would be able to set the default value of MaxMetaspaceSize in + // globals.hpp to the aligned value, but this is not possible, since the + // alignment depends on other flags being parsed. 
+ MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment()); + if (MetaspaceSize > MaxMetaspaceSize) { MetaspaceSize = MaxMetaspaceSize; } - MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment())); - MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment())); + MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment()); + + assert(MetaspaceSize <= MaxMetaspaceSize, "Must be"); + + MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment()); + MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment()); MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment()); @@ -124,15 +137,8 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, int max_covered_regions) { - switch (rem_set_name()) { - case GenRemSet::CardTable: { - CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions); - return res; - } - default: - guarantee(false, "unrecognized GenRemSet::Name"); - return NULL; - } + assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name"); + return new CardTableRS(whole_heap, max_covered_regions); } void CollectorPolicy::cleared_all_soft_refs() { @@ -145,6 +151,30 @@ _all_soft_refs_clear = true; } +size_t CollectorPolicy::compute_max_alignment() { + // The card marking array and the offset arrays for old generations are + // committed in os pages as well. Make sure they are entirely full (to + // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 + // byte entry and the os page size is 4096, the maximum heap size should + // be 512*4096 = 2MB aligned. + + // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable + // is supported. + // Requirements of any new remembered set implementations must be added here. + size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); + + // Parallel GC does its own alignment of the generations to avoid requiring a + // large page (256M on some platforms) for the permanent generation. The + // other collectors should also be updated to do their own alignment and then + // this use of lcm() should be removed. + if (UseLargePages && !UseParallelGC) { + // in presence of large pages we have to make sure that our + // alignment is large page aware + alignment = lcm(os::large_page_size(), alignment); + } + + return alignment; +} // GenCollectorPolicy methods. @@ -175,27 +205,6 @@ GCTimeRatio); } -size_t GenCollectorPolicy::compute_max_alignment() { - // The card marking array and the offset arrays for old generations are - // committed in os pages as well. Make sure they are entirely full (to - // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 - // byte entry and the os page size is 4096, the maximum heap size should - // be 512*4096 = 2MB aligned. - size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name()); - - // Parallel GC does its own alignment of the generations to avoid requiring a - // large page (256M on some platforms) for the permanent generation. The - // other collectors should also be updated to do their own alignment and then - // this use of lcm() should be removed. 
- if (UseLargePages && !UseParallelGC) { - // in presence of large pages we have to make sure that our - // alignment is large page aware - alignment = lcm(os::large_page_size(), alignment); - } - - return alignment; -} - void GenCollectorPolicy::initialize_flags() { // All sizes must be multiples of the generation granularity. set_min_alignment((uintx) Generation::GenGrain); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/collectorPolicy.hpp --- a/src/share/vm/memory/collectorPolicy.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/collectorPolicy.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -98,6 +98,9 @@ {} public: + // Return maximum heap alignment that may be imposed by the policy + static size_t compute_max_alignment(); + void set_min_alignment(size_t align) { _min_alignment = align; } size_t min_alignment() { return _min_alignment; } void set_max_alignment(size_t align) { _max_alignment = align; } @@ -234,9 +237,6 @@ // Try to allocate space by expanding the heap. virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); - // compute max heap alignment - size_t compute_max_alignment(); - // Scale the base_size by NewRation according to // result = base_size / (NewRatio + 1) // and align by min_alignment() diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/filemap.cpp --- a/src/share/vm/memory/filemap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/filemap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -55,6 +55,7 @@ " shared archive file.\n"); jio_vfprintf(defaultStream::error_stream(), msg, ap); jio_fprintf(defaultStream::error_stream(), "\n"); + // Do not change the text of the below message because some tests check for it. vm_exit_during_initialization("Unable to use shared archive.", NULL); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/gcLocker.cpp --- a/src/share/vm/memory/gcLocker.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/gcLocker.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -122,7 +122,7 @@ // strictly needed. It's added here to make it clear that // the GC will NOT be performed if any other caller // of GC_locker::lock() still needs GC locked. - if (!is_active()) { + if (!is_active_internal()) { _doing_gc = true; { // Must give up the lock while at a safepoint diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/gcLocker.hpp --- a/src/share/vm/memory/gcLocker.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/gcLocker.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -88,7 +88,7 @@ public: // Accessors static bool is_active() { - assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint"); + assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint"); return is_active_internal(); } static bool needs_gc() { return _needs_gc; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/genCollectedHeap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -95,13 +95,13 @@ guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); // The heap must be at least as aligned as generations. - size_t alignment = Generation::GenGrain; + size_t gen_alignment = Generation::GenGrain; _gen_specs = gen_policy()->generations(); // Make sure the sizes are all aligned. for (i = 0; i < _n_gens; i++) { - _gen_specs[i]->align(alignment); + _gen_specs[i]->align(gen_alignment); } // Allocate space for the heap. 
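[Editor's sketch] The collector-policy changes above route the metaspace flags through restricted_align_down() (align down, but never below the alignment itself) and compute the heap alignment as an lcm of the card-table constraint and the large-page size. A standalone re-expression of both helpers; align_size_down_, MAX2 and lcm are HotSpot utilities reconstructed here by hand, and power-of-two alignments are assumed:

    #include <cstdio>

    // Align down to a power-of-two boundary, like align_size_down_.
    static unsigned long align_down(unsigned long size, unsigned long alignment) {
      return size & ~(alignment - 1);
    }

    // restricted_align_down: a small flag value cannot collapse to zero.
    static unsigned long restricted_align_down(unsigned long size, unsigned long alignment) {
      unsigned long aligned = align_down(size, alignment);
      return aligned < alignment ? alignment : aligned;
    }

    static unsigned long gcd(unsigned long a, unsigned long b) {
      while (b != 0) { unsigned long t = a % b; a = b; b = t; }
      return a;
    }

    // lcm, as used to combine the card-table constraint with the page size.
    static unsigned long lcm(unsigned long a, unsigned long b) {
      return (a / gcd(a, b)) * b;
    }

    int main() {
      // Per the comment above: 512-byte cards on 4096-byte OS pages force
      // 512 * 4096 = 2M heap alignment; with 256M large pages, lcm gives 256M.
      unsigned long card_alignment = 512UL * 4096UL;
      unsigned long large_page     = 256UL * 1024UL * 1024UL;  // example value
      std::printf("with large pages: %luM\n", lcm(card_alignment, large_page) >> 20);

      // MetaspaceSize of 5000 with 4096-byte min_alignment floors at 4096.
      std::printf("restricted_align_down(5000, 4096) = %lu\n",
                  restricted_align_down(5000UL, 4096UL));
      return 0;
    }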
@@ -109,9 +109,11 @@ char* heap_address; size_t total_reserved = 0; int n_covered_regions = 0; - ReservedSpace heap_rs(0); + ReservedSpace heap_rs; - heap_address = allocate(alignment, &total_reserved, + size_t heap_alignment = collector_policy()->max_alignment(); + + heap_address = allocate(heap_alignment, &total_reserved, &n_covered_regions, &heap_rs); if (!heap_rs.is_reserved()) { @@ -168,6 +170,8 @@ const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size(); + assert(alignment % pageSize == 0, "Must be"); + for (int i = 0; i < _n_gens; i++) { total_reserved += _gen_specs[i]->max_size(); if (total_reserved < _gen_specs[i]->max_size()) { @@ -175,24 +179,17 @@ } n_covered_regions += _gen_specs[i]->n_covered_regions(); } - assert(total_reserved % pageSize == 0, - err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize=" - SIZE_FORMAT, total_reserved, pageSize)); + assert(total_reserved % alignment == 0, + err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment=" + SIZE_FORMAT, total_reserved, alignment)); // Needed until the cardtable is fixed to have the right number // of covered regions. n_covered_regions += 2; - if (UseLargePages) { - assert(total_reserved != 0, "total_reserved cannot be 0"); - total_reserved = round_to(total_reserved, os::large_page_size()); - if (total_reserved < os::large_page_size()) { - vm_exit_during_initialization(overflow_msg); - } - } + *_total_reserved = total_reserved; + *_n_covered_regions = n_covered_regions; - *_total_reserved = total_reserved; - *_n_covered_regions = n_covered_regions; *heap_rs = Universe::reserve_heap(total_reserved, alignment); return heap_rs->base(); } @@ -1211,6 +1208,7 @@ } MetaspaceCounters::update_performance_counters(); + CompressedClassSpaceCounters::update_performance_counters(); always_do_update_barrier = UseConcMarkSweepGC; }; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/genCollectedHeap.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -148,6 +148,11 @@ return gen_policy()->size_policy(); } + // Return the (conservative) maximum heap alignment + static size_t conservative_max_heap_alignment() { + return Generation::GenGrain; + } + size_t capacity() const; size_t used() const; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/genRemSet.cpp --- a/src/share/vm/memory/genRemSet.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/genRemSet.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,13 +32,8 @@ // enumeration.) 
uintx GenRemSet::max_alignment_constraint(Name nm) { - switch (nm) { - case GenRemSet::CardTable: - return CardTableRS::ct_max_alignment_constraint(); - default: - guarantee(false, "Unrecognized GenRemSet type."); - return (0); // Make Windows compiler happy - } + assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type."); + return CardTableRS::ct_max_alignment_constraint(); } class HasAccumulatedModifiedOopsClosure : public KlassClosure { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/heapInspection.hpp --- a/src/share/vm/memory/heapInspection.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/heapInspection.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -150,11 +150,11 @@ HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD) static int count(oop x) { - return (HeapWordSize * ((x) ? (x)->size() : 0)); + return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0)); } static int count_array(objArrayOop x) { - return (HeapWordSize * ((x) ? (x)->size() : 0)); + return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0)); } template static int count(T* x) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/iterator.cpp --- a/src/share/vm/memory/iterator.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/iterator.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -64,7 +64,7 @@ } void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) { - nm->oops_do(_cl, /*do_strong_roots_only=*/ true); + nm->oops_do(_cl, /*allow_zombie=*/ false); } void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/memRegion.cpp --- a/src/share/vm/memory/memRegion.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/memRegion.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,11 +102,11 @@ return MemRegion(); } -void* MemRegion::operator new(size_t size) { +void* MemRegion::operator new(size_t size) throw() { return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); } -void* MemRegion::operator new [](size_t size) { +void* MemRegion::operator new [](size_t size) throw() { return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL); } void MemRegion::operator delete(void* p) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/memRegion.hpp --- a/src/share/vm/memory/memRegion.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/memRegion.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -94,8 +94,8 @@ size_t word_size() const { return _word_size; } bool is_empty() const { return word_size() == 0; } - void* operator new(size_t size); - void* operator new [](size_t size); + void* operator new(size_t size) throw(); + void* operator new [](size_t size) throw(); void operator delete(void* p); void operator delete [](void* p); }; @@ -111,13 +111,13 @@ class MemRegionClosureRO: public MemRegionClosure { public: - void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) { + void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() { return ResourceObj::operator new(size, type, flags); } - void* operator new(size_t size, Arena *arena) { + void* operator new(size_t size, Arena *arena) throw() { return ResourceObj::operator new(size, arena); } - void* operator new(size_t size) { + void* operator new(size_t size) throw() { return ResourceObj::operator new(size); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metablock.cpp --- a/src/share/vm/memory/metablock.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metablock.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -50,13 +50,6 @@ // Chunks, change Chunks so that they can be allocated out of a VirtualSpace. size_t Metablock::_min_block_byte_size = sizeof(Metablock); -#ifdef ASSERT -size_t Metablock::_overhead = - Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord; -#else -size_t Metablock::_overhead = 0; -#endif - // New blocks returned by the Metaspace are zero initialized. // We should fix the constructors to not assume this instead. Metablock* Metablock::initialize(MetaWord* p, size_t word_size) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metablock.hpp --- a/src/share/vm/memory/metablock.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metablock.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -48,7 +48,6 @@ } _header; } _block; static size_t _min_block_byte_size; - static size_t _overhead; typedef union block_t Block; typedef struct header_t Header; @@ -73,7 +72,6 @@ void set_prev(Metablock* v) { _block._header._prev = v; } static size_t min_block_byte_size() { return _min_block_byte_size; } - static size_t overhead() { return _overhead; } bool is_free() { return header()->_word_size != 0; } void clear_next() { set_next(NULL); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metaspace.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" #include "gc_interface/collectedHeap.hpp" +#include "memory/allocation.hpp" #include "memory/binaryTreeDictionary.hpp" #include "memory/freeList.hpp" #include "memory/collectorPolicy.hpp" @@ -51,7 +52,7 @@ // Parameters for stress mode testing const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lock_chunk = 3; -size_t const allocation_from_dictionary_limit = 64 * K; +size_t const allocation_from_dictionary_limit = 4 * K; MetaWord* last_allocated = 0; @@ -111,7 +112,7 @@ // Has three lists of free chunks, and a total size and // count that includes all three -class ChunkManager VALUE_OBJ_CLASS_SPEC { +class ChunkManager : public CHeapObj { // Free list of chunks of different sizes. 
// SpecializedChunk @@ -158,7 +159,12 @@ public: - ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {} + ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size) + : _free_chunks_total(0), _free_chunks_count(0) { + _free_chunks[SpecializedIndex].set_size(specialized_size); + _free_chunks[SmallIndex].set_size(small_size); + _free_chunks[MediumIndex].set_size(medium_size); + } // add or delete (return) a chunk to the global freelist. Metachunk* chunk_freelist_allocate(size_t word_size); @@ -177,8 +183,8 @@ void return_chunks(ChunkIndex index, Metachunk* chunks); // Total of the space in the free chunks list - size_t free_chunks_total(); - size_t free_chunks_total_in_bytes(); + size_t free_chunks_total_words(); + size_t free_chunks_total_bytes(); // Number of chunks in the free chunks list size_t free_chunks_count(); @@ -219,7 +225,7 @@ void locked_print_free_chunks(outputStream* st); void locked_print_sum_free_chunks(outputStream* st); - void print_on(outputStream* st); + void print_on(outputStream* st) const; }; // Used to manage the free list of Metablocks (a block corresponds @@ -228,6 +234,10 @@ BlockTreeDictionary* _dictionary; static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size); + // Only allocate and split from freelist if the size of the allocation + // is at least 1/4th the size of the available block. + const static int WasteMultiplier = 4; + // Accessors BlockTreeDictionary* dictionary() const { return _dictionary; } @@ -272,11 +282,6 @@ // VirtualSpace Metachunk* first_chunk() { return (Metachunk*) bottom(); } - void inc_container_count(); -#ifdef ASSERT - uint container_count_slow(); -#endif - public: VirtualSpaceNode(size_t byte_size); @@ -287,6 +292,10 @@ MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } + size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } + size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; } + size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } + // address of next available space in _virtual_space; // Accessors VirtualSpaceNode* next() { return _next; } @@ -306,8 +315,10 @@ void inc_top(size_t word_size) { _top += word_size; } uintx container_count() { return _container_count; } + void inc_container_count(); void dec_container_count(); #ifdef ASSERT + uint container_count_slow(); void verify_container_count(); #endif @@ -323,12 +334,10 @@ // Allocate a chunk from the virtual space and return it. Metachunk* get_chunk_vs(size_t chunk_word_size); - Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size); // Expands/shrinks the committed space in a virtual space. Delegates // to Virtualspace bool expand_by(size_t words, bool pre_touch = false); - bool shrink_by(size_t words); // In preparation for deleting this node, remove all the chunks // in the node from any freelist. @@ -336,8 +345,6 @@ #ifdef ASSERT // Debug support - static void verify_virtual_space_total(); - static void verify_virtual_space_count(); void mangle(); #endif @@ -345,7 +352,7 @@ }; // byte_size is the size of the associated virtualspace. 
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) { +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { // align up to vm allocation granularity byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); @@ -417,16 +424,17 @@ VirtualSpaceNode* _virtual_space_list; // virtual space currently being used for allocations VirtualSpaceNode* _current_virtual_space; - // Free chunk list for all other metadata - ChunkManager _chunk_manager; // Can this virtual list allocate >1 spaces? Also, used to determine // whether to allocate unlimited small chunks in this virtual space bool _is_class; - bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; } - - // Sum of space in all virtual spaces and number of virtual spaces - size_t _virtual_space_total; + bool can_grow() const { return !is_class() || !UseCompressedClassPointers; } + + // Sum of reserved and committed memory in the virtual spaces + size_t _reserved_words; + size_t _committed_words; + + // Number of virtual spaces size_t _virtual_space_count; ~VirtualSpaceList(); @@ -440,7 +448,7 @@ _current_virtual_space = v; } - void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size); + void link_vs(VirtualSpaceNode* new_entry); // Get another virtual space and add it to the list. This // is typically prompted by a failed attempt to allocate a chunk @@ -457,6 +465,8 @@ size_t grow_chunks_by_words, size_t medium_chunk_bunch); + bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); + // Get the first chunk for a Metaspace. Used for // special cases such as the boot class loader, reflection // class loader and anonymous class loader. @@ -466,28 +476,25 @@ return _current_virtual_space; } - ChunkManager* chunk_manager() { return &_chunk_manager; } bool is_class() const { return _is_class; } // Allocate the first virtualspace. void initialize(size_t word_size); - size_t virtual_space_total() { return _virtual_space_total; } - - void inc_virtual_space_total(size_t v); - void dec_virtual_space_total(size_t v); + size_t reserved_words() { return _reserved_words; } + size_t reserved_bytes() { return reserved_words() * BytesPerWord; } + size_t committed_words() { return _committed_words; } + size_t committed_bytes() { return committed_words() * BytesPerWord; } + + void inc_reserved_words(size_t v); + void dec_reserved_words(size_t v); + void inc_committed_words(size_t v); + void dec_committed_words(size_t v); void inc_virtual_space_count(); void dec_virtual_space_count(); // Unlink empty VirtualSpaceNodes and free it. - void purge(); - - // Used and capacity in the entire list of virtual spaces. - // These are global values shared by all Metaspaces - size_t capacity_words_sum(); - size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; } - size_t used_words_sum(); - size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; } + void purge(ChunkManager* chunk_manager); bool contains(const void *ptr); @@ -568,18 +575,12 @@ // Type of metadata allocated. Metaspace::MetadataType _mdtype; - // Chunk related size - size_t _medium_chunk_bunch; - // List of chunks in use by this SpaceManager. Allocations // are done from the current chunk. The list is used for deallocating // chunks when the SpaceManager is freed. Metachunk* _chunks_in_use[NumberOfInUseLists]; Metachunk* _current_chunk; - // Virtual space where allocation comes from. 
- VirtualSpaceList* _vs_list; - // Number of small chunks to allocate to a manager // If class space manager, small chunks are unlimited static uint const _small_chunk_limit; @@ -612,7 +613,9 @@ } Metaspace::MetadataType mdtype() { return _mdtype; } - VirtualSpaceList* vs_list() const { return _vs_list; } + + VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); } + ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); } Metachunk* current_chunk() const { return _current_chunk; } void set_current_chunk(Metachunk* v) { @@ -623,6 +626,7 @@ // Add chunk to the list of chunks in use void add_chunk(Metachunk* v, bool make_current); + void retire_current_chunk(); Mutex* lock() const { return _lock; } @@ -633,18 +637,19 @@ public: SpaceManager(Metaspace::MetadataType mdtype, - Mutex* lock, - VirtualSpaceList* vs_list); + Mutex* lock); ~SpaceManager(); enum ChunkMultiples { MediumChunkMultiple = 4 }; + bool is_class() { return _mdtype == Metaspace::ClassType; } + // Accessors size_t specialized_chunk_size() { return SpecializedChunk; } - size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; } - size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } + size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; } + size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } size_t allocated_blocks_words() const { return _allocated_blocks_words; } @@ -722,9 +727,7 @@ // MinChunkSize is a placeholder for the real minimum size JJJ size_t byte_size = word_size * BytesPerWord; - size_t byte_size_with_overhead = byte_size + Metablock::overhead(); - - size_t raw_bytes_size = MAX2(byte_size_with_overhead, + size_t raw_bytes_size = MAX2(byte_size, Metablock::min_block_byte_size()); raw_bytes_size = ARENA_ALIGN(raw_bytes_size); size_t raw_word_size = raw_bytes_size / BytesPerWord; @@ -749,7 +752,7 @@ _container_count++; assert(_container_count == container_count_slow(), err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT - "container_count_slow() " SIZE_FORMAT, + " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); } @@ -762,7 +765,7 @@ void VirtualSpaceNode::verify_container_count() { assert(_container_count == container_count_slow(), err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT - "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); + " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); } #endif @@ -807,12 +810,25 @@ } Metablock* free_block = - dictionary()->get_chunk(word_size, FreeBlockDictionary::exactly); + dictionary()->get_chunk(word_size, FreeBlockDictionary::atLeast); if (free_block == NULL) { return NULL; } - return (MetaWord*) free_block; + const size_t block_size = free_block->size(); + if (block_size > WasteMultiplier * word_size) { + return_block((MetaWord*)free_block, block_size); + return NULL; + } + + MetaWord* new_block = (MetaWord*)free_block; + assert(block_size >= word_size, "Incorrect size of block from freelist"); + const size_t unused = block_size - word_size; + if (unused >= TreeChunk::min_size()) { + return_block(new_block + word_size, unused); + } + + return new_block; } void BlockFreelist::print_on(outputStream* st) const { @@ -855,9 +871,9 @@ if 
(!is_available(chunk_word_size)) { if (TraceMetadataChunkAllocation) { - tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); + gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); // Dump some information about the virtual space that is nearly full - print_on(tty); + print_on(gclog_or_tty); } return NULL; } @@ -878,20 +894,11 @@ if (TraceMetavirtualspaceAllocation && !result) { gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " "for byte size " SIZE_FORMAT, bytes); - virtual_space()->print(); + virtual_space()->print_on(gclog_or_tty); } return result; } -// Shrink the virtual space (commit more of the reserved space) -bool VirtualSpaceNode::shrink_by(size_t words) { - size_t bytes = words * BytesPerWord; - virtual_space()->shrink_by(bytes); - return true; -} - -// Add another chunk to the chunk list. - Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { assert_lock_strong(SpaceManager::expand_lock()); Metachunk* result = take_from_committed(chunk_word_size); @@ -901,23 +908,6 @@ return result; } -Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) { - assert_lock_strong(SpaceManager::expand_lock()); - - Metachunk* new_chunk = get_chunk_vs(chunk_word_size); - - if (new_chunk == NULL) { - // Only a small part of the virtualspace is committed when first - // allocated so committing more here can be expected. - size_t page_size_words = os::vm_page_size() / BytesPerWord; - size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size, - page_size_words); - expand_by(aligned_expand_vs_by_words, false); - new_chunk = get_chunk_vs(chunk_word_size); - } - return new_chunk; -} - bool VirtualSpaceNode::initialize() { if (!_rs.is_reserved()) { @@ -977,13 +967,22 @@ } } -void VirtualSpaceList::inc_virtual_space_total(size_t v) { +void VirtualSpaceList::inc_reserved_words(size_t v) { assert_lock_strong(SpaceManager::expand_lock()); - _virtual_space_total = _virtual_space_total + v; + _reserved_words = _reserved_words + v; +} +void VirtualSpaceList::dec_reserved_words(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _reserved_words = _reserved_words - v; } -void VirtualSpaceList::dec_virtual_space_total(size_t v) { + +void VirtualSpaceList::inc_committed_words(size_t v) { assert_lock_strong(SpaceManager::expand_lock()); - _virtual_space_total = _virtual_space_total - v; + _committed_words = _committed_words + v; +} +void VirtualSpaceList::dec_committed_words(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _committed_words = _committed_words - v; } void VirtualSpaceList::inc_virtual_space_count() { @@ -1011,7 +1010,7 @@ // Walk the list of VirtualSpaceNodes and delete // nodes with a 0 container_count. Remove Metachunks in // the node from their respective freelists. -void VirtualSpaceList::purge() { +void VirtualSpaceList::purge(ChunkManager* chunk_manager) { assert_lock_strong(SpaceManager::expand_lock()); // Don't use a VirtualSpaceListIterator because this // list is being changed and a straightforward use of an iterator is not safe. 
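[Editor's sketch] The BlockFreelist::get_block rewrite above switches the dictionary lookup from an exact-fit to an at-least-fit, then applies two policies: reject a block when more than WasteMultiplier times the request (returning it to the freelist), and split off any usable tail back into the freelist. A simplified model of that policy using a std::multimap in place of HotSpot's BlockTreeDictionary, with sizes treated as bytes and an invented MinBlockSize standing in for TreeChunk min_size():

    #include <cstddef>
    #include <cstdio>
    #include <map>

    static const std::size_t WasteMultiplier = 4;  // mirrors the new constant
    static const std::size_t MinBlockSize    = 8;  // stand-in for TreeChunk min_size()

    typedef std::multimap<std::size_t, char*> FreeList;  // size -> block start

    char* get_block(FreeList& fl, std::size_t size) {
      FreeList::iterator it = fl.lower_bound(size);  // "atLeast", not "exactly"
      if (it == fl.end()) return NULL;               // caller allocates fresh space
      std::size_t block_size = it->first;
      char* block = it->second;
      fl.erase(it);
      if (block_size > WasteMultiplier * size) {
        // Too wasteful to use: put it back and fail the freelist path.
        fl.insert(FreeList::value_type(block_size, block));
        return NULL;
      }
      std::size_t unused = block_size - size;
      if (unused >= MinBlockSize) {
        fl.insert(FreeList::value_type(unused, block + size));  // return the tail
      }
      return block;
    }

    int main() {
      static char arena[1024];
      FreeList fl;
      fl.insert(FreeList::value_type(512, arena));
      // 512 > 4 * 64, so a 64-byte request is refused; 200 bytes is served.
      std::printf("64 from 512:  %s\n", get_block(fl, 64)  ? "taken" : "refused");
      std::printf("200 from 512: %s\n", get_block(fl, 200) ? "taken" : "refused");
      return 0;
    }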
@@ -1033,8 +1032,9 @@ prev_vsl->set_next(vsl->next()); } - vsl->purge(chunk_manager()); - dec_virtual_space_total(vsl->reserved()->word_size()); + vsl->purge(chunk_manager); + dec_reserved_words(vsl->reserved_words()); + dec_committed_words(vsl->committed_words()); dec_virtual_space_count(); purged_vsl = vsl; delete vsl; @@ -1054,49 +1054,16 @@ #endif } -size_t VirtualSpaceList::used_words_sum() { - size_t allocated_by_vs = 0; - VirtualSpaceListIterator iter(virtual_space_list()); - while (iter.repeat()) { - VirtualSpaceNode* vsl = iter.get_next(); - // Sum used region [bottom, top) in each virtualspace - allocated_by_vs += vsl->used_words_in_vs(); - } - assert(allocated_by_vs >= chunk_manager()->free_chunks_total(), - err_msg("Total in free chunks " SIZE_FORMAT - " greater than total from virtual_spaces " SIZE_FORMAT, - allocated_by_vs, chunk_manager()->free_chunks_total())); - size_t used = - allocated_by_vs - chunk_manager()->free_chunks_total(); - return used; -} - -// Space available in all MetadataVirtualspaces allocated -// for metadata. This is the upper limit on the capacity -// of chunks allocated out of all the MetadataVirtualspaces. -size_t VirtualSpaceList::capacity_words_sum() { - size_t capacity = 0; - VirtualSpaceListIterator iter(virtual_space_list()); - while (iter.repeat()) { - VirtualSpaceNode* vsl = iter.get_next(); - capacity += vsl->capacity_words_in_vs(); - } - return capacity; -} - VirtualSpaceList::VirtualSpaceList(size_t word_size ) : _is_class(false), _virtual_space_list(NULL), _current_virtual_space(NULL), - _virtual_space_total(0), + _reserved_words(0), + _committed_words(0), _virtual_space_count(0) { MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); bool initialization_succeeded = grow_vs(word_size); - - _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); - _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk); - _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk); assert(initialization_succeeded, " VirtualSpaceList initialization should not fail"); } @@ -1105,17 +1072,15 @@ _is_class(true), _virtual_space_list(NULL), _current_virtual_space(NULL), - _virtual_space_total(0), + _reserved_words(0), + _committed_words(0), _virtual_space_count(0) { MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); bool succeeded = class_entry->initialize(); - _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); - _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk); - _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk); assert(succeeded, " VirtualSpaceList initialization should not fail"); - link_vs(class_entry, rs.size()/BytesPerWord); + link_vs(class_entry); } size_t VirtualSpaceList::free_bytes() { @@ -1130,7 +1095,7 @@ } // Reserve the space size_t vs_byte_size = vs_word_size * BytesPerWord; - assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned"); + assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); // Allocate the meta virtual space and initialize it. 
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); @@ -1138,44 +1103,53 @@ delete new_entry; return false; } else { + assert(new_entry->reserved_words() == vs_word_size, "Must be"); // ensure lock-free iteration sees fully initialized node OrderAccess::storestore(); - link_vs(new_entry, vs_word_size); + link_vs(new_entry); return true; } } -void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) { +void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { if (virtual_space_list() == NULL) { set_virtual_space_list(new_entry); } else { current_virtual_space()->set_next(new_entry); } set_current_virtual_space(new_entry); - inc_virtual_space_total(vs_word_size); + inc_reserved_words(new_entry->reserved_words()); + inc_committed_words(new_entry->committed_words()); inc_virtual_space_count(); #ifdef ASSERT new_entry->mangle(); #endif if (TraceMetavirtualspaceAllocation && Verbose) { VirtualSpaceNode* vsl = current_virtual_space(); - vsl->print_on(tty); + vsl->print_on(gclog_or_tty); } } +bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { + size_t before = node->committed_words(); + + bool result = node->expand_by(word_size, pre_touch); + + size_t after = node->committed_words(); + + // after and before can be the same if the memory was pre-committed. + assert(after >= before, "Must be"); + inc_committed_words(after - before); + + return result; +} + Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, size_t grow_chunks_by_words, size_t medium_chunk_bunch) { - // Get a chunk from the chunk freelist - Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); - - if (next != NULL) { - next->container()->inc_container_count(); - } else { - // Allocate a chunk out of the current virtual space. - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); - } + // Allocate a chunk out of the current virtual space. + Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); if (next == NULL) { // Not enough room in current virtual space. Try to commit @@ -1186,18 +1160,27 @@ size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words, page_size_words); bool vs_expanded = - current_virtual_space()->expand_by(aligned_expand_vs_by_words, false); + expand_by(current_virtual_space(), aligned_expand_vs_by_words); if (!vs_expanded) { // Should the capacity of the metaspaces be expanded for // this allocation? If it's the virtual space for classes and is // being used for CompressedHeaders, don't allocate a new virtualspace. if (can_grow() && MetaspaceGC::should_expand(this, word_size)) { // Get another virtual space. - size_t grow_vs_words = - MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words); + size_t allocation_aligned_expand_words = + align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord); + size_t grow_vs_words = + MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words); if (grow_vs(grow_vs_words)) { // Got it. It's on the list now. Get a chunk from it. - next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words); + assert(current_virtual_space()->expanded_words() == 0, + "New virtual space nodes should not have expanded"); + + size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words, + page_size_words); + // We probably want to expand by aligned_expand_vs_by_words here. 
+ expand_by(current_virtual_space(), grow_chunks_by_words_aligned); + next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); } } else { // Allocation will fail and induce a GC @@ -1307,8 +1290,9 @@ // reserved space, because this is a larger space prereserved for compressed // class pointers. if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { - size_t real_allocated = Metaspace::space_list()->virtual_space_total() + - MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); + size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); + size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); + size_t real_allocated = nonclass_allocated + class_allocated; if (real_allocated >= MaxMetaspaceSize) { return false; } @@ -1501,15 +1485,15 @@ if (dummy_chunk == NULL) { break; } - vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk); + sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ", sm->sum_count_in_chunks_in_use()); dummy_chunk->print_on(gclog_or_tty); gclog_or_tty->print_cr(" Free chunks total %d count %d", - vsl->chunk_manager()->free_chunks_total(), - vsl->chunk_manager()->free_chunks_count()); + sm->chunk_manager()->free_chunks_total_words(), + sm->chunk_manager()->free_chunks_count()); } } } else { @@ -1565,12 +1549,12 @@ // ChunkManager methods -size_t ChunkManager::free_chunks_total() { +size_t ChunkManager::free_chunks_total_words() { return _free_chunks_total; } -size_t ChunkManager::free_chunks_total_in_bytes() { - return free_chunks_total() * BytesPerWord; +size_t ChunkManager::free_chunks_total_bytes() { + return free_chunks_total_words() * BytesPerWord; } size_t ChunkManager::free_chunks_count() { @@ -1698,9 +1682,9 @@ assert_lock_strong(SpaceManager::expand_lock()); slow_locked_verify(); if (TraceMetadataChunkAllocation) { - tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk " - PTR_FORMAT " size " SIZE_FORMAT, - chunk, chunk->word_size()); + gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk " + PTR_FORMAT " size " SIZE_FORMAT, + chunk, chunk->word_size()); } free_chunks_put(chunk); } @@ -1729,9 +1713,9 @@ dec_free_chunks_total(chunk->capacity_word_size()); if (TraceMetadataChunkAllocation && Verbose) { - tty->print_cr("ChunkManager::free_chunks_get: free_list " - PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, - free_list, chunk, chunk->word_size()); + gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " + PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, + free_list, chunk, chunk->word_size()); } } else { chunk = humongous_dictionary()->get_chunk( @@ -1741,10 +1725,10 @@ if (chunk != NULL) { if (TraceMetadataHumongousAllocation) { size_t waste = chunk->word_size() - word_size; - tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT - " for requested size " SIZE_FORMAT - " waste " SIZE_FORMAT, - chunk->word_size(), word_size, waste); + gclog_or_tty->print_cr("Free list allocate humongous chunk size " + SIZE_FORMAT " for requested size " SIZE_FORMAT + " waste " SIZE_FORMAT, + chunk->word_size(), word_size, waste); } // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); @@ -1761,6 +1745,8 @@ // work. 
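
In the `should_expand` hunk above, the cap check now adds the reserved bytes of the non-class list to the allocated capacity of the class space before comparing against MaxMetaspaceSize. Reduced to its essentials (parameter names are illustrative):

    #include <cstddef>

    // Sketch of the grow/deny decision: expansion is refused once what both
    // metadata areas already account for reaches the configured cap.
    bool may_expand(size_t nonclass_reserved_bytes,
                    size_t class_capacity_bytes,
                    size_t max_metaspace_bytes) {
      size_t real_allocated = nonclass_reserved_bytes + class_capacity_bytes;
      return real_allocated < max_metaspace_bytes;
    }
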
chunk->set_is_free(false); #endif + chunk->container()->inc_container_count(); + slow_locked_verify(); return chunk; } @@ -1786,18 +1772,18 @@ } else { list_count = humongous_dictionary()->total_count(); } - tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " - PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", - this, chunk, chunk->word_size(), list_count); - locked_print_free_chunks(tty); + gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " + PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", + this, chunk, chunk->word_size(), list_count); + locked_print_free_chunks(gclog_or_tty); } return chunk; } -void ChunkManager::print_on(outputStream* out) { +void ChunkManager::print_on(outputStream* out) const { if (PrintFLSStatistics != 0) { - humongous_dictionary()->report_statistics(); + const_cast(this)->humongous_dictionary()->report_statistics(); } } @@ -1944,8 +1930,8 @@ } } - vs_list()->chunk_manager()->locked_print_free_chunks(st); - vs_list()->chunk_manager()->locked_print_sum_free_chunks(st); + chunk_manager()->locked_print_free_chunks(st); + chunk_manager()->locked_print_sum_free_chunks(st); } size_t SpaceManager::calc_chunk_size(size_t word_size) { @@ -2049,9 +2035,7 @@ } SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, - Mutex* lock, - VirtualSpaceList* vs_list) : - _vs_list(vs_list), + Mutex* lock) : _mdtype(mdtype), _allocated_blocks_words(0), _allocated_chunks_words(0), @@ -2137,9 +2121,7 @@ MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); - ChunkManager* chunk_manager = vs_list()->chunk_manager(); - - chunk_manager->slow_locked_verify(); + chunk_manager()->slow_locked_verify(); dec_total_from_size_metrics(); @@ -2153,8 +2135,8 @@ // Have to update before the chunks_in_use lists are emptied // below. - chunk_manager->inc_free_chunks_total(allocated_chunks_words(), - sum_count_in_chunks_in_use()); + chunk_manager()->inc_free_chunks_total(allocated_chunks_words(), + sum_count_in_chunks_in_use()); // Add all the chunks in use by this space manager // to the global list of free chunks. 
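
Making `ChunkManager::print_on` const while `report_statistics()` stays non-const is what forces the `const_cast` above; it is the usual bridge when a logically read-only method must call a legacy interface that was never const-qualified. A self-contained illustration:

    #include <cstdio>

    struct Dictionary {
      int count = 0;
      void report_statistics() { printf("count %d\n", count); }  // non-const legacy API
    };

    struct Manager {
      Dictionary dict;
      // Logically read-only, hence const; the const_cast bridges to the
      // non-const reporting call without touching that older interface.
      void print_on() const {
        const_cast<Manager*>(this)->dict.report_statistics();
      }
    };
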
@@ -2169,11 +2151,11 @@ chunk_size_name(i)); } Metachunk* chunks = chunks_in_use(i); - chunk_manager->return_chunks(i, chunks); + chunk_manager()->return_chunks(i, chunks); set_chunks_in_use(i, NULL); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("updated freelist count %d %s", - chunk_manager->free_chunks(i)->count(), + chunk_manager()->free_chunks(i)->count(), chunk_size_name(i)); } assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); @@ -2210,16 +2192,16 @@ humongous_chunks->word_size(), HumongousChunkGranularity)); Metachunk* next_humongous_chunks = humongous_chunks->next(); humongous_chunks->container()->dec_container_count(); - chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks); + chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); humongous_chunks = next_humongous_chunks; } if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr(""); gclog_or_tty->print_cr("updated dictionary count %d %s", - chunk_manager->humongous_dictionary()->total_count(), + chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); } - chunk_manager->slow_locked_verify(); + chunk_manager()->slow_locked_verify(); } const char* SpaceManager::chunk_size_name(ChunkIndex index) const { @@ -2278,6 +2260,7 @@ ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); if (index != HumongousIndex) { + retire_current_chunk(); set_current_chunk(new_chunk); new_chunk->set_next(chunks_in_use(index)); set_chunks_in_use(index, new_chunk); @@ -2307,23 +2290,35 @@ gclog_or_tty->print("SpaceManager::add_chunk: %d) ", sum_count_in_chunks_in_use()); new_chunk->print_on(gclog_or_tty); - if (vs_list() != NULL) { - vs_list()->chunk_manager()->locked_print_free_chunks(tty); + chunk_manager()->locked_print_free_chunks(gclog_or_tty); + } +} + +void SpaceManager::retire_current_chunk() { + if (current_chunk() != NULL) { + size_t remaining_words = current_chunk()->free_word_size(); + if (remaining_words >= TreeChunk::min_size()) { + block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words); + inc_used_metrics(remaining_words); } } } Metachunk* SpaceManager::get_new_chunk(size_t word_size, size_t grow_chunks_by_words) { - - Metachunk* next = vs_list()->get_new_chunk(word_size, - grow_chunks_by_words, - medium_chunk_bunch()); - - if (TraceMetadataHumongousAllocation && + // Get a chunk from the chunk freelist + Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); + + if (next == NULL) { + next = vs_list()->get_new_chunk(word_size, + grow_chunks_by_words, + medium_chunk_bunch()); + } + + if (TraceMetadataHumongousAllocation && next != NULL && SpaceManager::is_humongous(next->word_size())) { - gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT, - next->word_size()); + gclog_or_tty->print_cr(" new humongous chunk word size " + PTR_FORMAT, next->word_size()); } return next; @@ -2441,9 +2436,6 @@ curr = curr->next()) { out->print("%d) ", i++); curr->print_on(out); - if (TraceMetadataChunkAllocation && Verbose) { - block_freelists()->print_on(out); - } curr_total += curr->word_size(); used += curr->used_word_size(); capacity += curr->capacity_word_size(); @@ -2451,6 +2443,10 @@ } } + if (TraceMetadataChunkAllocation && Verbose) { + block_freelists()->print_on(out); + } + size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); // Free space isn't wasted. 
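
`SpaceManager::get_new_chunk` above now tries the chunk free list first and falls back to carving fresh space out of the VirtualSpaceList only on a miss, instead of having the VirtualSpaceList consult the freelist itself. A standalone sketch of that two-level lookup with simplified stand-in types:

    #include <cstddef>
    #include <vector>

    struct Chunk { size_t word_size; };

    // Sketch: recycle from a free list before carving new space, mirroring
    // the freelist-first order of get_new_chunk after the patch.
    class TwoLevelAllocator {
      std::vector<Chunk*> _free_list;
     public:
      Chunk* take_from_free_list(size_t words) {
        for (size_t i = 0; i < _free_list.size(); ++i) {
          if (_free_list[i]->word_size >= words) {
            Chunk* c = _free_list[i];
            _free_list.erase(_free_list.begin() + i);
            return c;
          }
        }
        return nullptr;
      }
      Chunk* carve_new(size_t words) { return new Chunk{words}; }  // stand-in for VS expansion
      Chunk* get_chunk(size_t words) {
        Chunk* c = take_from_free_list(words);
        return c != nullptr ? c : carve_new(words);
      }
      void return_chunk(Chunk* c) { _free_list.push_back(c); }  // analogue of ~SpaceManager
    };

The same freelist-first shape reappears below in `Metaspace::get_initialization_chunk`.
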
waste -= free; @@ -2480,16 +2476,13 @@ size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0}; size_t MetaspaceAux::_allocated_used_words[] = {0, 0}; +size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { + VirtualSpaceList* list = Metaspace::get_space_list(mdtype); + return list == NULL ? 0 : list->free_bytes(); +} + size_t MetaspaceAux::free_bytes() { - size_t result = 0; - if (Metaspace::using_class_space() && - (Metaspace::class_space_list() != NULL)) { - result = result + Metaspace::class_space_list()->free_bytes(); - } - if (Metaspace::space_list() != NULL) { - result = result + Metaspace::space_list()->free_bytes(); - } - return result; + return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); } void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { @@ -2541,13 +2534,13 @@ return used * BytesPerWord; } -size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) { +size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { size_t free = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); if (msp != NULL) { - free += msp->free_words(mdtype); + free += msp->free_words_slow(mdtype); } } return free * BytesPerWord; @@ -2570,39 +2563,55 @@ return capacity * BytesPerWord; } -size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) { - if (mdtype == Metaspace::ClassType) { - return Metaspace::using_class_space() ? - Metaspace::class_space_list()->virtual_space_total() * BytesPerWord : 0; - } else { - return Metaspace::space_list()->virtual_space_total() * BytesPerWord; - } +size_t MetaspaceAux::capacity_bytes_slow() { +#ifdef PRODUCT + // Use allocated_capacity_bytes() in PRODUCT instead of this function. + guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); +#endif + size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); + size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); + assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, + err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT + " class_capacity + non_class_capacity " SIZE_FORMAT + " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, + allocated_capacity_bytes(), class_capacity + non_class_capacity, + class_capacity, non_class_capacity)); + + return class_capacity + non_class_capacity; } -size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); } - -size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) { - if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { +size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { + VirtualSpaceList* list = Metaspace::get_space_list(mdtype); + return list == NULL ? 0 : list->reserved_bytes(); +} + +size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { + VirtualSpaceList* list = Metaspace::get_space_list(mdtype); + return list == NULL ? 0 : list->committed_bytes(); +} + +size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } + +size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { + ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); + if (chunk_manager == NULL) { return 0; } - ChunkManager* chunk = (mdtype == Metaspace::ClassType) ? 
-                      Metaspace::class_space_list()->chunk_manager() :
-                      Metaspace::space_list()->chunk_manager();
-  chunk->slow_verify();
-  return chunk->free_chunks_total();
+  chunk_manager->slow_verify();
+  return chunk_manager->free_chunks_total_words();
+}
+
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+  return free_chunks_total_words(mdtype) * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
-  return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_words() {
+  return free_chunks_total_words(Metaspace::ClassType) +
+         free_chunks_total_words(Metaspace::NonClassType);
 }
 
-size_t MetaspaceAux::free_chunks_total() {
-  return free_chunks_total(Metaspace::ClassType) +
-         free_chunks_total(Metaspace::NonClassType);
-}
-
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
@@ -2613,14 +2622,14 @@
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
                         allocated_used_bytes(),
-                        reserved_in_bytes());
+                        reserved_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
-                        prev_metadata_used / K,
-                        allocated_used_bytes() / K,
-                        reserved_in_bytes()/ K);
+                        prev_metadata_used/K,
+                        allocated_used_bytes()/K,
+                        reserved_bytes()/K);
   }
 
   gclog_or_tty->print("]");
@@ -2633,14 +2642,14 @@
   out->print_cr(" Metaspace total "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
 
   out->print_cr("  data space     "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
-                reserved_in_bytes(nct)/K);
+                reserved_bytes(nct)/K);
   if (Metaspace::using_class_space()) {
     Metaspace::MetadataType ct = Metaspace::ClassType;
     out->print_cr("  class space    "
@@ -2648,17 +2657,17 @@
                   " reserved " SIZE_FORMAT "K",
                   allocated_capacity_bytes(ct)/K,
                   allocated_used_bytes(ct)/K,
-                  reserved_in_bytes(ct)/K);
+                  reserved_bytes(ct)/K);
   }
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
-  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
   size_t capacity_bytes = capacity_bytes_slow(mdtype);
   size_t used_bytes = used_bytes_slow(mdtype);
-  size_t free_bytes = free_in_bytes(mdtype);
+  size_t free_bytes = free_bytes_slow(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                          free_chunks_capacity_bytes;
   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2740,9 +2749,9 @@
 }
 
 void MetaspaceAux::verify_free_chunks() {
-  Metaspace::space_list()->chunk_manager()->verify();
+  Metaspace::chunk_manager_metadata()->verify();
   if (Metaspace::using_class_space()) {
-    Metaspace::class_space_list()->chunk_manager()->verify();
+    Metaspace::chunk_manager_class()->verify();
   }
 }
 
@@ -2813,6 +2822,9 @@
 VirtualSpaceList* Metaspace::_space_list = NULL;
 VirtualSpaceList* Metaspace::_class_space_list = NULL;
 
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
 #define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
@@ -2844,7 +2856,7 @@
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                 (address)(metaspace_base + class_metaspace_size()));
@@ -2854,7 +2866,7 @@
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
@@ -2877,9 +2889,9 @@
   // If no successful allocation then try to allocate the space anywhere.  If
   // that fails then OOM doom.  At this point we cannot try allocating the
-  // metaspace as if UseCompressedKlassPointers is off because too much
-  // initialization has happened that depends on UseCompressedKlassPointers.
-  // So, UseCompressedKlassPointers cannot be turned off at this point.
+  // metaspace as if UseCompressedClassPointers is off because too much
+  // initialization has happened that depends on UseCompressedClassPointers.
+  // So, UseCompressedClassPointers cannot be turned off at this point.
   if (!metaspace_rs.is_reserved()) {
     metaspace_rs = ReservedSpace(class_metaspace_size(),
                                  os::vm_allocation_granularity(), false);
@@ -2912,14 +2924,15 @@
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved above the top of
+// For UseCompressedClassPointers the class space is reserved above the top of
 // the Java heap.  The argument passed in is at the base of the compressed space.
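
`can_use_cds_with_metaspace_addr` above reduces to a span check: measured from the lower of the two bases, both the end of the CDS archive and the end of the class space must stay inside the range a compressed klass pointer can encode. Simplified sketch; the 32G constant is an assumption standing in for KlassEncodingMetaspaceMax:

    #include <algorithm>
    #include <cstdint>

    // Assumption: a 32G encodable span (4G compressed ids << 3-bit shift).
    const uint64_t KlassEncodingMax = uint64_t(1) << 35;

    // Everything that must be addressable via one common base has to fall
    // inside the encodable span of a compressed klass pointer.
    bool ranges_encodable(uint64_t metaspace_base, uint64_t metaspace_end,
                          uint64_t cds_base, uint64_t cds_end) {
      uint64_t lower_base     = std::min(metaspace_base, cds_base);
      uint64_t higher_address = std::max(metaspace_end, cds_end);
      return higher_address - lower_base <= KlassEncodingMax;
    }
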
void Metaspace::initialize_class_space(ReservedSpace rs) { // The reserved space size may be bigger because of alignment, esp with UseLargePages - assert(rs.size() >= ClassMetaspaceSize, - err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize)); + assert(rs.size() >= CompressedClassSpaceSize, + err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize)); assert(using_class_space(), "Must be using class space"); _class_space_list = new VirtualSpaceList(rs); + _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); } #endif @@ -2929,7 +2942,7 @@ int max_alignment = os::vm_page_size(); size_t cds_total = 0; - set_class_metaspace_size(align_size_up(ClassMetaspaceSize, + set_class_metaspace_size(align_size_up(CompressedClassSpaceSize, os::vm_allocation_granularity())); MetaspaceShared::set_max_alignment(max_alignment); @@ -2945,12 +2958,13 @@ // remainder is the misc code and data chunks. cds_total = FileMapInfo::shared_spaces_size(); _space_list = new VirtualSpaceList(cds_total/wordSize); + _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); #ifdef _LP64 // Set the compressed klass pointer base so that decoding of these pointers works // properly when creating the shared archive. - assert(UseCompressedOops && UseCompressedKlassPointers, - "UseCompressedOops and UseCompressedKlassPointers must be set"); + assert(UseCompressedOops && UseCompressedClassPointers, + "UseCompressedOops and UseCompressedClassPointers must be set"); Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); if (TraceMetavirtualspaceAllocation && Verbose) { gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, @@ -2987,7 +3001,7 @@ } #ifdef _LP64 - // If UseCompressedKlassPointers is set then allocate the metaspace area + // If UseCompressedClassPointers is set then allocate the metaspace area // above the heap and above the CDS area (if it exists). if (using_class_space()) { if (UseSharedSpaces) { @@ -3005,22 +3019,37 @@ // on the medium chunk list. The next chunk will be small and progress // from there. This size calculated by -version. _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, - (ClassMetaspaceSize/BytesPerWord)*2); + (CompressedClassSpaceSize/BytesPerWord)*2); _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); // Arbitrarily set the initial virtual space to a multiple // of the boot class loader size. size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); // Initialize the list of virtual spaces. 
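
`global_initialize` above rounds `CompressedClassSpaceSize` up with `align_size_up(..., os::vm_allocation_granularity())`, the same rounding the expansion paths use earlier in the file. For a power-of-two granularity that is the usual mask trick; sketch:

    #include <cassert>
    #include <cstddef>

    // Sketch of align_size_up for a power-of-two alignment: round size up to
    // the next multiple (returns size unchanged when already aligned).
    size_t align_size_up(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // e.g. align_size_up(5000, 4096) == 8192; align_size_up(8192, 4096) == 8192
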
_space_list = new VirtualSpaceList(word_size); + _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); } } +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, + size_t chunk_word_size, + size_t chunk_bunch) { + // Get a chunk from the chunk freelist + Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); + if (chunk != NULL) { + return chunk; + } + + return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch); +} + void Metaspace::initialize(Mutex* lock, MetaspaceType type) { assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized"); - - _vsm = new SpaceManager(NonClassType, lock, space_list()); + assert(chunk_manager_metadata() != NULL, + "Metadata ChunkManager has not been initialized"); + + _vsm = new SpaceManager(NonClassType, lock); if (_vsm == NULL) { return; } @@ -3029,11 +3058,13 @@ vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); if (using_class_space()) { - assert(class_space_list() != NULL, - "Class VirtualSpaceList has not been initialized"); + assert(class_space_list() != NULL, + "Class VirtualSpaceList has not been initialized"); + assert(chunk_manager_class() != NULL, + "Class ChunkManager has not been initialized"); // Allocate SpaceManager for classes. - _class_vsm = new SpaceManager(ClassType, lock, class_space_list()); + _class_vsm = new SpaceManager(ClassType, lock); if (_class_vsm == NULL) { return; } @@ -3042,9 +3073,9 @@ MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); // Allocate chunk for metadata objects - Metachunk* new_chunk = - space_list()->get_initialization_chunk(word_size, - vsm()->medium_chunk_bunch()); + Metachunk* new_chunk = get_initialization_chunk(NonClassType, + word_size, + vsm()->medium_chunk_bunch()); assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); if (new_chunk != NULL) { // Add to this manager's list of chunks in use and current_chunk(). @@ -3053,9 +3084,9 @@ // Allocate chunk for class metadata objects if (using_class_space()) { - Metachunk* class_chunk = - class_space_list()->get_initialization_chunk(class_word_size, - class_vsm()->medium_chunk_bunch()); + Metachunk* class_chunk = get_initialization_chunk(ClassType, + class_word_size, + class_vsm()->medium_chunk_bunch()); if (class_chunk != NULL) { class_vsm()->add_chunk(class_chunk, true); } @@ -3072,8 +3103,8 @@ MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { // DumpSharedSpaces doesn't use class metadata area (yet) - // Also, don't use class_vsm() unless UseCompressedKlassPointers is true. - if (mdtype == ClassType && using_class_space()) { + // Also, don't use class_vsm() unless UseCompressedClassPointers is true. + if (is_class_space_allocation(mdtype)) { return class_vsm()->allocate(word_size); } else { return vsm()->allocate(word_size); @@ -3111,7 +3142,7 @@ } } -size_t Metaspace::free_words(MetadataType mdtype) const { +size_t Metaspace::free_words_slow(MetadataType mdtype) const { if (mdtype == ClassType) { return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; } else { @@ -3221,8 +3252,8 @@ MetaspaceAux::dump(gclog_or_tty); } // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support - const char* space_string = (mdtype == ClassType) ? "Class Metadata space" : - "Metadata space"; + const char* space_string = is_class_space_allocation(mdtype) ? 
"Compressed class space" : + "Metadata space"; report_java_out_of_memory(space_string); if (JvmtiExport::should_post_resource_exhausted()) { @@ -3230,7 +3261,7 @@ JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, space_string); } - if (mdtype == ClassType) { + if (is_class_space_allocation(mdtype)) { THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); } else { THROW_OOP_0(Universe::out_of_memory_error_metaspace()); @@ -3272,12 +3303,16 @@ } } +void Metaspace::purge(MetadataType mdtype) { + get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); +} + void Metaspace::purge() { MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); - space_list()->purge(); + purge(NonClassType); if (using_class_space()) { - class_space_list()->purge(); + purge(ClassType); } } @@ -3319,3 +3354,70 @@ class_vsm()->dump(out); } } + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TestMetaspaceAuxTest : AllStatic { + public: + static void test_reserved() { + size_t reserved = MetaspaceAux::reserved_bytes(); + + assert(reserved > 0, "assert"); + + size_t committed = MetaspaceAux::committed_bytes(); + assert(committed <= reserved, "assert"); + + size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); + assert(reserved_metadata > 0, "assert"); + assert(reserved_metadata <= reserved, "assert"); + + if (UseCompressedClassPointers) { + size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); + assert(reserved_class > 0, "assert"); + assert(reserved_class < reserved, "assert"); + } + } + + static void test_committed() { + size_t committed = MetaspaceAux::committed_bytes(); + + assert(committed > 0, "assert"); + + size_t reserved = MetaspaceAux::reserved_bytes(); + assert(committed <= reserved, "assert"); + + size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); + assert(committed_metadata > 0, "assert"); + assert(committed_metadata <= committed, "assert"); + + if (UseCompressedClassPointers) { + size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); + assert(committed_class > 0, "assert"); + assert(committed_class < committed, "assert"); + } + } + + static void test_virtual_space_list_large_chunk() { + VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); + MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); + // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be + // vm_allocation_granularity aligned on Windows. + size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord)); + large_size += (os::vm_page_size()/BytesPerWord); + vs_list->get_new_chunk(large_size, large_size, 0); + } + + static void test() { + test_reserved(); + test_committed(); + test_virtual_space_list_large_chunk(); + } +}; + +void TestMetaspaceAux_test() { + TestMetaspaceAuxTest::test(); +} + +#endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metaspace.hpp --- a/src/share/vm/memory/metaspace.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metaspace.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -56,12 +56,15 @@ // +-------------------+ // +class ChunkManager; class ClassLoaderData; class Metablock; +class Metachunk; class MetaWord; class Mutex; class outputStream; class SpaceManager; +class VirtualSpaceList; // Metaspaces each have a SpaceManager and allocations // are done by the SpaceManager. 
Allocations are done @@ -76,8 +79,6 @@ // allocate() method returns a block for use as a // quantum of metadata. -class VirtualSpaceList; - class Metaspace : public CHeapObj { friend class VMStructs; friend class SpaceManager; @@ -102,6 +103,10 @@ private: void initialize(Mutex* lock, MetaspaceType type); + Metachunk* get_initialization_chunk(MetadataType mdtype, + size_t chunk_word_size, + size_t chunk_bunch); + // Align up the word size to the allocation word size static size_t align_word_size_up(size_t); @@ -134,9 +139,25 @@ static VirtualSpaceList* _space_list; static VirtualSpaceList* _class_space_list; + static ChunkManager* _chunk_manager_metadata; + static ChunkManager* _chunk_manager_class; + + public: static VirtualSpaceList* space_list() { return _space_list; } static VirtualSpaceList* class_space_list() { return _class_space_list; } + static VirtualSpaceList* get_space_list(MetadataType mdtype) { + assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); + return mdtype == ClassType ? class_space_list() : space_list(); + } + static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; } + static ChunkManager* chunk_manager_class() { return _chunk_manager_class; } + static ChunkManager* get_chunk_manager(MetadataType mdtype) { + assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); + return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata(); + } + + private: // This is used by DumpSharedSpaces only, where only _vsm is used. So we will // maintain a single list for now. void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size); @@ -178,9 +199,8 @@ char* bottom() const; size_t used_words_slow(MetadataType mdtype) const; - size_t free_words(MetadataType mdtype) const; + size_t free_words_slow(MetadataType mdtype) const; size_t capacity_words_slow(MetadataType mdtype) const; - size_t waste_words(MetadataType mdtype) const; size_t used_bytes_slow(MetadataType mdtype) const; size_t capacity_bytes_slow(MetadataType mdtype) const; @@ -196,6 +216,7 @@ void dump(outputStream* const out) const; // Free empty virtualspaces + static void purge(MetadataType mdtype); static void purge(); void print_on(outputStream* st) const; @@ -209,28 +230,25 @@ void iterate(AllocRecordClosure *closure); - // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False. + // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False. static bool using_class_space() { - return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces); + return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces); } + static bool is_class_space_allocation(MetadataType mdType) { + return mdType == ClassType && using_class_space(); + } }; class MetaspaceAux : AllStatic { - static size_t free_chunks_total(Metaspace::MetadataType mdtype); - static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); - - public: - // Statistics for class space and data space in metaspace. + static size_t free_chunks_total_words(Metaspace::MetadataType mdtype); // These methods iterate over the classloader data graph // for the given Metaspace type. These are slow. 
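
The new `get_space_list`/`get_chunk_manager` accessors above fold the repeated class-vs-nonclass branching at call sites into one dispatch point, with the sentinel enumerator rejected up front. Standalone sketch of the pattern:

    #include <cassert>

    enum MetadataType { ClassType, NonClassType, MetadataTypeCount };

    struct ChunkManager;  // opaque in this sketch

    ChunkManager* g_chunk_manager_class    = nullptr;
    ChunkManager* g_chunk_manager_metadata = nullptr;

    // One dispatch point instead of per-call-site conditionals; the count
    // sentinel is explicitly rejected, as in the patch's assert.
    ChunkManager* get_chunk_manager(MetadataType mdtype) {
      assert(mdtype != MetadataTypeCount && "count sentinel is not a valid mdtype");
      return mdtype == ClassType ? g_chunk_manager_class : g_chunk_manager_metadata;
    }
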
static size_t used_bytes_slow(Metaspace::MetadataType mdtype); - static size_t free_in_bytes(Metaspace::MetadataType mdtype); + static size_t free_bytes_slow(Metaspace::MetadataType mdtype); static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); - - // Iterates over the virtual space list. - static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); + static size_t capacity_bytes_slow(); // Running sum of space in all Metachunks that has been // allocated to a Metaspace. This is used instead of @@ -260,16 +278,16 @@ } // Used by MetaspaceCounters - static size_t free_chunks_total(); - static size_t free_chunks_total_in_bytes(); + static size_t free_chunks_total_words(); + static size_t free_chunks_total_bytes(); + static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype); static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) { return _allocated_capacity_words[mdtype]; } static size_t allocated_capacity_words() { - return _allocated_capacity_words[Metaspace::NonClassType] + - (Metaspace::using_class_space() ? - _allocated_capacity_words[Metaspace::ClassType] : 0); + return allocated_capacity_words(Metaspace::NonClassType) + + allocated_capacity_words(Metaspace::ClassType); } static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) { return allocated_capacity_words(mdtype) * BytesPerWord; @@ -282,9 +300,8 @@ return _allocated_used_words[mdtype]; } static size_t allocated_used_words() { - return _allocated_used_words[Metaspace::NonClassType] + - (Metaspace::using_class_space() ? - _allocated_used_words[Metaspace::ClassType] : 0); + return allocated_used_words(Metaspace::NonClassType) + + allocated_used_words(Metaspace::ClassType); } static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) { return allocated_used_words(mdtype) * BytesPerWord; @@ -294,32 +311,24 @@ } static size_t free_bytes(); + static size_t free_bytes(Metaspace::MetadataType mdtype); - // Total capacity in all Metaspaces - static size_t capacity_bytes_slow() { -#ifdef PRODUCT - // Use allocated_capacity_bytes() in PRODUCT instead of this function. - guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); -#endif - size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); - size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); - assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, - err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT - " class_capacity + non_class_capacity " SIZE_FORMAT - " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, - allocated_capacity_bytes(), class_capacity + non_class_capacity, - class_capacity, non_class_capacity)); - - return class_capacity + non_class_capacity; + static size_t reserved_bytes(Metaspace::MetadataType mdtype); + static size_t reserved_bytes() { + return reserved_bytes(Metaspace::ClassType) + + reserved_bytes(Metaspace::NonClassType); } - // Total space reserved in all Metaspaces - static size_t reserved_in_bytes() { - return reserved_in_bytes(Metaspace::ClassType) + - reserved_in_bytes(Metaspace::NonClassType); + static size_t committed_bytes(Metaspace::MetadataType mdtype); + static size_t committed_bytes() { + return committed_bytes(Metaspace::ClassType) + + committed_bytes(Metaspace::NonClassType); } - static size_t min_chunk_size(); + static size_t min_chunk_size_words(); + static size_t min_chunk_size_bytes() { + return min_chunk_size_words() * BytesPerWord; + } // Print change in used metadata. 
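
Two conventions recur through this header: sizes are tracked in words with byte values always derived rather than stored, and totals now sum both metadata types unconditionally because an unused class space simply leaves its counters at zero. Both in one standalone sketch:

    #include <cstddef>

    const size_t BytesPerWord = sizeof(void*);  // assumption: one word == pointer size

    enum MetadataType { ClassType, NonClassType, MetadataTypeCount };

    size_t g_capacity_words[MetadataTypeCount] = {0, 0};

    // Words are the source of truth; bytes are derived, so the two units
    // can never drift apart.
    size_t capacity_words(MetadataType t) { return g_capacity_words[t]; }
    size_t capacity_bytes(MetadataType t) { return capacity_words(t) * BytesPerWord; }

    // Safe to sum unconditionally: a type that is never used keeps its
    // counter at zero, which is what the old conditional returned anyway.
    size_t capacity_words_total() {
      return capacity_words(NonClassType) + capacity_words(ClassType);
    }
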
static void print_metaspace_change(size_t prev_metadata_used); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metaspaceCounters.cpp --- a/src/share/vm/memory/metaspaceCounters.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metaspaceCounters.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,109 +25,109 @@ #include "precompiled.hpp" #include "memory/metaspaceCounters.hpp" #include "memory/resourceArea.hpp" +#include "runtime/globals.hpp" +#include "runtime/perfData.hpp" #include "utilities/exceptions.hpp" -MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL; - -size_t MetaspaceCounters::calc_total_capacity() { - // The total capacity is the sum of - // 1) capacity of Metachunks in use by all Metaspaces - // 2) unused space at the end of each Metachunk - // 3) space in the freelist - size_t total_capacity = MetaspaceAux::allocated_capacity_bytes() - + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes(); - return total_capacity; -} - -MetaspaceCounters::MetaspaceCounters() : - _capacity(NULL), - _used(NULL), - _max_capacity(NULL) { - if (UsePerfData) { - size_t min_capacity = MetaspaceAux::min_chunk_size(); - size_t max_capacity = MetaspaceAux::reserved_in_bytes(); - size_t curr_capacity = calc_total_capacity(); - size_t used = MetaspaceAux::allocated_used_bytes(); - - initialize(min_capacity, max_capacity, curr_capacity, used); - } -} +class MetaspacePerfCounters: public CHeapObj { + friend class VMStructs; + PerfVariable* _capacity; + PerfVariable* _used; + PerfVariable* _max_capacity; -static PerfVariable* create_ms_variable(const char *ns, - const char *name, - size_t value, - TRAPS) { - const char *path = PerfDataManager::counter_name(ns, name); - PerfVariable *result = - PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, - CHECK_NULL); - return result; -} + PerfVariable* create_variable(const char *ns, const char *name, size_t value, TRAPS) { + const char *path = PerfDataManager::counter_name(ns, name); + return PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, THREAD); + } -static void create_ms_constant(const char *ns, - const char *name, - size_t value, - TRAPS) { - const char *path = PerfDataManager::counter_name(ns, name); - PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK); -} + void create_constant(const char *ns, const char *name, size_t value, TRAPS) { + const char *path = PerfDataManager::counter_name(ns, name); + PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, THREAD); + } -void MetaspaceCounters::initialize(size_t min_capacity, - size_t max_capacity, - size_t curr_capacity, - size_t used) { - - if (UsePerfData) { + public: + MetaspacePerfCounters(const char* ns, size_t min_capacity, size_t curr_capacity, size_t max_capacity, size_t used) { EXCEPTION_MARK; ResourceMark rm; - const char *ms = "metaspace"; - - create_ms_constant(ms, "minCapacity", min_capacity, CHECK); - _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK); - _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK); - _used = create_ms_variable(ms, "used", used, CHECK); + create_constant(ns, "minCapacity", min_capacity, THREAD); + _capacity = create_variable(ns, "capacity", curr_capacity, THREAD); + _max_capacity = create_variable(ns, "maxCapacity", max_capacity, THREAD); + _used = create_variable(ns, "used", used, THREAD); } -} -void MetaspaceCounters::update_capacity() { - assert(UsePerfData, "Should not be called unless being 
used"); - size_t total_capacity = calc_total_capacity(); - _capacity->set_value(total_capacity); + void update(size_t capacity, size_t max_capacity, size_t used) { + _capacity->set_value(capacity); + _max_capacity->set_value(max_capacity); + _used->set_value(used); + } +}; + +MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL; + +size_t MetaspaceCounters::used() { + return MetaspaceAux::allocated_used_bytes(); } -void MetaspaceCounters::update_used() { - assert(UsePerfData, "Should not be called unless being used"); - size_t used_in_bytes = MetaspaceAux::allocated_used_bytes(); - _used->set_value(used_in_bytes); +size_t MetaspaceCounters::capacity() { + return MetaspaceAux::committed_bytes(); } -void MetaspaceCounters::update_max_capacity() { - assert(UsePerfData, "Should not be called unless being used"); - assert(_max_capacity != NULL, "Should be initialized"); - size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes(); - _max_capacity->set_value(reserved_in_bytes); -} - -void MetaspaceCounters::update_all() { - if (UsePerfData) { - update_used(); - update_capacity(); - update_max_capacity(); - } +size_t MetaspaceCounters::max_capacity() { + return MetaspaceAux::reserved_bytes(); } void MetaspaceCounters::initialize_performance_counters() { if (UsePerfData) { - assert(_metaspace_counters == NULL, "Should only be initialized once"); - _metaspace_counters = new MetaspaceCounters(); + assert(_perf_counters == NULL, "Should only be initialized once"); + + size_t min_capacity = 0; + _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, + capacity(), max_capacity(), used()); } } void MetaspaceCounters::update_performance_counters() { if (UsePerfData) { - assert(_metaspace_counters != NULL, "Should be initialized"); - _metaspace_counters->update_all(); + assert(_perf_counters != NULL, "Should be initialized"); + + _perf_counters->update(capacity(), max_capacity(), used()); } } +MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL; + +size_t CompressedClassSpaceCounters::used() { + return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType); +} + +size_t CompressedClassSpaceCounters::capacity() { + return MetaspaceAux::committed_bytes(Metaspace::ClassType); +} + +size_t CompressedClassSpaceCounters::max_capacity() { + return MetaspaceAux::reserved_bytes(Metaspace::ClassType); +} + +void CompressedClassSpaceCounters::update_performance_counters() { + if (UsePerfData && UseCompressedClassPointers) { + assert(_perf_counters != NULL, "Should be initialized"); + + _perf_counters->update(capacity(), max_capacity(), used()); + } +} + +void CompressedClassSpaceCounters::initialize_performance_counters() { + if (UsePerfData) { + assert(_perf_counters == NULL, "Should only be initialized once"); + const char* ns = "compressedclassspace"; + + if (UseCompressedClassPointers) { + size_t min_capacity = 0; + _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(), + max_capacity(), used()); + } else { + _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metaspaceCounters.hpp --- a/src/share/vm/memory/metaspaceCounters.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metaspaceCounters.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,31 +25,30 @@ #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP -#include "runtime/perfData.hpp" +#include "memory/allocation.hpp" + +class MetaspacePerfCounters; -class 
MetaspaceCounters: public CHeapObj { - friend class VMStructs; - PerfVariable* _capacity; - PerfVariable* _used; - PerfVariable* _max_capacity; - static MetaspaceCounters* _metaspace_counters; - void initialize(size_t min_capacity, - size_t max_capacity, - size_t curr_capacity, - size_t used); - size_t calc_total_capacity(); +class MetaspaceCounters: public AllStatic { + static MetaspacePerfCounters* _perf_counters; + static size_t used(); + static size_t capacity(); + static size_t max_capacity(); + public: - MetaspaceCounters(); - ~MetaspaceCounters(); - - void update_capacity(); - void update_used(); - void update_max_capacity(); - - void update_all(); - static void initialize_performance_counters(); static void update_performance_counters(); - }; + +class CompressedClassSpaceCounters: public AllStatic { + static MetaspacePerfCounters* _perf_counters; + static size_t used(); + static size_t capacity(); + static size_t max_capacity(); + + public: + static void initialize_performance_counters(); + static void update_performance_counters(); +}; + #endif // SHARE_VM_MEMORY_METASPACECOUNTERS_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/metaspaceShared.cpp --- a/src/share/vm/memory/metaspaceShared.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/metaspaceShared.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -103,9 +103,10 @@ if (k->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(k); for (int i = 0; i < ik->methods()->length(); i++) { - ResourceMark rm; Method* m = ik->methods()->at(i); - (new Fingerprinter(m))->fingerprint(); + Fingerprinter fp(m); + // The side effect of this call sets method's fingerprint field. + fp.fingerprint(); } } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/referenceProcessor.cpp --- a/src/share/vm/memory/referenceProcessor.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/referenceProcessor.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -366,7 +366,7 @@ next_d = java_lang_ref_Reference::discovered(obj); if (TraceReferenceGC && PrintGCDetails) { gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, - obj, next_d); + (void *)obj, (void *)next_d); } assert(java_lang_ref_Reference::next(obj) == NULL, "Reference not active; should not be discovered"); @@ -391,7 +391,7 @@ next_d = java_lang_ref_Reference::discovered(obj); if (TraceReferenceGC && PrintGCDetails) { gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, - obj, next_d); + (void *)obj, (void *)next_d); } assert(java_lang_ref_Reference::next(obj) == NULL, "The reference should not be enqueued"); @@ -561,7 +561,7 @@ !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { if (TraceReferenceGC) { gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", - iter.obj(), iter.obj()->klass()->internal_name()); + (void *)iter.obj(), iter.obj()->klass()->internal_name()); } // Remove Reference object from list iter.remove(); @@ -600,7 +600,7 @@ if (iter.is_referent_alive()) { if (TraceReferenceGC) { gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", - iter.obj(), iter.obj()->klass()->internal_name()); + (void *)iter.obj(), iter.obj()->klass()->internal_name()); } // The referent is reachable after all. // Remove Reference object from list. @@ -686,7 +686,7 @@ if (TraceReferenceGC) { gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", clear_referent ? 
"cleared " : "", - iter.obj(), iter.obj()->klass()->internal_name()); + (void *)iter.obj(), iter.obj()->klass()->internal_name()); } assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); iter.next(); @@ -1002,7 +1002,7 @@ gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " INTPTR_FORMAT " with next field: " INTPTR_FORMAT " and referent: " INTPTR_FORMAT, - iter.obj(), next, iter.referent()); + (void *)iter.obj(), (void *)next, (void *)iter.referent()); } ) // Remove Reference object from list @@ -1102,14 +1102,14 @@ if (TraceReferenceGC) { gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", - obj, obj->klass()->internal_name()); + (void *)obj, obj->klass()->internal_name()); } } else { // If retest was non NULL, another thread beat us to it: // The reference has already been discovered... if (TraceReferenceGC) { gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)", - obj, obj->klass()->internal_name()); + (void *)obj, obj->klass()->internal_name()); } } } @@ -1124,7 +1124,7 @@ assert(da ? referent->is_oop() : referent->is_oop_or_null(), err_msg("Bad referent " INTPTR_FORMAT " found in Reference " INTPTR_FORMAT " during %satomic discovery ", - (intptr_t)referent, (intptr_t)obj, da ? "" : "non-")); + (void *)referent, (void *)obj, da ? "" : "non-")); } #endif @@ -1204,7 +1204,7 @@ // The reference has already been discovered... if (TraceReferenceGC) { gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)", - obj, obj->klass()->internal_name()); + (void *)obj, obj->klass()->internal_name()); } if (RefDiscoveryPolicy == ReferentBasedDiscovery) { // assumes that an object is not processed twice; @@ -1272,7 +1272,7 @@ if (TraceReferenceGC) { gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)", - obj, obj->klass()->internal_name()); + (void *)obj, obj->klass()->internal_name()); } } assert(obj->is_oop(), "Discovered a bad reference"); @@ -1371,7 +1371,7 @@ // active; we need to trace and mark its cohort. if (TraceReferenceGC) { gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)", - iter.obj(), iter.obj()->klass()->internal_name()); + (void *)iter.obj(), iter.obj()->klass()->internal_name()); } // Remove Reference object from list iter.remove(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/universe.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -602,7 +602,7 @@ } } -static intptr_t non_oop_bits = 0; +intptr_t Universe::_non_oop_bits = 0; void* Universe::non_oop_word() { // Neither the high bits nor the low bits of this value is allowed @@ -616,11 +616,11 @@ // Using the OS-supplied non-memory-address word (usually 0 or -1) // will take care of the high bits, however many there are. 
- if (non_oop_bits == 0) { - non_oop_bits = (intptr_t)os::non_memory_address_word() | 1; + if (_non_oop_bits == 0) { + _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1; } - return (void*)non_oop_bits; + return (void*)_non_oop_bits; } jint universe_init() { @@ -681,17 +681,23 @@ // 32Gb // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; -char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) { +char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { + assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); + assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be"); + assert(is_size_aligned(heap_size, alignment), "Must be"); + + uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); + size_t base = 0; #ifdef _LP64 if (UseCompressedOops) { assert(mode == UnscaledNarrowOop || mode == ZeroBasedNarrowOop || mode == HeapBasedNarrowOop, "mode is invalid"); - const size_t total_size = heap_size + HeapBaseMinAddress; + const size_t total_size = heap_size + heap_base_min_address_aligned; // Return specified base for the first request. if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { - base = HeapBaseMinAddress; + base = heap_base_min_address_aligned; // If the total size is small enough to allow UnscaledNarrowOop then // just use UnscaledNarrowOop. @@ -742,6 +748,8 @@ } } #endif + + assert(is_ptr_aligned((char*)base, alignment), "Must be"); return (char*)base; // also return NULL (don't care) for 32-bit VM } @@ -864,30 +872,39 @@ // Reserve the Java heap, which is now the same for all GCs. ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { + assert(alignment <= Arguments::conservative_max_heap_alignment(), + err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT, + alignment, Arguments::conservative_max_heap_alignment())); size_t total_reserved = align_size_up(heap_size, alignment); assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), "heap size is too big for compressed oops"); - char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); - ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr); + bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size()); + assert(!UseLargePages + || UseParallelGC + || use_large_pages, "Wrong alignment to use large pages"); + + char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop); + + ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr); if (UseCompressedOops) { if (addr != NULL && !total_rs.is_reserved()) { // Failed to reserve at specified address - the requested memory // region is taken already, for example, by 'java' launcher. // Try again to reserver heap higher. - addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); + addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop); ReservedHeapSpace total_rs0(total_reserved, alignment, - UseLargePages, addr); + use_large_pages, addr); if (addr != NULL && !total_rs0.is_reserved()) { // Failed to reserve at specified address again - give up. 
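
`preferred_heap_base` now receives the heap alignment explicitly and asserts every input against it (`is_size_aligned`, `is_ptr_aligned`) before computing a base. The predicates themselves are one-liners; a sketch, assuming a power-of-two alignment for the mask form:

    #include <cstddef>
    #include <cstdint>

    // Sketch of the alignment predicates behind the new asserts. For a
    // power-of-two alignment the mask form equals the modulus form.
    bool is_size_aligned(size_t size, size_t alignment) {
      return size % alignment == 0;
    }
    bool is_ptr_aligned(const void* p, size_t alignment) {
      return ((uintptr_t)p & (alignment - 1)) == 0;  // power-of-two alignment assumed
    }
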
- addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); + addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop); assert(addr == NULL, ""); ReservedHeapSpace total_rs1(total_reserved, alignment, - UseLargePages, addr); + use_large_pages, addr); total_rs = total_rs1; } else { total_rs = total_rs0; @@ -1014,7 +1031,7 @@ msg = java_lang_String::create_from_str("Metadata space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg()); - msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false); + msg = java_lang_String::create_from_str("Compressed class space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg()); msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false); @@ -1101,6 +1118,8 @@ // Initialize performance counters for metaspaces MetaspaceCounters::initialize_performance_counters(); + CompressedClassSpaceCounters::initialize_performance_counters(); + MemoryService::add_metaspace_memory_pools(); GC_locker::unlock(); // allow gc after bootstrapping diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/memory/universe.hpp --- a/src/share/vm/memory/universe.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/memory/universe.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -179,9 +179,11 @@ // The particular choice of collected heap. static CollectedHeap* _collectedHeap; + static intptr_t _non_oop_bits; + // For UseCompressedOops. static struct NarrowPtrStruct _narrow_oop; - // For UseCompressedKlassPointers. + // For UseCompressedClassPointers. static struct NarrowPtrStruct _narrow_klass; static address _narrow_ptrs_base; @@ -229,7 +231,7 @@ _narrow_oop._base = base; } static void set_narrow_klass_base(address base) { - assert(UseCompressedKlassPointers, "no compressed klass ptrs?"); + assert(UseCompressedClassPointers, "no compressed klass ptrs?"); _narrow_klass._base = base; } static void set_narrow_oop_use_implicit_null_checks(bool use) { @@ -346,14 +348,14 @@ }; static NARROW_OOP_MODE narrow_oop_mode(); static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode); - static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode); + static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode); static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode); static address narrow_oop_base() { return _narrow_oop._base; } static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); } static int narrow_oop_shift() { return _narrow_oop._shift; } static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; } - // For UseCompressedKlassPointers + // For UseCompressedClassPointers static address narrow_klass_base() { return _narrow_klass._base; } static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); } static int narrow_klass_shift() { return _narrow_klass._shift; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/arrayOop.hpp --- a/src/share/vm/oops/arrayOop.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/arrayOop.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -65,7 +65,7 @@ // declared nonstatic fields in arrayOopDesc if not compressed, otherwise // it occupies the second half of the _klass field in oopDesc. static int length_offset_in_bytes() { - return UseCompressedKlassPointers ? 
klass_gap_offset_in_bytes() : + return UseCompressedClassPointers ? klass_gap_offset_in_bytes() : sizeof(arrayOopDesc); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/constantPool.cpp --- a/src/share/vm/oops/constantPool.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/constantPool.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -108,16 +108,16 @@ void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data, intStack reference_map, int constant_pool_map_length, - TRAPS) { + TRAPS) { // Initialized the resolved object cache. int map_length = reference_map.length(); if (map_length > 0) { // Only need mapping back to constant pool entries. The map isn't used for - // invokedynamic resolved_reference entries. The constant pool cache index - // has the mapping back to both the constant pool and to the resolved - // reference index. + // invokedynamic resolved_reference entries. For invokedynamic entries, + // the constant pool cache index has the mapping back to both the constant + // pool and to the resolved reference index. if (constant_pool_map_length > 0) { - Array* om = MetadataFactory::new_array(loader_data, map_length, CHECK); + Array* om = MetadataFactory::new_array(loader_data, constant_pool_map_length, CHECK); for (int i = 0; i < constant_pool_map_length; i++) { int x = reference_map.at(i); @@ -182,16 +182,9 @@ int ConstantPool::cp_to_object_index(int cp_index) { // this is harder don't do this so much. - for (int i = 0; i< reference_map()->length(); i++) { - if (reference_map()->at(i) == cp_index) return i; - // Zero entry is divider between constant pool indices for strings, - // method handles and method types. After that the index is a constant - // pool cache index for invokedynamic. Stop when zero (which can never - // be a constant pool index) - if (reference_map()->at(i) == 0) break; - } - // We might not find the index. - return _no_index_sentinel; + int i = reference_map()->find(cp_index); + // We might not find the index for jsr292 call. + return (i < 0) ? _no_index_sentinel : i; } Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) { @@ -396,32 +389,6 @@ } -// This is an interface for the compiler that allows accessing non-resolved entries -// in the constant pool - but still performs the validations tests. Must be used -// in a pre-parse of the compiler - to determine what it can do and not do. -// Note: We cannot update the ConstantPool from the vm_thread. 
-Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) { - int which = this_oop->klass_ref_index_at(index); - CPSlot entry = this_oop->slot_at(which); - if (entry.is_resolved()) { - assert(entry.get_klass()->is_klass(), "must be"); - return entry.get_klass(); - } else { - assert(entry.is_unresolved(), "must be either symbol or klass"); - Symbol* name = entry.get_symbol(); - oop loader = this_oop->pool_holder()->class_loader(); - oop protection_domain = this_oop->pool_holder()->protection_domain(); - Handle h_loader(THREAD, loader); - Handle h_prot (THREAD, protection_domain); - KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD)); - - // Do access check for klasses - if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL); - return k(); - } -} - - Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool, int which) { if (cpool->cache() == NULL) return NULL; // nothing to load yet @@ -866,8 +833,7 @@ // If the string has already been interned, this entry will be non-null oop str = this_oop->resolved_references()->obj_at(obj_index); if (str != NULL) return str; - - Symbol* sym = this_oop->unresolved_string_at(which); + Symbol* sym = this_oop->unresolved_string_at(which); str = StringTable::intern(sym, CHECK_(NULL)); this_oop->string_at_put(which, obj_index, str); assert(java_lang_String::is_instance(str), "must be string"); @@ -1645,9 +1611,11 @@ case JVM_CONSTANT_UnresolvedClassInError: case JVM_CONSTANT_StringIndex: case JVM_CONSTANT_MethodType: + case JVM_CONSTANT_MethodTypeInError: return 3; case JVM_CONSTANT_MethodHandle: + case JVM_CONSTANT_MethodHandleInError: return 4; //tag, ref_kind, ref_index case JVM_CONSTANT_Integer: @@ -1828,8 +1796,8 @@ case JVM_CONSTANT_MethodHandle: case JVM_CONSTANT_MethodHandleInError: { *bytes = JVM_CONSTANT_MethodHandle; - int kind = method_handle_ref_kind_at(idx); - idx1 = method_handle_index_at(idx); + int kind = method_handle_ref_kind_at_error_ok(idx); + idx1 = method_handle_index_at_error_ok(idx); *(bytes+1) = (unsigned char) kind; Bytes::put_Java_u2((address) (bytes+2), idx1); DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1)); @@ -1838,7 +1806,7 @@ case JVM_CONSTANT_MethodType: case JVM_CONSTANT_MethodTypeInError: { *bytes = JVM_CONSTANT_MethodType; - idx1 = method_type_index_at(idx); + idx1 = method_type_index_at_error_ok(idx); Bytes::put_Java_u2((address) (bytes+1), idx1); DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1)); break; @@ -1950,7 +1918,7 @@ st->print_cr(" - holder: " INTPTR_FORMAT, pool_holder()); } st->print_cr(" - cache: " INTPTR_FORMAT, cache()); - st->print_cr(" - resolved_references: " INTPTR_FORMAT, resolved_references()); + st->print_cr(" - resolved_references: " INTPTR_FORMAT, (void *)resolved_references()); st->print_cr(" - reference_map: " INTPTR_FORMAT, reference_map()); for (int index = 1; index < length(); index++) { // Index 0 is unused @@ -2026,12 +1994,12 @@ break; case JVM_CONSTANT_MethodHandle : case JVM_CONSTANT_MethodHandleInError : - st->print("ref_kind=%d", method_handle_ref_kind_at(index)); - st->print(" ref_index=%d", method_handle_index_at(index)); + st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index)); + st->print(" ref_index=%d", method_handle_index_at_error_ok(index)); break; case JVM_CONSTANT_MethodType : case JVM_CONSTANT_MethodTypeInError : - st->print("signature_index=%d", method_type_index_at(index)); + st->print("signature_index=%d", method_type_index_at_error_ok(index)); break; case 
JVM_CONSTANT_InvokeDynamic : { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/constantPool.hpp --- a/src/share/vm/oops/constantPool.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/constantPool.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -231,7 +231,6 @@ static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); } static int pool_holder_offset_in_bytes() { return offset_of(ConstantPool, _pool_holder); } static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); } - static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); } // Storing constants @@ -475,18 +474,42 @@ return *int_at_addr(which); } + private: + int method_handle_ref_kind_at(int which, bool error_ok) { + assert(tag_at(which).is_method_handle() || + (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool"); + return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits + } + int method_handle_index_at(int which, bool error_ok) { + assert(tag_at(which).is_method_handle() || + (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool"); + return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits + } + int method_type_index_at(int which, bool error_ok) { + assert(tag_at(which).is_method_type() || + (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool"); + return *int_at_addr(which); + } + public: int method_handle_ref_kind_at(int which) { - assert(tag_at(which).is_method_handle(), "Corrupted constant pool"); - return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits + return method_handle_ref_kind_at(which, false); + } + int method_handle_ref_kind_at_error_ok(int which) { + return method_handle_ref_kind_at(which, true); } int method_handle_index_at(int which) { - assert(tag_at(which).is_method_handle(), "Corrupted constant pool"); - return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits + return method_handle_index_at(which, false); + } + int method_handle_index_at_error_ok(int which) { + return method_handle_index_at(which, true); } int method_type_index_at(int which) { - assert(tag_at(which).is_method_type(), "Corrupted constant pool"); - return *int_at_addr(which); + return method_type_index_at(which, false); } + int method_type_index_at_error_ok(int which) { + return method_type_index_at(which, true); + } + // Derived queries: Symbol* method_handle_name_ref_at(int which) { int member = method_handle_index_at(which); @@ -730,8 +753,6 @@ static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which); static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which); static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); - // Same as above - but does LinkResolving. - static Klass* klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS); // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the // future by other Java code. 
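The constantPool.hpp hunk above is the worker-plus-wrappers idiom: one private accessor takes an error_ok flag that widens the accepted tag set to the *_InError variants, and thin public wrappers pin the flag so ordinary callers stay strict. A standalone sketch of the pattern with hypothetical tag and entry types, not HotSpot's:

    #include <cassert>

    enum Tag { METHOD_HANDLE, METHOD_HANDLE_IN_ERROR };
    struct Entry { Tag tag; int packed; };

    // Private worker: tolerates the in-error tag only when error_ok is set.
    static int ref_kind_at(const Entry& e, bool error_ok) {
      assert(e.tag == METHOD_HANDLE ||
             (error_ok && e.tag == METHOD_HANDLE_IN_ERROR));
      return e.packed & 0xffff;               // low half holds the ref_kind
    }

    int ref_kind_at(const Entry& e)          { return ref_kind_at(e, false); }
    int ref_kind_at_error_ok(const Entry& e) { return ref_kind_at(e, true);  }

This keeps the lenient accessors out of casual reach while letting the class-file reconstitution and printing paths, which must cope with resolution errors, opt in explicitly.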
These take constant pool indices rather than diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/cpCache.cpp --- a/src/share/vm/oops/cpCache.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/cpCache.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -140,9 +140,10 @@ err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value)); } -void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, - methodHandle method, - int vtable_index) { +void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code, + methodHandle method, + int vtable_index) { + bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean assert(method->interpreter_entry() != NULL, "should have been set at this point"); assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); @@ -160,7 +161,8 @@ // ...and fall through as if we were handling invokevirtual: case Bytecodes::_invokevirtual: { - if (method->can_be_statically_bound()) { + if (!is_vtable_call) { + assert(method->can_be_statically_bound(), ""); // set_f2_as_vfinal_method checks if is_vfinal flag is true. set_method_flags(as_TosState(method->result_type()), ( 1 << is_vfinal_shift) | @@ -169,6 +171,7 @@ method()->size_of_parameters()); set_f2_as_vfinal_method(method()); } else { + assert(!method->can_be_statically_bound(), ""); assert(vtable_index >= 0, "valid index"); assert(!method->is_final_method(), "sanity"); set_method_flags(as_TosState(method->result_type()), @@ -182,6 +185,7 @@ case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: + assert(!is_vtable_call, ""); // Note: Read and preserve the value of the is_vfinal flag on any // invokevirtual bytecode shared with this constant pool cache entry. // It is cheap and safe to consult is_vfinal() at all times. @@ -232,8 +236,22 @@ NOT_PRODUCT(verify(tty)); } +void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) { + int index = Method::nonvirtual_vtable_index; + // index < 0; FIXME: inline and customize set_direct_or_vtable_call + set_direct_or_vtable_call(invoke_code, method, index); +} -void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) { +void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) { + // either the method is a miranda or its holder should accept the given index + assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), ""); + // index >= 0; FIXME: inline and customize set_direct_or_vtable_call + set_direct_or_vtable_call(invoke_code, method, index); +} + +void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) { + assert(method->method_holder()->verify_itable_index(index), ""); + assert(invoke_code == Bytecodes::_invokeinterface, ""); InstanceKlass* interf = method->method_holder(); assert(interf->is_interface(), "must be an interface"); assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here"); @@ -288,8 +306,8 @@ if (TraceInvokeDynamic) { tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ", invoke_code, - (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), - (intptr_t)method_type(), (has_method_type ? "" : " (unused)"), + (void *)appendix(), (has_appendix ? "" : " (unused)"), + (void *)method_type(), (has_method_type ? 
"" : " (unused)"), (intptr_t)adapter()); adapter->print(); if (has_appendix) appendix()->print(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/cpCache.hpp --- a/src/share/vm/oops/cpCache.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/cpCache.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -219,15 +219,29 @@ Klass* root_klass // needed by the GC to dirty the klass ); - void set_method( // sets entry to resolved method entry + private: + void set_direct_or_vtable_call( Bytecodes::Code invoke_code, // the bytecode used for invoking the method methodHandle method, // the method/prototype if any (NULL, otherwise) int vtable_index // the vtable index if any, else negative ); - void set_interface_call( - methodHandle method, // Resolved method - int index // Method index into interface + public: + void set_direct_call( // sets entry to exact concrete method entry + Bytecodes::Code invoke_code, // the bytecode used for invoking the method + methodHandle method // the method to call + ); + + void set_vtable_call( // sets entry to vtable index + Bytecodes::Code invoke_code, // the bytecode used for invoking the method + methodHandle method, // resolved method which declares the vtable index + int vtable_index // the vtable index + ); + + void set_itable_call( + Bytecodes::Code invoke_code, // the bytecode used; must be invokeinterface + methodHandle method, // the resolved interface method + int itable_index // index into itable for the method ); void set_method_handle( diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/fieldInfo.hpp --- a/src/share/vm/oops/fieldInfo.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/fieldInfo.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -240,6 +240,14 @@ return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0; } + bool is_stable() const { + return (access_flags() & JVM_ACC_FIELD_STABLE) != 0; + } + void set_stable(bool z) { + if (z) _shorts[access_flags_offset] |= JVM_ACC_FIELD_STABLE; + else _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE; + } + Symbol* lookup_symbol(int symbol_index) const { assert(is_internal(), "only internal fields"); return vmSymbols::symbol_at((vmSymbols::SID)symbol_index); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/fieldStreams.hpp --- a/src/share/vm/oops/fieldStreams.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/fieldStreams.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "oops/instanceKlass.hpp" #include "oops/fieldInfo.hpp" +#include "runtime/fieldDescriptor.hpp" // The is the base class for iteration over the fields array // describing the declared fields in the class. 
Several subclasses
@@ -43,8 +44,10 @@
   int                 _index;
   int                 _limit;
   int                 _generic_signature_slot;
+  fieldDescriptor     _fd_buf;

   FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
+  InstanceKlass* field_holder() const { return _constants->pool_holder(); }

   int init_generic_signature_start_slot() {
     int length = _fields->length();
@@ -102,6 +105,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
   FieldStreamBase(instanceKlassHandle klass) {
     _fields = klass->fields();
@@ -109,6 +113,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }

   // accessors
@@ -180,6 +185,12 @@
     return field()->contended_group();
   }

+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(field_holder(), _index);
+    return field;
+  }
 };

 // Iterate over only the internal fields
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/oops/instanceKlass.cpp Fri Oct 11 10:38:03 2013 +0200
@@ -106,7 +106,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type);           \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type); \
   }

 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
@@ -119,7 +119,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type, wait);     \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait); \
   }
 #else /* USDT2 */
@@ -269,7 +269,7 @@
   set_fields(NULL, 0);
   set_constants(NULL);
   set_class_loader_data(NULL);
-  set_source_file_name(NULL);
+  set_source_file_name_index(0);
   set_source_debug_extension(NULL, 0);
   set_array_name(NULL);
   set_inner_classes(NULL);
@@ -284,9 +284,8 @@
   set_osr_nmethods_head(NULL);
   set_breakpoints(NULL);
   init_previous_versions();
-  set_generic_signature(NULL);
+  set_generic_signature_index(0);
   release_set_methods_jmethod_ids(NULL);
-  release_set_methods_cached_itable_indices(NULL);
   set_annotations(NULL);
   set_jvmti_cached_class_field_map(NULL);
   set_initial_method_idnum(0);
@@ -1149,7 +1148,7 @@
     Symbol* f_name = fs.name();
     Symbol* f_sig  = fs.signature();
     if (f_name == name && f_sig == sig) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       return true;
     }
   }
@@ -1218,7 +1217,7 @@
 bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.offset() == offset) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       if (fd->is_static() == is_static) return true;
     }
   }
@@ -1251,8 +1250,7 @@
 void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this, fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       cl->do_field(&fd);
     }
   }
@@ -1268,8 +1266,7 @@
 void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-
fieldDescriptor fd; - fd.initialize(this_oop(), fs.index()); + fieldDescriptor& fd = fs.field_descriptor(); f(&fd, CHECK); } } @@ -1291,7 +1288,7 @@ int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass); int j = 0; for (int i = 0; i < length; i += 1) { - fd.initialize(this, i); + fd.reinitialize(this, i); if (!fd.is_static()) { fields_sorted[j + 0] = fd.offset(); fields_sorted[j + 1] = i; @@ -1303,7 +1300,7 @@ // _sort_Fn is defined in growableArray.hpp. qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset); for (int i = 0; i < length; i += 2) { - fd.initialize(this, fields_sorted[i + 1]); + fd.reinitialize(this, fields_sorted[i + 1]); assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields"); cl->do_field(&fd); } @@ -1422,6 +1419,8 @@ } // lookup a method in all the interfaces that this class implements +// Do NOT return private or static methods, new in JDK8 which are not externally visible +// They should only be found in the initial InterfaceMethodRef Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const { Array* all_ifs = transitive_interfaces(); @@ -1430,7 +1429,7 @@ for (int i = 0; i < num_ifs; i++) { ik = InstanceKlass::cast(all_ifs->at(i)); Method* m = ik->lookup_method(name, signature); - if (m != NULL) { + if (m != NULL && m->is_public() && !m->is_static()) { return m; } } @@ -1686,87 +1685,6 @@ } -// Cache an itable index -void InstanceKlass::set_cached_itable_index(size_t idnum, int index) { - int* indices = methods_cached_itable_indices_acquire(); - int* to_dealloc_indices = NULL; - - // We use a double-check locking idiom here because this cache is - // performance sensitive. In the normal system, this cache only - // transitions from NULL to non-NULL which is safe because we use - // release_set_methods_cached_itable_indices() to advertise the - // new cache. A partially constructed cache should never be seen - // by a racing thread. Cache reads and writes proceed without a - // lock, but creation of the cache itself requires no leaks so a - // lock is generally acquired in that case. - // - // If the RedefineClasses() API has been used, then this cache can - // grow and we'll have transitions from non-NULL to bigger non-NULL. - // Cache creation requires no leaks and we require safety between all - // cache accesses and freeing of the old cache so a lock is generally - // acquired when the RedefineClasses() API has been used. - - if (indices == NULL || idnum_can_increment()) { - // we need a cache or the cache can grow - MutexLocker ml(JNICachedItableIndex_lock); - // reacquire the cache to see if another thread already did the work - indices = methods_cached_itable_indices_acquire(); - size_t length = 0; - // cache size is stored in element[0], other elements offset by one - if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { - size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); - int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass); - new_indices[0] = (int)size; - // copy any existing entries - size_t i; - for (i = 0; i < length; i++) { - new_indices[i+1] = indices[i+1]; - } - // Set all the rest to -1 - for (i = length; i < size; i++) { - new_indices[i+1] = -1; - } - if (indices != NULL) { - // We have an old cache to delete so save it for after we - // drop the lock. 
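The itable-index cache being deleted above documents the classic release/acquire double-checked publication discipline. The same idiom, sketched standalone with std::atomic standing in for OrderAccess::load_ptr_acquire / release_store_ptr (names illustrative):

    #include <atomic>
    #include <mutex>

    static std::atomic<int*> cache{nullptr};
    static std::mutex        cache_lock;

    int* get_or_create_cache(size_t size) {
      int* c = cache.load(std::memory_order_acquire);  // lock-free fast path
      if (c == nullptr) {
        std::lock_guard<std::mutex> ml(cache_lock);
        c = cache.load(std::memory_order_acquire);     // re-check under the lock
        if (c == nullptr) {
          c = new int[size]();                         // construct fully first...
          cache.store(c, std::memory_order_release);   // ...then publish
        }
      }
      return c;
    }

A racing reader either sees NULL (and takes the lock) or a fully constructed cache, never a partially built one; that is exactly the invariant the removed comment insists on.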
- to_dealloc_indices = indices; - } - release_set_methods_cached_itable_indices(indices = new_indices); - } - - if (idnum_can_increment()) { - // this cache can grow so we have to write to it safely - indices[idnum+1] = index; - } - } else { - CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); - } - - if (!idnum_can_increment()) { - // The cache cannot grow and this JNI itable index value does not - // have to be unique like a jmethodID. If there is a race to set it, - // it doesn't matter. - indices[idnum+1] = index; - } - - if (to_dealloc_indices != NULL) { - // we allocated a new cache so free the old one - FreeHeap(to_dealloc_indices); - } -} - - -// Retrieve a cached itable index -int InstanceKlass::cached_itable_index(size_t idnum) { - int* indices = methods_cached_itable_indices_acquire(); - if (indices != NULL && ((size_t)indices[0]) > idnum) { - // indices exist and are long enough, retrieve possible cached - return indices[idnum+1]; - } - return -1; -} - - // // Walk the list of dependent nmethods searching for nmethods which // are dependent on the changes that were passed in and mark them for @@ -2326,12 +2244,6 @@ } } - int* indices = methods_cached_itable_indices_acquire(); - if (indices != (int*)NULL) { - release_set_methods_cached_itable_indices(NULL); - FreeHeap(indices); - } - // release dependencies nmethodBucket* b = _dependencies; _dependencies = NULL; @@ -2368,18 +2280,12 @@ // unreference array name derived from this class name (arrays of an unloaded // class can't be referenced anymore). if (_array_name != NULL) _array_name->decrement_refcount(); - if (_source_file_name != NULL) _source_file_name->decrement_refcount(); if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass); assert(_total_instanceKlass_count >= 1, "Sanity check"); Atomic::dec(&_total_instanceKlass_count); } -void InstanceKlass::set_source_file_name(Symbol* n) { - _source_file_name = n; - if (_source_file_name != NULL) _source_file_name->increment_refcount(); -} - void InstanceKlass::set_source_debug_extension(char* array, int length) { if (array == NULL) { _source_debug_extension = NULL; @@ -2399,7 +2305,7 @@ } address InstanceKlass::static_field_addr(int offset) { - return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror()); + return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + cast_from_oop(java_mirror())); } @@ -2788,6 +2694,18 @@ "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error" }; +static void print_vtable(intptr_t* start, int len, outputStream* st) { + for (int i = 0; i < len; i++) { + intptr_t e = start[i]; + st->print("%d : " INTPTR_FORMAT, i, e); + if (e != 0 && ((Metadata*)e)->is_metaspace_object()) { + st->print(" "); + ((Metadata*)e)->print_value_on(st); + } + st->cr(); + } +} + void InstanceKlass::print_on(outputStream* st) const { assert(is_klass(), "must be klass"); Klass::print_on(st); @@ -2822,7 +2740,7 @@ st->print(BULLET"arrays: "); array_klasses()->print_value_on_maybe_null(st); st->cr(); st->print(BULLET"methods: "); methods()->print_value_on(st); st->cr(); - if (Verbose) { + if (Verbose || WizardMode) { Array* method_array = methods(); for(int i = 0; i < method_array->length(); i++) { st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); @@ -2853,24 +2771,17 @@ st->print(BULLET"field annotations: "); fields_annotations()->print_value_on(st); st->cr(); st->print(BULLET"field type annotations: "); 
fields_type_annotations()->print_value_on(st); st->cr(); { - ResourceMark rm; - // PreviousVersionInfo objects returned via PreviousVersionWalker - // contain a GrowableArray of handles. We have to clean up the - // GrowableArray _after_ the PreviousVersionWalker destructor - // has destroyed the handles. - { - bool have_pv = false; - PreviousVersionWalker pvw((InstanceKlass*)this); - for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); - pv_info != NULL; pv_info = pvw.next_previous_version()) { - if (!have_pv) - st->print(BULLET"previous version: "); - have_pv = true; - pv_info->prev_constant_pool_handle()()->print_value_on(st); - } - if (have_pv) st->cr(); - } // pvw is cleaned up - } // rm is cleaned up + bool have_pv = false; + PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this); + for (PreviousVersionNode * pv_node = pvw.next_previous_version(); + pv_node != NULL; pv_node = pvw.next_previous_version()) { + if (!have_pv) + st->print(BULLET"previous version: "); + have_pv = true; + pv_node->prev_constant_pool()->print_value_on(st); + } + if (have_pv) st->cr(); + } // pvw is cleaned up if (generic_signature() != NULL) { st->print(BULLET"generic signature: "); @@ -2880,7 +2791,9 @@ st->print(BULLET"inner classes: "); inner_classes()->print_value_on(st); st->cr(); st->print(BULLET"java mirror: "); java_mirror()->print_value_on(st); st->cr(); st->print(BULLET"vtable length %d (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable()); st->cr(); + if (vtable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_vtable(), vtable_length(), st); st->print(BULLET"itable length %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr(); + if (itable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_itable(), itable_length(), st); st->print_cr(BULLET"---- static fields (%d words):", static_field_size()); FieldPrinter print_static_field(st); ((InstanceKlass*)this)->do_local_static_fields(&print_static_field); @@ -2902,6 +2815,7 @@ void InstanceKlass::print_value_on(outputStream* st) const { assert(is_klass(), "must be klass"); + if (Verbose || WizardMode) access_flags().print_on(st); name()->print_value_on(st); } @@ -3398,34 +3312,34 @@ Array* old_methods = ikh->methods(); if (cp_ref->on_stack()) { - PreviousVersionNode * pv_node = NULL; - if (emcp_method_count == 0) { + PreviousVersionNode * pv_node = NULL; + if (emcp_method_count == 0) { // non-shared ConstantPool gets a reference - pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL); - RC_TRACE(0x00000400, - ("add: all methods are obsolete; flushing any EMCP refs")); - } else { - int local_count = 0; + pv_node = new PreviousVersionNode(cp_ref, NULL); + RC_TRACE(0x00000400, + ("add: all methods are obsolete; flushing any EMCP refs")); + } else { + int local_count = 0; GrowableArray* method_refs = new (ResourceObj::C_HEAP, mtClass) - GrowableArray(emcp_method_count, true); - for (int i = 0; i < old_methods->length(); i++) { - if (emcp_methods->at(i)) { - // this old method is EMCP. Save it only if it's on the stack - Method* old_method = old_methods->at(i); - if (old_method->on_stack()) { - method_refs->append(old_method); + GrowableArray(emcp_method_count, true); + for (int i = 0; i < old_methods->length(); i++) { + if (emcp_methods->at(i)) { + // this old method is EMCP. 
Save it only if it's on the stack + Method* old_method = old_methods->at(i); + if (old_method->on_stack()) { + method_refs->append(old_method); + } + if (++local_count >= emcp_method_count) { + // no more EMCP methods so bail out now + break; } - if (++local_count >= emcp_method_count) { - // no more EMCP methods so bail out now - break; } } - } // non-shared ConstantPool gets a reference - pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs); + pv_node = new PreviousVersionNode(cp_ref, method_refs); } // append new previous version. - _previous_versions->append(pv_node); + _previous_versions->append(pv_node); } // Since the caller is the VMThread and we are at a safepoint, this @@ -3526,6 +3440,8 @@ return m; } } + // None found, return null for the caller to handle. + return NULL; } return m; } @@ -3542,10 +3458,9 @@ // Construct a PreviousVersionNode entry for the array hung off // the InstanceKlass. PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool, - bool prev_cp_is_weak, GrowableArray* prev_EMCP_methods) { + GrowableArray* prev_EMCP_methods) { _prev_constant_pool = prev_constant_pool; - _prev_cp_is_weak = prev_cp_is_weak; _prev_EMCP_methods = prev_EMCP_methods; } @@ -3561,99 +3476,38 @@ } } - -// Construct a PreviousVersionInfo entry -PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { - _prev_constant_pool_handle = constantPoolHandle(); // NULL handle - _prev_EMCP_method_handles = NULL; - - ConstantPool* cp = pv_node->prev_constant_pool(); - assert(cp != NULL, "constant pool ref was unexpectedly cleared"); - if (cp == NULL) { - return; // robustness - } - - // make the ConstantPool* safe to return - _prev_constant_pool_handle = constantPoolHandle(cp); - - GrowableArray* method_refs = pv_node->prev_EMCP_methods(); - if (method_refs == NULL) { - // the InstanceKlass did not have any EMCP methods - return; - } - - _prev_EMCP_method_handles = new GrowableArray(10); - - int n_methods = method_refs->length(); - for (int i = 0; i < n_methods; i++) { - Method* method = method_refs->at(i); - assert (method != NULL, "method has been cleared"); - if (method == NULL) { - continue; // robustness - } - // make the Method* safe to return - _prev_EMCP_method_handles->append(methodHandle(method)); - } -} - - -// Destroy a PreviousVersionInfo -PreviousVersionInfo::~PreviousVersionInfo() { - // Since _prev_EMCP_method_handles is not C-heap allocated, we - // don't have to delete it. -} - - // Construct a helper for walking the previous versions array -PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) { +PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) { + _thread = thread; _previous_versions = ik->previous_versions(); _current_index = 0; - // _hm needs no initialization _current_p = NULL; -} - - -// Destroy a PreviousVersionWalker -PreviousVersionWalker::~PreviousVersionWalker() { - // Delete the current info just in case the caller didn't walk to - // the end of the previous versions list. No harm if _current_p is - // already NULL. - delete _current_p; - - // When _hm is destroyed, all the Handles returned in - // PreviousVersionInfo objects will be destroyed. - // Also, after this destructor is finished it will be - // safe to delete the GrowableArray allocated in the - // PreviousVersionInfo objects. + _current_constant_pool_handle = constantPoolHandle(thread, ik->constants()); } // Return the interesting information for the next previous version // of the klass. 
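The reworked walker hands back raw PreviousVersionNode pointers but keeps a constantPoolHandle to the node's pool between calls, so the node's methods cannot be deallocated while the caller inspects them. A skeletal model of that keep-alive pattern, with std::shared_ptr playing the role of the handle and all types hypothetical:

    #include <memory>
    #include <vector>

    struct PrevVersionNode { /* constant pool + EMCP methods */ };

    class PrevVersionWalker {
      const std::vector<std::shared_ptr<PrevVersionNode> >* _versions;
      size_t _index;
      std::shared_ptr<PrevVersionNode> _current;  // keep-alive between calls

     public:
      explicit PrevVersionWalker(const std::vector<std::shared_ptr<PrevVersionNode> >* v)
        : _versions(v), _index(0) {}

      // Next node, or nullptr when exhausted; holding _current pins the node
      // (in the VM: the constant pool, and through it the methods).
      PrevVersionNode* next() {
        _current.reset();
        if (_versions == nullptr || _index >= _versions->size()) return nullptr;
        _current = (*_versions)[_index++];
        return _current.get();
      }
    };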
Returns NULL if there are no more previous versions. -PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { +PreviousVersionNode* PreviousVersionWalker::next_previous_version() { if (_previous_versions == NULL) { // no previous versions so nothing to return return NULL; } - delete _current_p; // cleanup the previous info for the caller - _current_p = NULL; // reset to NULL so we don't delete same object twice + _current_p = NULL; // reset to NULL + _current_constant_pool_handle = NULL; int length = _previous_versions->length(); while (_current_index < length) { PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); - PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass) - PreviousVersionInfo(pv_node); - - constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); - assert (!cp_h.is_null(), "null cp found in previous version"); - - // The caller will need to delete pv_info when they are done with it. - _current_p = pv_info; - return pv_info; + + // Save a handle to the constant pool for this previous version, + // which keeps all the methods from being deallocated. + _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool()); + _current_p = pv_node; + return pv_node; } - // all of the underlying nodes' info has been deleted return NULL; } // end next_previous_version() diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/instanceKlass.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -201,14 +201,10 @@ // number_of_inner_classes * 4 + enclosing_method_attribute_size. Array* _inner_classes; - // Name of source file containing this klass, NULL if not specified. - Symbol* _source_file_name; // the source debug extension for this klass, NULL if not specified. // Specified as UTF-8 string without terminating zero byte in the classfile, // it is stored in the instanceklass as a NULL-terminated UTF-8 string char* _source_debug_extension; - // Generic signature, or null if none. - Symbol* _generic_signature; // Array name derived from this class which needs unreferencing // if this class is unloaded. Symbol* _array_name; @@ -217,6 +213,12 @@ // (including inherited fields but after header_size()). int _nonstatic_field_size; int _static_field_size; // number words used by static fields (oop and non-oop) in this klass + // Constant pool index to the utf8 entry of the Generic signature, + // or 0 if none. + u2 _generic_signature_index; + // Constant pool index to the utf8 entry for the name of source file + // containing this klass, 0 if not specified. 
+ u2 _source_file_name_index; u2 _static_oop_field_count;// number of static oop fields in this klass u2 _java_fields_count; // The number of declared Java fields int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks @@ -243,7 +245,6 @@ MemberNameTable* _member_names; // Member names JNIid* _jni_ids; // First JNI identifier for static fields in this class jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none - int* _methods_cached_itable_indices; // itable_index cache for JNI invoke corresponding to methods idnum, or NULL nmethodBucket* _dependencies; // list of dependent nmethods nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class BreakpointInfo* _breakpoints; // bpt lists, managed by Method* @@ -570,8 +571,16 @@ } // source file name - Symbol* source_file_name() const { return _source_file_name; } - void set_source_file_name(Symbol* n); + Symbol* source_file_name() const { + return (_source_file_name_index == 0) ? + (Symbol*)NULL : _constants->symbol_at(_source_file_name_index); + } + u2 source_file_name_index() const { + return _source_file_name_index; + } + void set_source_file_name_index(u2 sourcefile_index) { + _source_file_name_index = sourcefile_index; + } // minor and major version numbers of class file u2 minor_version() const { return _minor_version; } @@ -648,8 +657,16 @@ void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; } // generics support - Symbol* generic_signature() const { return _generic_signature; } - void set_generic_signature(Symbol* sig) { _generic_signature = sig; } + Symbol* generic_signature() const { + return (_generic_signature_index == 0) ? + (Symbol*)NULL : _constants->symbol_at(_generic_signature_index); + } + u2 generic_signature_index() const { + return _generic_signature_index; + } + void set_generic_signature_index(u2 sig_index) { + _generic_signature_index = sig_index; + } u2 enclosing_method_data(int offset); u2 enclosing_method_class_index() { @@ -672,10 +689,6 @@ size_t *length_p, jmethodID* id_p); jmethodID jmethod_id_or_null(Method* method); - // cached itable index support - void set_cached_itable_index(size_t idnum, int index); - int cached_itable_index(size_t idnum); - // annotations support Annotations* annotations() const { return _annotations; } void set_annotations(Annotations* anno) { _annotations = anno; } @@ -976,11 +989,6 @@ void release_set_methods_jmethod_ids(jmethodID* jmeths) { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); } - int* methods_cached_itable_indices_acquire() const - { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); } - void release_set_methods_cached_itable_indices(int* indices) - { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); } - // Lock during initialization public: // Lock for (1) initialization; (2) access to the ConstantPool of this class. @@ -1118,21 +1126,11 @@ // A collection point for interesting information about the previous -// version(s) of an InstanceKlass. This class uses weak references to -// the information so that the information may be collected as needed -// by the system. If the information is shared, then a regular -// reference must be used because a weak reference would be seen as -// collectible. A GrowableArray of PreviousVersionNodes is attached -// to the InstanceKlass as needed. See PreviousVersionWalker below. +// version(s) of an InstanceKlass. 
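The accessors above trade two Symbol* fields, with their refcount bookkeeping, for two-byte constant-pool indices that reconstruct the symbol on demand, with 0 reserved for "not specified". A standalone sketch of the pattern, using hypothetical stand-ins for Symbol and the pool:

    #include <cstdint>
    #include <string>
    #include <vector>

    typedef std::string Symbol;                // stand-in for HotSpot's Symbol
    typedef std::vector<Symbol> ConstantPool;  // stand-in; slot 0 stays unused

    struct KlassLite {
      const ConstantPool* pool;
      uint16_t source_file_name_index;         // 0 == no SourceFile attribute

      const Symbol* source_file_name() const {
        return source_file_name_index == 0 ? nullptr
                                           : &pool->at(source_file_name_index);
      }
    };

Since the class's constant pool already owns the UTF-8 entry, the index shrinks InstanceKlass and removes the increment/decrement_refcount pairing the old set_source_file_name and destructor code needed.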
A GrowableArray of PreviousVersionNodes +// is attached to the InstanceKlass as needed. See PreviousVersionWalker below. class PreviousVersionNode : public CHeapObj { private: - // A shared ConstantPool is never collected so we'll always have - // a reference to it so we can update items in the cache. We'll - // have a weak reference to a non-shared ConstantPool until all - // of the methods (EMCP or obsolete) have been collected; the - // non-shared ConstantPool becomes collectible at that point. - ConstantPool* _prev_constant_pool; // regular or weak reference - bool _prev_cp_is_weak; // true if not a shared ConstantPool + ConstantPool* _prev_constant_pool; // If the previous version of the InstanceKlass doesn't have any // EMCP methods, then _prev_EMCP_methods will be NULL. If all the @@ -1141,8 +1139,8 @@ GrowableArray* _prev_EMCP_methods; public: - PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak, - GrowableArray* prev_EMCP_methods); + PreviousVersionNode(ConstantPool* prev_constant_pool, + GrowableArray* prev_EMCP_methods); ~PreviousVersionNode(); ConstantPool* prev_constant_pool() const { return _prev_constant_pool; @@ -1153,59 +1151,26 @@ }; -// A Handle-ized version of PreviousVersionNode. -class PreviousVersionInfo : public ResourceObj { - private: - constantPoolHandle _prev_constant_pool_handle; - // If the previous version of the InstanceKlass doesn't have any - // EMCP methods, then _prev_EMCP_methods will be NULL. Since the - // methods cannot be collected while we hold a handle, - // _prev_EMCP_methods should never have a length of zero. - GrowableArray* _prev_EMCP_method_handles; - -public: - PreviousVersionInfo(PreviousVersionNode *pv_node); - ~PreviousVersionInfo(); - constantPoolHandle prev_constant_pool_handle() const { - return _prev_constant_pool_handle; - } - GrowableArray* prev_EMCP_method_handles() const { - return _prev_EMCP_method_handles; - } -}; - - -// Helper object for walking previous versions. This helper cleans up -// the Handles that it allocates when the helper object is destroyed. -// The PreviousVersionInfo object returned by next_previous_version() -// is only valid until a subsequent call to next_previous_version() or -// the helper object is destroyed. +// Helper object for walking previous versions. class PreviousVersionWalker : public StackObj { private: + Thread* _thread; GrowableArray* _previous_versions; int _current_index; - // Fields for cleaning up when we are done walking the previous versions: - // A HandleMark for the PreviousVersionInfo handles: - HandleMark _hm; + + // A pointer to the current node object so we can handle the deletes. + PreviousVersionNode* _current_p; - // It would be nice to have a ResourceMark field in this helper also, - // but the ResourceMark code says to be careful to delete handles held - // in GrowableArrays _before_ deleting the GrowableArray. Since we - // can't guarantee the order in which the fields are destroyed, we - // have to let the creator of the PreviousVersionWalker object do - // the right thing. Also, adding a ResourceMark here causes an - // include loop. - - // A pointer to the current info object so we can handle the deletes. - PreviousVersionInfo * _current_p; + // The constant pool handle keeps all the methods in this class from being + // deallocated from the metaspace during class unloading. 
+ constantPoolHandle _current_constant_pool_handle; public: - PreviousVersionWalker(InstanceKlass *ik); - ~PreviousVersionWalker(); + PreviousVersionWalker(Thread* thread, InstanceKlass *ik); // Return the interesting information for the next previous version // of the klass. Returns NULL if there are no more previous versions. - PreviousVersionInfo* next_previous_version(); + PreviousVersionNode* next_previous_version(); }; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/instanceMirrorKlass.hpp --- a/src/share/vm/oops/instanceMirrorKlass.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/instanceMirrorKlass.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ // Static field offset is an offset into the Heap, should be converted by // based on UseCompressedOop for traversal static HeapWord* start_of_static_fields(oop obj) { - return (HeapWord*)((intptr_t)obj + offset_of_static_fields()); + return (HeapWord*)(cast_from_oop(obj) + offset_of_static_fields()); } static void init_offset_of_static_fields() { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/instanceOop.hpp --- a/src/share/vm/oops/instanceOop.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/instanceOop.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -37,9 +37,9 @@ // If compressed, the offset of the fields of the instance may not be aligned. static int base_offset_in_bytes() { - // offset computation code breaks if UseCompressedKlassPointers + // offset computation code breaks if UseCompressedClassPointers // only is true - return (UseCompressedOops && UseCompressedKlassPointers) ? + return (UseCompressedOops && UseCompressedClassPointers) ? klass_gap_offset_in_bytes() : sizeof(instanceOopDesc); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/instanceRefKlass.cpp --- a/src/share/vm/oops/instanceRefKlass.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/instanceRefKlass.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ T heap_oop = oopDesc::load_heap_oop(referent_addr); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj); } ) if (!oopDesc::is_null(heap_oop)) { @@ -62,7 +62,7 @@ ref->InstanceKlass::oop_follow_contents(obj); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj); } ) return; @@ -70,7 +70,7 @@ // treat referent as normal oop debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj); } ) MarkSweep::mark_and_push(referent_addr); @@ -130,7 +130,7 @@ T heap_oop = oopDesc::load_heap_oop(referent_addr); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj); } ) if (!oopDesc::is_null(heap_oop)) { @@ -142,7 +142,7 @@ ref->InstanceKlass::oop_follow_contents(cm, obj); debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj); } ) return; @@ -150,7 +150,7 @@ // treat referent as normal oop debug_only( if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj); + gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj); } ) PSParallelCompact::mark_and_push(cm, referent_addr); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/klass.cpp --- a/src/share/vm/oops/klass.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/klass.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -139,7 +139,7 @@ return NULL; } -void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) { +void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() { return Metaspace::allocate(loader_data, word_size, /*read_only*/false, MetaspaceObj::ClassType, CHECK_NULL); } @@ -674,13 +674,23 @@ #ifndef PRODUCT -void Klass::verify_vtable_index(int i) { +bool Klass::verify_vtable_index(int i) { if (oop_is_instance()) { - assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds"); + int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size(); + assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit)); } else { assert(oop_is_array(), "Must be"); - assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds"); + int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size(); + assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit)); } + return true; +} + +bool Klass::verify_itable_index(int i) { + assert(oop_is_instance(), ""); + int method_count = klassItable::method_count_for_interface(this); + assert(i >= 0 && i < method_count, "index out of bounds"); + return true; } #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp Thu Oct 10 18:26:22 2013 +0200 +++ 
b/src/share/vm/oops/klass.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -180,7 +180,7 @@ // Constructor Klass(); - void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS); + void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw(); public: bool is_klass() const volatile { return true; } @@ -357,7 +357,8 @@ static int layout_helper_log2_element_size(jint lh) { assert(lh < (jint)_lh_neutral_value, "must be array"); int l2esz = (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; - assert(l2esz <= LogBitsPerLong, "sanity"); + assert(l2esz <= LogBitsPerLong, + err_msg("sanity. l2esz: 0x%x for lh: 0x%x", (uint)l2esz, (uint)lh)); return l2esz; } static jint array_layout_helper(jint tag, int hsize, BasicType etype, int log2_esize) { @@ -700,7 +701,8 @@ void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); } #ifndef PRODUCT - void verify_vtable_index(int index); + bool verify_vtable_index(int index); + bool verify_itable_index(int index); #endif virtual void oop_verify_on(oop obj, outputStream* st); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/klassVtable.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -47,11 +47,12 @@ // this function computes the vtable size (including the size needed for miranda -// methods) and the number of miranda methods in this class +// methods) and the number of miranda methods in this class. // Note on Miranda methods: Let's say there is a class C that implements -// interface I. Let's say there is a method m in I that neither C nor any -// of its super classes implement (i.e there is no method of any access, with -// the same name and signature as m), then m is a Miranda method which is +// interface I, and none of C's superclasses implements I. +// Let's say there is an abstract method m in I that neither C +// nor any of its super classes implement (i.e there is no method of any access, +// with the same name and signature as m), then m is a Miranda method which is // entered as a public abstract method in C's vtable. From then on it should // treated as any other public method in C for method over-ride purposes. void klassVtable::compute_vtable_size_and_num_mirandas( @@ -111,10 +112,13 @@ } int klassVtable::index_of(Method* m, int len) const { - assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods"); + assert(m->has_vtable_index(), "do not ask this of non-vtable methods"); return m->vtable_index(); } +// Copy super class's vtable to the first part (prefix) of this class's vtable, +// and return the number of entries copied. Expects that 'super' is the Java +// super class (arrays can have "array" super classes that must be skipped). int klassVtable::initialize_from_super(KlassHandle super) { if (super.is_null()) { return 0; @@ -139,14 +143,14 @@ } } -// Revised lookup semantics introduced 1.3 (Kestral beta) +// +// Revised lookup semantics introduced 1.3 (Kestrel beta) void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) { // Note: Arrays can have intermediate array supers. Use java_super to skip them. 
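Turning verify_vtable_index (and the new verify_itable_index) into bool-returning checks lets call sites fold them into asserts, so the verification disappears from product builds. The idiom, sketched standalone with illustrative names:

    #include <cassert>

    // Returns true so the whole call can sit inside assert(...); the useful
    // work is the internal range check with its message.
    static bool verify_index(int i, int limit) {
      assert(i >= 0 && i < limit && "index out of bounds");
      return true;
    }

    void install_vtable_entry(int vtable_index, int vtable_length) {
      assert(verify_index(vtable_index, vtable_length));  // compiled out with NDEBUG
      // ... write the entry ...
    }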
KlassHandle super (THREAD, klass()->java_super()); int nofNewEntries = 0; - if (PrintVtables && !klass()->oop_is_array()) { ResourceMark rm(THREAD); tty->print_cr("Initializing: %s", _klass->name()->as_C_string()); @@ -174,8 +178,10 @@ int len = methods->length(); int initialized = super_vtable_len; - // update_inherited_vtable can stop for gc - ensure using handles + // Check each of this class's methods against super; + // if override, replace in copy of super vtable, otherwise append to end for (int i = 0; i < len; i++) { + // update_inherited_vtable can stop for gc - ensure using handles HandleMark hm(THREAD); assert(methods->at(i)->is_method(), "must be a Method*"); methodHandle mh(THREAD, methods->at(i)); @@ -189,11 +195,11 @@ } } - // add miranda methods; it will also update the value of initialized - fill_in_mirandas(&initialized); + // add miranda methods to end of vtable. + initialized = fill_in_mirandas(initialized); // In class hierarchies where the accessibility is not increasing (i.e., going from private -> - // package_private -> publicprotected), the vtable might actually be smaller than our initial + // package_private -> public/protected), the vtable might actually be smaller than our initial // calculation. assert(initialized <= _length, "vtable initialization failed"); for(;initialized < _length; initialized++) { @@ -248,14 +254,8 @@ return superk; } -// Methods that are "effectively" final don't need vtable entries. -bool method_is_effectively_final( - AccessFlags klass_flags, methodHandle target) { - return target->is_final() || klass_flags.is_final() && !target->is_overpass(); -} - // Update child's copy of super vtable for overrides -// OR return true if a new vtable entry is required +// OR return true if a new vtable entry is required. // Only called for InstanceKlass's, i.e. not for arrays // If that changed, could not use _klass as handle for klass bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, @@ -263,6 +263,7 @@ ResourceMark rm; bool allocate_new = true; assert(klass->oop_is_instance(), "must be InstanceKlass"); + assert(klass == target_method()->method_holder(), "caller resp."); // Initialize the method's vtable index to "nonvirtual". // If we allocate a vtable entry, we will update it to a non-negative number. @@ -273,11 +274,17 @@ return false; } - if (method_is_effectively_final(klass->access_flags(), target_method)) { + if (target_method->is_final_method(klass->access_flags())) { // a final method never needs a new entry; final methods can be statically // resolved and they have to be present in the vtable only if they override // a super's method, in which case they re-use its entry allocate_new = false; + } else if (klass->is_interface()) { + allocate_new = false; // see note below in needs_new_vtable_entry + // An interface never allocates new vtable slots, only inherits old ones. + // This method will either be assigned its own itable index later, + // or be assigned an inherited vtable index in the loop below. 
+ target_method()->set_vtable_index(Method::pending_itable_index); } // we need a new entry if there is no superclass @@ -285,9 +292,10 @@ return allocate_new; } - // private methods always have a new entry in the vtable + // private methods in classes always have a new entry in the vtable // specification interpretation since classic has // private methods not overriding + // JDK8 adds private methods in interfaces which require invokespecial if (target_method()->is_private()) { return allocate_new; } @@ -411,8 +419,14 @@ Symbol* classname, AccessFlags class_flags, TRAPS) { + if (class_flags.is_interface()) { + // Interfaces do not use vtables, so there is no point to assigning + // a vtable index to any of their methods. If we refrain from doing this, + // we can use Method::_vtable_index to hold the itable index + return false; + } - if (method_is_effectively_final(class_flags, target_method) || + if (target_method->is_final_method(class_flags) || // a final method never needs a new entry; final methods can be statically // resolved and they have to be present in the vtable only if they override // a super's method, in which case they re-use its entry @@ -429,9 +443,10 @@ return true; } - // private methods always have a new entry in the vtable + // private methods in classes always have a new entry in the vtable // specification interpretation since classic has // private methods not overriding + // JDK8 adds private methods in interfaces which require invokespecial if (target_method()->is_private()) { return true; } @@ -500,13 +515,14 @@ return Method::invalid_vtable_index; } -// check if an entry is miranda +// check if an entry at an index is miranda +// requires that method m at entry be declared ("held") by an interface. bool klassVtable::is_miranda_entry_at(int i) { Method* m = method_at(i); Klass* method_holder = m->method_holder(); InstanceKlass *mhk = InstanceKlass::cast(method_holder); - // miranda methods are interface methods in a class's vtable + // miranda methods are public abstract instance interface methods in a class's vtable if (mhk->is_interface()) { assert(m->is_public(), "should be public"); assert(ik()->implements_interface(method_holder) , "this class should implement the interface"); @@ -516,8 +532,12 @@ return false; } -// check if a method is a miranda method, given a class's methods table and it's super +// check if a method is a miranda method, given a class's methods table and its super +// "miranda" means not static, not defined by this class, and not defined +// in super unless it is private and therefore inaccessible to this class. // the caller must make sure that the method belongs to an interface implemented by the class +// Miranda methods only include public interface instance methods +// Not private methods, not static methods, not default = concrete abstract bool klassVtable::is_miranda(Method* m, Array* class_methods, Klass* super) { if (m->is_static()) { return false; @@ -541,6 +561,14 @@ return false; } +// Scans current_interface_methods for miranda methods that do not +// already appear in new_mirandas and are also not defined-and-non-private +// in super (superclass). These mirandas are added to all_mirandas if it is +// not null; in addition, those that are not duplicates of miranda methods +// inherited by super from its interfaces are added to new_mirandas. 
+// Thus, new_mirandas will be the set of mirandas that this class introduces, +// all_mirandas will be the set of all mirandas applicable to this class +// including all defined in superclasses. void klassVtable::add_new_mirandas_to_lists( GrowableArray* new_mirandas, GrowableArray* all_mirandas, Array* current_interface_methods, Array* class_methods, @@ -599,17 +627,22 @@ } } -// fill in mirandas -void klassVtable::fill_in_mirandas(int* initialized) { +// Discover miranda methods ("miranda" = "interface abstract, no binding"), +// and append them into the vtable starting at index initialized, +// return the new value of initialized. +int klassVtable::fill_in_mirandas(int initialized) { GrowableArray mirandas(20); get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(), ik()->local_interfaces()); for (int i = 0; i < mirandas.length(); i++) { - put_method_at(mirandas.at(i), *initialized); - ++(*initialized); + put_method_at(mirandas.at(i), initialized); + ++initialized; } + return initialized; } +// Copy this class's vtable to the vtable beginning at start. +// Used to copy superclass vtable to prefix of subclass's vtable. void klassVtable::copy_vtable_to(vtableEntry* start) { Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size()); } @@ -723,6 +756,12 @@ // Initialization void klassItable::initialize_itable(bool checkconstraints, TRAPS) { + if (_klass->is_interface()) { + // This needs to go after vtable indexes are assigned but + // before implementors need to know the number of itable indexes. + assign_itable_indexes_for_interface(_klass()); + } + // Cannot be setup doing bootstrapping, interfaces don't have // itables, and klass with only ones entry have empty itables if (Universe::is_bootstrapping() || @@ -754,45 +793,89 @@ } +inline bool interface_method_needs_itable_index(Method* m) { + if (m->is_static()) return false; // e.g., Stream.empty + if (m->is_initializer()) return false; // or + // If an interface redeclares a method from java.lang.Object, + // it should already have a vtable index, don't touch it. + // e.g., CharSequence.toString (from initialize_vtable) + // if (m->has_vtable_index()) return false; // NO! + return true; +} + +int klassItable::assign_itable_indexes_for_interface(Klass* klass) { + // an interface does not have an itable, but its methods need to be numbered + if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count, + klass->name()->as_C_string()); + Array* methods = InstanceKlass::cast(klass)->methods(); + int nof_methods = methods->length(); + int ime_num = 0; + for (int i = 0; i < nof_methods; i++) { + Method* m = methods->at(i); + if (interface_method_needs_itable_index(m)) { + assert(!m->is_final_method(), "no final interface methods"); + // If m is already assigned a vtable index, do not disturb it. 
+ if (!m->has_vtable_index()) { + assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable"); + m->set_itable_index(ime_num); + // Progress to next itable entry + ime_num++; + } + } + } + assert(ime_num == method_count_for_interface(klass), "proper sizing"); + return ime_num; +} + +int klassItable::method_count_for_interface(Klass* interf) { + assert(interf->oop_is_instance(), "must be"); + assert(interf->is_interface(), "must be"); + Array<Method*>* methods = InstanceKlass::cast(interf)->methods(); + int nof_methods = methods->length(); + while (nof_methods > 0) { + Method* m = methods->at(nof_methods-1); + if (m->has_itable_index()) { + int length = m->itable_index() + 1; +#ifdef ASSERT + while (nof_methods > 0) { + m = methods->at(--nof_methods); + assert(!m->has_itable_index() || m->itable_index() < length, ""); + } +#endif //ASSERT + return length; // return the rightmost itable index, plus one + } + nof_methods -= 1; + } + // no methods have itable indexes + return 0; +} + + void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) { Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods(); int nof_methods = methods->length(); HandleMark hm; - KlassHandle klass = _klass; assert(nof_methods > 0, "at least one method must exist for interface to be in vtable"); Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader()); - int ime_num = 0; - // Skip first Method* if it is a class initializer - int i = methods->at(0)->is_static_initializer() ? 1 : 0; - - // m, method_name, method_signature, klass reset each loop so they - // don't need preserving across check_signature_loaders call - // methods needs a handle in case of gc from check_signature_loaders - for(; i < nof_methods; i++) { + int ime_count = method_count_for_interface(interf_h()); + for (int i = 0; i < nof_methods; i++) { Method* m = methods->at(i); - Symbol* method_name = m->name(); - Symbol* method_signature = m->signature(); - - // This is same code as in Linkresolver::lookup_instance_method_in_klasses - Method* target = klass->uncached_lookup_method(method_name, method_signature); - while (target != NULL && target->is_static()) { - // continue with recursive lookup through the superclass - Klass* super = target->method_holder()->super(); - target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature); + methodHandle target; + if (m->has_itable_index()) { + LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK); } if (target == NULL || !target->is_public() || target->is_abstract()) { // Entry does not resolve. Leave it empty
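method_count_for_interface above recovers the itable size from the methods array itself: indexes were assigned in increasing order, so the first assigned index found when scanning from the back is the maximum. A sketch of the same right-to-left scan over plain ints (-1 standing in for "no itable index"):

    #include <vector>

    // Sketch: itable length = rightmost assigned index, plus one.
    static int method_count_sketch(const std::vector<int>& itable_indexes) {
      for (int i = (int)itable_indexes.size() - 1; i >= 0; i--) {
        if (itable_indexes[i] >= 0) {
          return itable_indexes[i] + 1; // rightmost itable index, plus one
        }
      }
      return 0; // no method carries an itable index
    }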
} else { // Entry did resolve, check loader constraints before initializing // if checkconstraints requested - methodHandle target_h (THREAD, target); // preserve across gc if (checkconstraints) { Handle method_holder_loader (THREAD, target->method_holder()->class_loader()); if (method_holder_loader() != interface_loader()) { ResourceMark rm(THREAD); Symbol* failed_type_symbol = - SystemDictionary::check_signature_loaders(method_signature, + SystemDictionary::check_signature_loaders(m->signature(), method_holder_loader, interface_loader, true, CHECK); @@ -803,9 +886,9 @@ "and the class loader (instance of %s) for interface " "%s have different Class objects for the type %s " "used in the signature"; - char* sig = target_h()->name_and_sig_as_C_string(); + char* sig = target()->name_and_sig_as_C_string(); const char* loader1 = SystemDictionary::loader_name(method_holder_loader()); - char* current = klass->name()->as_C_string(); + char* current = _klass->name()->as_C_string(); const char* loader2 = SystemDictionary::loader_name(interface_loader()); char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string(); char* failed_type_name = failed_type_symbol->as_C_string(); @@ -821,10 +904,10 @@ } // ime may have moved during GC so recalculate address - itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h()); + int ime_num = m->itable_index(); + assert(ime_num < ime_count, "oob"); + itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target()); } - // Progress to next entry - ime_num++; } } @@ -913,20 +996,22 @@ virtual void doit(Klass* intf, int method_count) = 0; }; -// Visit all interfaces with at-least one method (excluding <clinit>) +// Visit all interfaces with at least one itable method void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) { // Handle array argument for(int i = 0; i < transitive_intf->length(); i++) { Klass* intf = transitive_intf->at(i); assert(intf->is_interface(), "sanity check"); - // Find no. of methods excluding a <clinit> - int method_count = InstanceKlass::cast(intf)->methods()->length(); - if (method_count > 0) { - Method* m = InstanceKlass::cast(intf)->methods()->at(0); - assert(m != NULL && m->is_method(), "sanity check"); - if (m->name() == vmSymbols::object_initializer_name()) { - method_count--; + // Find no.
of itable methods + int method_count = 0; + // method_count = klassItable::method_count_for_interface(intf); + Array<Method*>* methods = InstanceKlass::cast(intf)->methods(); + if (methods->length() > 0) { + for (int i = methods->length(); --i >= 0; ) { + if (interface_method_needs_itable_index(methods->at(i))) { + method_count++; + } } } @@ -1024,40 +1109,26 @@ } -// m must be a method in an interface -int klassItable::compute_itable_index(Method* m) { - InstanceKlass* intf = m->method_holder(); - assert(intf->is_interface(), "sanity check"); - Array<Method*>* methods = intf->methods(); - int index = 0; - while(methods->at(index) != m) { - index++; - assert(index < methods->length(), "should find index for resolve_invoke"); - } - // Adjust for <clinit>, which is left out of table if first method - if (methods->length() > 0 && methods->at(0)->is_static_initializer()) { - index--; - } - return index; -} - - -// inverse to compute_itable_index +// inverse to itable_index Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) { assert(InstanceKlass::cast(intf)->is_interface(), "sanity check"); + assert(intf->verify_itable_index(itable_index), ""); Array<Method*>* methods = InstanceKlass::cast(intf)->methods(); + if (itable_index < 0 || itable_index >= method_count_for_interface(intf)) + return NULL; // help caller defend against bad indexes + int index = itable_index; - // Adjust for <clinit>, which is left out of table if first method - if (methods->length() > 0 && methods->at(0)->is_static_initializer()) { - index++; + Method* m = methods->at(index); + int index2 = -1; + while (!m->has_itable_index() || + (index2 = m->itable_index()) != itable_index) { + assert(index2 < itable_index, "monotonic"); + if (++index == methods->length()) + return NULL; + m = methods->at(index); } - - if (itable_index < 0 || index >= methods->length()) - return NULL; // help caller defend against bad indexes - - Method* m = methods->at(index); - assert(compute_itable_index(m) == itable_index, "correct inverse"); + assert(m->itable_index() == itable_index, "correct inverse"); return m; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/klassVtable.hpp --- a/src/share/vm/oops/klassVtable.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/klassVtable.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -124,7 +124,7 @@ // support for miranda methods bool is_miranda_entry_at(int i); - void fill_in_mirandas(int* initialized); + int fill_in_mirandas(int initialized); static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super); static void add_new_mirandas_to_lists( GrowableArray<Method*>* new_mirandas, @@ -150,6 +150,8 @@ // from_compiled_code_entry_point -> nmethod entry point // from_interpreter_entry_point -> i2cadapter class vtableEntry VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + public: // size in words static int size() { @@ -288,12 +290,12 @@ #endif // INCLUDE_JVMTI // Setup of itable + static int assign_itable_indexes_for_interface(Klass* klass); + static int method_count_for_interface(Klass* klass); static int compute_itable_size(Array<Klass*>* transitive_interfaces); static void setup_itable_offset_table(instanceKlassHandle klass); // Resolving of method to index - static int compute_itable_index(Method* m); - // ...and back again: static Method* method_for_itable_index(Klass* klass, int itable_index); // Debugging/Statistics diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/method.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -509,24
+509,31 @@ return _access_flags.has_loops(); } - -bool Method::is_final_method() const { - // %%% Should return true for private methods also, - // since there is no way to override them. - return is_final() || method_holder()->is_final(); +bool Method::is_final_method(AccessFlags class_access_flags) const { + // or "does_not_require_vtable_entry" + // overpass can occur, is not final (reuses vtable entry) + // private methods get vtable entries for backward class compatibility. + if (is_overpass()) return false; + return is_final() || class_access_flags.is_final(); } - -bool Method::is_strict_method() const { - return is_strict(); +bool Method::is_final_method() const { + return is_final_method(method_holder()->access_flags()); } - -bool Method::can_be_statically_bound() const { - if (is_final_method()) return true; +bool Method::can_be_statically_bound(AccessFlags class_access_flags) const { + if (is_final_method(class_access_flags)) return true; +#ifdef ASSERT + bool is_nonv = (vtable_index() == nonvirtual_vtable_index); + if (class_access_flags.is_interface()) assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv)); +#endif + assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question"); return vtable_index() == nonvirtual_vtable_index; } +bool Method::can_be_statically_bound() const { + return can_be_statically_bound(method_holder()->access_flags()); +} bool Method::is_accessor() const { if (code_size() != 5) return false; @@ -720,11 +727,25 @@ } } +bool Method::is_always_compilable() const { + // Generated adapters must be compiled + if (is_method_handle_intrinsic()) { + bool is_executeCompiled = (intrinsic_id() == vmIntrinsics::_CompilerToVMImpl_executeCompiledMethod); + if (is_synthetic() || is_executeCompiled) { + assert(!is_not_c1_compilable() || is_executeCompiled, "sanity check"); + assert(!is_not_c2_compilable() || is_executeCompiled, "sanity check"); + return true; + } + } + + return false; +} + bool Method::is_not_compilable(int comp_level) const { if (number_of_breakpoints() > 0) return true; - if (is_method_handle_intrinsic()) - return !is_synthetic() && intrinsic_id() != vmIntrinsics::_CompilerToVMImpl_executeCompiledMethod; // the generated adapters must be compiled + if (is_always_compilable()) + return false; if (comp_level == CompLevel_any) return is_not_c1_compilable() || is_not_c2_compilable(); if (is_c1_compile(comp_level)) @@ -736,6 +757,10 @@ // call this when compiler finds that this method is not compilable void Method::set_not_compilable(int comp_level, bool report, const char* reason) { + if (is_always_compilable()) { + // Don't mark a method which should be always compilable + return; + } print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason); if (comp_level == CompLevel_all) { set_not_c1_compilable(); @@ -747,6 +772,7 @@ set_not_c2_compilable(); } CompilationPolicy::policy()->disable_compilation(this); + assert(!CompilationPolicy::can_be_compiled(this, comp_level), "sanity check"); } bool Method::is_not_osr_compilable(int comp_level) const { @@ -773,6 +799,7 @@ set_not_c2_osr_compilable(); } CompilationPolicy::policy()->disable_compilation(this); + assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check"); } // Revert to using the interpreter and clear out the nmethod @@ -885,16 +912,6 @@ // This function must not hit a safepoint!
address Method::verified_code_entry() { debug_only(No_Safepoint_Verifier nsv;) - nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); - if (code == NULL && UseCodeCacheFlushing) { - nmethod *saved_code = CodeCache::reanimate_saved_code(this); - if (saved_code != NULL) { - methodHandle method(this); - assert( ! saved_code->is_osr_method(), "should not get here for osr" ); - set_code( method, saved_code ); - } - } - assert(_from_compiled_entry != NULL, "must be set"); return _from_compiled_entry; } @@ -958,7 +975,7 @@ assert(ik->is_subclass_of(method_holder()), "should be subklass"); assert(ik->vtable() != NULL, "vtable should exist"); - if (vtable_index() == nonvirtual_vtable_index) { + if (!has_vtable_index()) { return false; } else { Method* vt_m = ik->method_at_vtable(vtable_index()); @@ -1950,7 +1967,7 @@ void Method::print_value_on(outputStream* st) const { assert(is_method(), "must be method"); - st->print_cr(internal_name()); + st->print(internal_name()); print_address_on(st); st->print(" "); name()->print_value_on(st); @@ -1958,6 +1975,7 @@ signature()->print_value_on(st); st->print(" in "); method_holder()->print_value_on(st); + if (WizardMode) st->print("#%d", _vtable_index); if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals()); if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code()); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/method.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -448,16 +448,22 @@ enum VtableIndexFlag { // Valid vtable indexes are non-negative (>= 0). // These few negative values are used as sentinels. - highest_unused_vtable_index_value = -5, + itable_index_max = -10, // first itable index, growing downward + pending_itable_index = -9, // itable index will be assigned invalid_vtable_index = -4, // distinct from any valid vtable index garbage_vtable_index = -3, // not yet linked; no vtable layout yet nonvirtual_vtable_index = -2 // there is no need for vtable dispatch // 6330203 Note: Do not use -1, which was overloaded with many meanings. }; DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; }) - int vtable_index() const { assert(valid_vtable_index(), ""); - return _vtable_index; } + bool has_vtable_index() const { return _vtable_index >= 0; } + int vtable_index() const { return _vtable_index; } void set_vtable_index(int index) { _vtable_index = index; } + DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; }) + bool has_itable_index() const { return _vtable_index <= itable_index_max; } + int itable_index() const { assert(valid_itable_index(), ""); + return itable_index_max - _vtable_index; } + void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); } // interpreter entry address interpreter_entry() const { return _i2i_entry; } @@ -560,10 +566,11 @@ // checks method and its method holder bool is_final_method() const; - bool is_strict_method() const; + bool is_final_method(AccessFlags class_access_flags) const; // true if method needs no dynamic dispatch (final and/or no vtable entry) bool can_be_statically_bound() const; + bool can_be_statically_bound(AccessFlags class_access_flags) const; // returns true if the method has any backward branches. bool has_loops() { @@ -740,10 +747,6 @@ // so handles are not used to avoid deadlock. 
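The VtableIndexFlag hunk above is the heart of the change: a single int field now encodes either a real vtable index (>= 0), a few negative sentinels, or an itable index folded in below itable_index_max. A self-contained round-trip check, with the constants copied from the enum (the arithmetic is its own inverse):

    #include <cassert>

    enum { itable_index_max = -10, pending_itable_index = -9 };

    static int  encode_itable(int ime)   { return itable_index_max - ime; }   // as in set_itable_index
    static int  decode_itable(int field) { return itable_index_max - field; } // as in itable_index()
    static bool has_itable_index(int field) { return field <= itable_index_max; }

    int main() {
      // itable index 0 encodes to -10, 1 to -11, 2 to -12, ...
      for (int ime = 0; ime < 4; ime++) {
        int field = encode_itable(ime);
        assert(has_itable_index(field));
        assert(decode_itable(field) == ime); // round trip recovers the index
      }
      return 0;
    }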
jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); } - // JNI static invoke cached itable index accessors - int cached_itable_index() { return method_holder()->cached_itable_index(method_idnum()); } - void set_cached_itable_index(int index) { method_holder()->set_cached_itable_index(method_idnum(), index); } - // Support for inlining of intrinsic methods vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; } void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; } @@ -796,6 +799,7 @@ void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) { set_not_osr_compilable(comp_level, false); } + bool is_always_compilable() const; private: void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/methodData.hpp --- a/src/share/vm/oops/methodData.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/methodData.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -72,6 +72,8 @@ // // Overlay for generic profiling data. class DataLayout VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + private: // Every data layout begins with a header. This header // contains a tag, which is used to indicate the size/layout @@ -331,10 +333,10 @@ return (int)data()->cell_at(index); } void set_oop_at(int index, oop value) { - set_intptr_at(index, (intptr_t) value); + set_intptr_at(index, cast_from_oop<intptr_t>(value)); } oop oop_at(int index) { - return (oop)intptr_at(index); + return cast_to_oop(intptr_at(index)); } void set_flag_at(int flag_number) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/oop.inline.hpp --- a/src/share/vm/oops/oop.inline.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/oop.inline.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -69,7 +69,7 @@ } inline Klass* oopDesc::klass() const { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { return Klass::decode_klass_not_null(_metadata._compressed_klass); } else { return _metadata._klass; @@ -78,7 +78,7 @@ inline Klass* oopDesc::klass_or_null() const volatile { // can be NULL in CMS - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { return Klass::decode_klass(_metadata._compressed_klass); } else { return _metadata._klass; @@ -86,19 +86,19 @@ } inline int oopDesc::klass_gap_offset_in_bytes() { - assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers"); + assert(UseCompressedClassPointers, "only applicable to compressed klass pointers"); return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass); } inline Klass** oopDesc::klass_addr() { // Only used internally and with CMS and will not work with // UseCompressedOops - assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers"); + assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers"); return (Klass**) &_metadata._klass; } inline narrowKlass* oopDesc::compressed_klass_addr() { - assert(UseCompressedKlassPointers, "only called by compressed klass pointers"); + assert(UseCompressedClassPointers, "only called by compressed klass pointers"); return &_metadata._compressed_klass; } @@ -106,7 +106,7 @@ // since klasses are promoted no store check is needed assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*"); assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*"); - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { *compressed_klass_addr() =
Klass::encode_klass_not_null(k); } else { *klass_addr() = k; @@ -118,7 +118,7 @@ } inline void oopDesc::set_klass_gap(int v) { - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v; } } @@ -126,7 +126,7 @@ inline void oopDesc::set_klass_to_list_ptr(oop k) { // This is only to be used during GC, for from-space objects, so no // barrier is needed. - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling) } else { _metadata._klass = (Klass*)(address)k; @@ -135,7 +135,7 @@ inline oop oopDesc::list_ptr_from_klass() { // This is only to be used during GC, for from-space objects. - if (UseCompressedKlassPointers) { + if (UseCompressedClassPointers) { return decode_heap_oop((narrowOop)_metadata._compressed_klass); } else { // Special case for GC @@ -183,7 +183,7 @@ // in inner GC loops so these are separated. inline bool check_obj_alignment(oop obj) { - return (intptr_t)obj % MinObjAlignmentInBytes == 0; + return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0; } inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/oopsHierarchy.hpp --- a/src/share/vm/oops/oopsHierarchy.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/oopsHierarchy.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -55,11 +55,16 @@ // to and from the underlying oopDesc pointer type. // // Because oop and its subclasses <type>Oop are class types, arbitrary -// conversions are not accepted by the compiler, and you may get a message -// about overloading ambiguity (between long and int is common when converting -// from a constant in 64 bit mode), or unable to convert from type to 'oop'. -// Applying a cast to one of these conversion operators first will get to the -// underlying oopDesc* type if appropriate. +// conversions are not accepted by the compiler. Applying a cast to +// an oop will cause the best matched conversion operator to be +// invoked returning the underlying oopDesc* type if appropriate. +// No copy constructors, explicit user conversions or operators of +// numerical type should be defined within the oop class. Most C++ +// compilers will issue a compile time error concerning the overloading +// ambiguity between operators of numerical and pointer types. If +// a conversion to or from an oop to a numerical type is needed, +// use the inline template methods, cast_*_oop, defined below. +// // Converting NULL to oop to Handle implicitly is no longer accepted by the // compiler because there are too many steps in the conversion. Use Handle() // instead, which generates less code anyway.
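For context on the UseCompressedClassPointers accessors renamed above: when the flag is on, the header holds a 32-bit narrowKlass that must be widened through a base-and-shift decode. A schematic sketch (the base and shift values here are illustrative assumptions; the real encode/decode lives in Klass and uses the compressed class space parameters):

    #include <cstdint>

    typedef uint32_t narrowKlass;
    static const uintptr_t kBase  = 0x800000000ULL; // assumed class-space base
    static const unsigned  kShift = 3;              // assumed alignment shift

    // Schematic equivalents of Klass::encode/decode_klass_not_null:
    static narrowKlass encode_klass(uintptr_t k)   { return (narrowKlass)((k - kBase) >> kShift); }
    static uintptr_t   decode_klass(narrowKlass n) { return kBase + ((uintptr_t)n << kShift); }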
@@ -83,12 +88,9 @@ void raw_set_obj(const void* p) { _o = (oopDesc*)p; } oop() { set_obj(NULL); } + oop(const oop& o) { set_obj(o.obj()); } oop(const volatile oop& o) { set_obj(o.obj()); } oop(const void* p) { set_obj(p); } - oop(intptr_t i) { set_obj((void *)i); } -#ifdef _LP64 - oop(int i) { set_obj((void *)i); } -#endif ~oop() { if (CheckUnhandledOops) unregister_oop(); } @@ -101,8 +103,6 @@ bool operator==(void *p) const { return obj() == p; } bool operator!=(const volatile oop o) const { return obj() != o.obj(); } bool operator!=(void *p) const { return obj() != p; } - bool operator==(intptr_t p) const { return obj() == (oopDesc*)p; } - bool operator!=(intptr_t p) const { return obj() != (oopDesc*)p; } bool operator<(oop o) const { return obj() < o.obj(); } bool operator>(oop o) const { return obj() > o.obj(); } @@ -110,8 +110,18 @@ bool operator>=(oop o) const { return obj() >= o.obj(); } bool operator!() const { return !obj(); } - // Cast + // Assignment + oop& operator=(const oop& o) { _o = o.obj(); return *this; } +#ifndef SOLARIS + volatile oop& operator=(const oop& o) volatile { _o = o.obj(); return *this; } +#endif + volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; } + + // Explicit user conversions operator void* () const { return (void *)obj(); } +#ifndef SOLARIS + operator void* () const volatile { return (void *)obj(); } +#endif operator HeapWord* () const { return (HeapWord*)obj(); } operator oopDesc* () const { return obj(); } operator intptr_t* () const { return (intptr_t*)obj(); } @@ -119,7 +129,6 @@ operator markOop () const { return markOop(obj()); } operator address () const { return (address)obj(); } - operator intptr_t () const volatile { return (intptr_t)obj(); } // from javaCalls.cpp operator jobject () const { return (jobject)obj(); } @@ -141,12 +150,26 @@ class type##Oop : public oop { \ public: \ type##Oop() : oop() {} \ + type##Oop(const oop& o) : oop(o) {} \ type##Oop(const volatile oop& o) : oop(o) {} \ type##Oop(const void* p) : oop(p) {} \ operator type##OopDesc* () const { return (type##OopDesc*)obj(); } \ type##OopDesc* operator->() const { \ return (type##OopDesc*)obj(); \ } \ + type##Oop& operator=(const type##Oop& o) { \ + oop::operator=(o); \ + return *this; \ + } \ + NOT_SOLARIS( \ + volatile type##Oop& operator=(const type##Oop& o) volatile { \ + (void)const_cast<oop&>(oop::operator=(o)); \ + return *this; \ + }) \ + volatile type##Oop& operator=(const volatile type##Oop& o) volatile {\ + (void)const_cast<oop&>(oop::operator=(o)); \ + return *this; \ + } \ }; DEF_OOP(instance); @@ -156,6 +179,16 @@ #endif // CHECK_UNHANDLED_OOPS +// For CHECK_UNHANDLED_OOPS, it is ambiguous C++ behavior to have the oop +// structure contain explicit user defined conversions of both numerical +// and pointer type. Define inline methods to provide the numerical conversions.
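With the numeric constructors and operator intptr_t gone from oop, every pointer/integer crossing must now be spelled out through the cast_to_oop/cast_from_oop templates defined just below. A usage sketch, simplified to omit the CheckUnhandledOops plumbing (oopDesc is left opaque here):

    #include <cstdint>

    struct oopDesc;            // opaque, as in HotSpot
    typedef oopDesc* oop;      // the non-CHECK_UNHANDLED_OOPS case

    template <class T> inline oop cast_to_oop(T value) { return (oop)(value); }
    template <class T> inline T   cast_from_oop(oop o) { return (T)(o); }

    // e.g. the alignment check from oop.inline.hpp becomes:
    inline bool check_obj_alignment_sketch(oop obj, int min_align) {
      return cast_from_oop<intptr_t>(obj) % min_align == 0;
    }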
+template <class T> inline oop cast_to_oop(T value) { + return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void *))(value)); +} +template <class T> inline T cast_from_oop(oop o) { + return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o); +} + // The metadata hierarchy is separate from the oop hierarchy // class MetaspaceObj diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/symbol.cpp --- a/src/share/vm/oops/symbol.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/symbol.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -41,19 +41,19 @@ } } -void* Symbol::operator new(size_t sz, int len, TRAPS) { +void* Symbol::operator new(size_t sz, int len, TRAPS) throw() { int alloc_size = size(len)*HeapWordSize; address res = (address) AllocateHeap(alloc_size, mtSymbol); return res; } -void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) { +void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() { int alloc_size = size(len)*HeapWordSize; address res = (address)arena->Amalloc(alloc_size); return res; } -void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) { +void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() { address res; int alloc_size = size(len)*HeapWordSize; res = (address) Metaspace::allocate(loader_data, size(len), true, diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/oops/symbol.hpp --- a/src/share/vm/oops/symbol.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/oops/symbol.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -45,7 +45,7 @@ // in the SymbolTable bucket (the _literal field in HashtableEntry) // that points to the Symbol. All other stores of a Symbol* // to a field of a persistent variable (e.g., the _name field in -// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
// // 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for // "name" and returns a pointer to F or finds a pre-existing Symbol F for @@ -136,9 +136,9 @@ } Symbol(const u1* name, int length, int refcount); - void* operator new(size_t size, int len, TRAPS); - void* operator new(size_t size, int len, Arena* arena, TRAPS); - void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS); + void* operator new(size_t size, int len, TRAPS) throw(); + void* operator new(size_t size, int len, Arena* arena, TRAPS) throw(); + void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw(); void operator delete(void* p); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/block.cpp --- a/src/share/vm/opto/block.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/block.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -35,10 +35,6 @@ #include "opto/rootnode.hpp" #include "utilities/copy.hpp" -// Optimization - Graph Style - - -//----------------------------------------------------------------------------- void Block_Array::grow( uint i ) { assert(i >= Max(), "must be an overflow"); debug_only(_limit = i+1); @@ -54,7 +50,6 @@ Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) ); } -//============================================================================= void Block_List::remove(uint i) { assert(i < _cnt, "index out of bounds"); Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*))); @@ -76,8 +71,6 @@ } #endif -//============================================================================= - uint Block::code_alignment() { // Check for Root block if (_pre_order == 0) return CodeEntryAlignment; @@ -113,16 +106,15 @@ return unit_sz; // no particular alignment } -//----------------------------------------------------------------------------- // Compute the size of first 'inst_cnt' instructions in this block. // Return the number of instructions left to compute if the block has // less than 'inst_cnt' instructions. Stop, and return 0 if sum_size // exceeds OptoLoopAlignment. uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra) { - uint last_inst = _nodes.size(); + uint last_inst = number_of_nodes(); for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) { - uint inst_size = _nodes[j]->size(ra); + uint inst_size = get_node(j)->size(ra); if( inst_size > 0 ) { inst_cnt--; uint sz = sum_size + inst_size; @@ -138,10 +130,9 @@ return inst_cnt; } -//----------------------------------------------------------------------------- uint Block::find_node( const Node *n ) const { - for( uint i = 0; i < _nodes.size(); i++ ) { - if( _nodes[i] == n ) + for( uint i = 0; i < number_of_nodes(); i++ ) { + if( get_node(i) == n ) return i; } ShouldNotReachHere(); @@ -150,10 +141,9 @@ // Find and remove n from block list void Block::find_remove( const Node *n ) { - _nodes.remove(find_node(n)); + remove_node(find_node(n)); } -//------------------------------is_Empty--------------------------------------- // Return empty status of a block. Empty blocks contain only the head, other // ideal nodes, and an optional trailing goto.
int Block::is_Empty() const { @@ -164,10 +154,10 @@ } int success_result = completely_empty; - int end_idx = _nodes.size()-1; + int end_idx = number_of_nodes() - 1; // Check for ending goto - if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) { + if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) { success_result = empty_with_goto; end_idx--; } @@ -180,7 +170,7 @@ // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes // turn directly into code, because only MachNodes have non-trivial // emit() functions. - while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) { + while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) { end_idx--; } @@ -192,7 +182,6 @@ return not_empty; } -//------------------------------has_uncommon_code------------------------------ // Return true if the block's code implies that it is likely to be // executed infrequently. Check to see if the block ends in a Halt or // a low probability call. @@ -218,18 +207,17 @@ return op == Op_Halt; } -//------------------------------is_uncommon------------------------------------ // True if block is low enough frequency or guarded by a test which // mostly does not go here. -bool Block::is_uncommon(PhaseCFG* cfg) const { +bool PhaseCFG::is_uncommon(const Block* block) { // Initial blocks must never be moved, so are never uncommon. - if (head()->is_Root() || head()->is_Start()) return false; + if (block->head()->is_Root() || block->head()->is_Start()) return false; // Check for way-low freq - if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true; + if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true; // Look for code shape indicating uncommon_trap or slow path - if (has_uncommon_code()) return true; + if (block->has_uncommon_code()) return true; const float epsilon = 0.05f; const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon); @@ -237,8 +225,8 @@ uint freq_preds = 0; uint uncommon_for_freq_preds = 0; - for( uint i=1; i<num_preds(); i++ ) { - Block* guard = cfg->get_block_for_node(pred(i)); + for( uint i=1; i< block->num_preds(); i++ ) { + Block* guard = get_block_for_node(block->pred(i)); // Check to see if this block follows its guard 1 time out of 10000 // or less. // @@ -256,14 +244,14 @@ uncommon_preds++; } else { freq_preds++; - if( _freq < guard->_freq * guard_factor ) { + if(block->_freq < guard->_freq * guard_factor ) { uncommon_for_freq_preds++; } } } - if( num_preds() > 1 && + if( block->num_preds() > 1 && // The block is uncommon if all preds are uncommon or - (uncommon_preds == (num_preds()-1) || + (uncommon_preds == (block->num_preds()-1) || // it is uncommon for all frequent preds.
uncommon_for_freq_preds == freq_preds) ) { return true; @@ -271,7 +259,6 @@ return false; } -//------------------------------dump------------------------------------------- #ifndef PRODUCT void Block::dump_bidx(const Block* orig, outputStream* st) const { if (_pre_order) st->print("B%d",_pre_order); @@ -357,20 +344,19 @@ void Block::dump(const PhaseCFG* cfg) const { dump_head(cfg); - for (uint i=0; i< _nodes.size(); i++) { - _nodes[i]->dump(); + for (uint i=0; i< number_of_nodes(); i++) { + get_node(i)->dump(); } tty->print("\n"); } #endif -//============================================================================= -//------------------------------PhaseCFG--------------------------------------- PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher) : Phase(CFG) , _block_arena(arena) +, _root(root) +, _matcher(matcher) , _node_to_block_mapping(arena) -, _root(root) , _node_latency(NULL) #ifndef PRODUCT , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining")) @@ -390,11 +376,10 @@ _goto->set_req(0,_goto); // Build the CFG in Reverse Post Order - _num_blocks = build_cfg(); - _broot = get_block_for_node(_root); + _number_of_blocks = build_cfg(); + _root_block = get_block_for_node(_root); } -//------------------------------build_cfg-------------------------------------- // Build a proper looking CFG. Make every block begin with either a StartNode // or a RegionNode. Make every block end with either a Goto, If or Return. // The RootNode both starts and ends its own block. Do this with a recursive @@ -449,7 +434,7 @@ map_node_to_block(p, bb); map_node_to_block(x, bb); if( x != p ) { // Only for root is x == p - bb->_nodes.push((Node*)x); + bb->push_node((Node*)x); } // Now handle predecessors ++sum; // Count 1 for self block @@ -484,11 +469,11 @@ assert( x != proj, "" ); // Map basic block of projection map_node_to_block(proj, pb); - pb->_nodes.push(proj); + pb->push_node(proj); } // Insert self as a child of my predecessor block pb->_succs.map(pb->_num_succs++, get_block_for_node(np)); - assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(), + assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(), "too many control users, not a CFG?" ); } } @@ -496,13 +481,12 @@ return sum; } -//------------------------------insert_goto_at--------------------------------- // Inserts a goto & corresponding basic block between // block[block_no] and its succ_no'th successor block void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) { // get block with block_no - assert(block_no < _num_blocks, "illegal block number"); - Block* in = _blocks[block_no]; + assert(block_no < number_of_blocks(), "illegal block number"); + Block* in = get_block(block_no); // get successor block succ_no assert(succ_no < in->_num_succs, "illegal successor number"); Block* out = in->_succs[succ_no]; @@ -511,7 +495,7 @@ // surrounding blocks.
float freq = in->_freq * in->succ_prob(succ_no); // get ProjNode corresponding to the succ_no'th successor of the in block - ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj(); + ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj(); // create region for basic block RegionNode* region = new (C) RegionNode(2); region->init_req(1, proj); @@ -523,7 +507,7 @@ Node* gto = _goto->clone(); // get a new goto node gto->set_req(0, region); // add it to the basic block - block->_nodes.push(gto); + block->push_node(gto); map_node_to_block(gto, block); C->regalloc()->set_bad(gto->_idx); // hook up successor block @@ -537,17 +521,15 @@ // Set the frequency of the new block block->_freq = freq; // add new basic block to basic block list - _blocks.insert(block_no + 1, block); - _num_blocks++; + add_block_at(block_no + 1, block); } -//------------------------------no_flip_branch--------------------------------- // Does this block end in a multiway branch that cannot have the default case // flipped for another case? static bool no_flip_branch( Block *b ) { - int branch_idx = b->_nodes.size() - b->_num_succs-1; + int branch_idx = b->number_of_nodes() - b->_num_succs-1; if( branch_idx < 1 ) return false; - Node *bra = b->_nodes[branch_idx]; + Node *bra = b->get_node(branch_idx); if( bra->is_Catch() ) return true; if( bra->is_Mach() ) { @@ -560,7 +542,6 @@ return false; } -//------------------------------convert_NeverBranch_to_Goto-------------------- // Check for NeverBranch at block end. This needs to become a GOTO to the // true target. NeverBranch are treated as a conditional branch that always // goes the same direction for most of the optimizer and are used to give a @@ -569,16 +550,16 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) { // Find true target int end_idx = b->end_idx(); - int idx = b->_nodes[end_idx+1]->as_Proj()->_con; + int idx = b->get_node(end_idx+1)->as_Proj()->_con; Block *succ = b->_succs[idx]; Node* gto = _goto->clone(); // get a new goto node gto->set_req(0, b->head()); - Node *bp = b->_nodes[end_idx]; - b->_nodes.map(end_idx,gto); // Slam over NeverBranch + Node *bp = b->get_node(end_idx); + b->map_node(gto, end_idx); // Slam over NeverBranch map_node_to_block(gto, b); C->regalloc()->set_bad(gto->_idx); - b->_nodes.pop(); // Yank projections - b->_nodes.pop(); // Yank projections + b->pop_node(); // Yank projections + b->pop_node(); // Yank projections b->_succs.map(0,succ); // Map only successor b->_num_succs = 1; // remap successor's predecessors if necessary @@ -594,11 +575,10 @@ // Scan through block, yanking dead path from // all regions and phis. dead->head()->del_req(j); - for( int k = 1; dead->_nodes[k]->is_Phi(); k++ ) - dead->_nodes[k]->del_req(j); + for( int k = 1; dead->get_node(k)->is_Phi(); k++ ) + dead->get_node(k)->del_req(j); } -//------------------------------move_to_next----------------------------------- // Helper function to move block bx to the slot following b_index. Return // true if the move is successful, otherwise false bool PhaseCFG::move_to_next(Block* bx, uint b_index) { @@ -606,20 +586,22 @@ // Return false if bx is already scheduled. 
uint bx_index = bx->_pre_order; - if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) { + if ((bx_index <= b_index) && (get_block(bx_index) == bx)) { return false; } // Find the current index of block bx on the block list bx_index = b_index + 1; - while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++; - assert(_blocks[bx_index] == bx, "block not found"); + while (bx_index < number_of_blocks() && get_block(bx_index) != bx) { + bx_index++; + } + assert(get_block(bx_index) == bx, "block not found"); // If the previous block conditionally falls into bx, return false, // because moving bx will create an extra jump. for(uint k = 1; k < bx->num_preds(); k++ ) { Block* pred = get_block_for_node(bx->pred(k)); - if (pred == _blocks[bx_index-1]) { + if (pred == get_block(bx_index - 1)) { if (pred->_num_succs != 1) { return false; } @@ -632,14 +614,13 @@ return true; } -//------------------------------move_to_end------------------------------------ // Move empty and uncommon blocks to the end. void PhaseCFG::move_to_end(Block *b, uint i) { int e = b->is_Empty(); if (e != Block::not_empty) { if (e == Block::empty_with_goto) { // Remove the goto, but leave the block. - b->_nodes.pop(); + b->pop_node(); } // Mark this block as a connector block, which will cause it to be // ignored in certain functions such as non_connector_successor(). @@ -650,31 +631,31 @@ _blocks.push(b); } -//---------------------------set_loop_alignment-------------------------------- // Set loop alignment for every block void PhaseCFG::set_loop_alignment() { - uint last = _num_blocks; - assert( _blocks[0] == _broot, "" ); + uint last = number_of_blocks(); + assert(get_block(0) == get_root_block(), ""); - for (uint i = 1; i < last; i++ ) { - Block *b = _blocks[i]; - if (b->head()->is_Loop()) { - b->set_loop_alignment(b); + for (uint i = 1; i < last; i++) { + Block* block = get_block(i); + if (block->head()->is_Loop()) { + block->set_loop_alignment(block); } } } -//-----------------------------remove_empty------------------------------------ // Make empty basic blocks to be "connector" blocks, Move uncommon blocks // to the end. -void PhaseCFG::remove_empty() { +void PhaseCFG::remove_empty_blocks() { // Move uncommon blocks to the end - uint last = _num_blocks; - assert( _blocks[0] == _broot, "" ); + uint last = number_of_blocks(); + assert(get_block(0) == get_root_block(), ""); for (uint i = 1; i < last; i++) { - Block *b = _blocks[i]; - if (b->is_connector()) break; + Block* block = get_block(i); + if (block->is_connector()) { + break; + } // Check for NeverBranch at block end. This needs to become a GOTO to the // true target. NeverBranch are treated as a conditional branch that @@ -682,124 +663,127 @@ // to give a fake exit path to infinite loops. At this late stage they // need to turn into Goto's so that when you enter the infinite loop you // indeed hang. - if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch ) - convert_NeverBranch_to_Goto(b); + if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) { + convert_NeverBranch_to_Goto(block); + } // Look for uncommon blocks and move to end. if (!C->do_freq_based_layout()) { - if (b->is_uncommon(this)) { - move_to_end(b, i); + if (is_uncommon(block)) { + move_to_end(block, i); last--; // No longer check for being uncommon! - if( no_flip_branch(b) ) { // Fall-thru case must follow? - b = _blocks[i]; // Find the fall-thru block - move_to_end(b, i); + if (no_flip_branch(block)) { // Fall-thru case must follow? 
+ // Find the fall-thru block + block = get_block(i); + move_to_end(block, i); last--; } - i--; // backup block counter post-increment + // backup block counter post-increment + i--; } } } // Move empty blocks to the end - last = _num_blocks; + last = number_of_blocks(); for (uint i = 1; i < last; i++) { - Block *b = _blocks[i]; - if (b->is_Empty() != Block::not_empty) { - move_to_end(b, i); + Block* block = get_block(i); + if (block->is_Empty() != Block::not_empty) { + move_to_end(block, i); last--; i--; } } // End of for all blocks } -//-----------------------------fixup_flow-------------------------------------- // Fix up the final control flow for basic blocks. void PhaseCFG::fixup_flow() { // Fixup final control flow for the blocks. Remove jump-to-next // block. If neither arm of a IF follows the conditional branch, we // have to add a second jump after the conditional. We place the // TRUE branch target in succs[0] for both GOTOs and IFs. - for (uint i=0; i < _num_blocks; i++) { - Block *b = _blocks[i]; - b->_pre_order = i; // turn pre-order into block-index + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + block->_pre_order = i; // turn pre-order into block-index // Connector blocks need no further processing. - if (b->is_connector()) { - assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(), - "All connector blocks should sink to the end"); + if (block->is_connector()) { + assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end"); continue; } - assert(b->is_Empty() != Block::completely_empty, - "Empty blocks should be connectors"); + assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors"); - Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL; - Block *bs0 = b->non_connector_successor(0); + Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL; + Block* bs0 = block->non_connector_successor(0); // Check for multi-way branches where I cannot negate the test to // exchange the true and false targets. - if( no_flip_branch( b ) ) { + if (no_flip_branch(block)) { // Find fall through case - if must fall into its target - int branch_idx = b->_nodes.size() - b->_num_succs; - for (uint j2 = 0; j2 < b->_num_succs; j2++) { - const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj(); + int branch_idx = block->number_of_nodes() - block->_num_succs; + for (uint j2 = 0; j2 < block->_num_succs; j2++) { + const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj(); if (p->_con == 0) { // successor j2 is fall through case - if (b->non_connector_successor(j2) != bnext) { + if (block->non_connector_successor(j2) != bnext) { // but it is not the next block => insert a goto insert_goto_at(i, j2); } // Put taken branch in slot 0 - if( j2 == 0 && b->_num_succs == 2) { + if (j2 == 0 && block->_num_succs == 2) { // Flip targets in succs map - Block *tbs0 = b->_succs[0]; - Block *tbs1 = b->_succs[1]; - b->_succs.map( 0, tbs1 ); - b->_succs.map( 1, tbs0 ); + Block *tbs0 = block->_succs[0]; + Block *tbs1 = block->_succs[1]; + block->_succs.map(0, tbs1); + block->_succs.map(1, tbs0); } break; } } + // Remove all CatchProjs - for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop(); + for (uint j = 0; j < block->_num_succs; j++) { + block->pop_node(); + } - } else if (b->_num_succs == 1) { + } else if (block->_num_succs == 1) { // Block ends in a Goto? 
if (bnext == bs0) { // We fall into next block; remove the Goto - b->_nodes.pop(); + block->pop_node(); } - } else if( b->_num_succs == 2 ) { // Block ends in a If? + } else if(block->_num_succs == 2) { // Block ends in a If? // Get opcode of 1st projection (matches _succs[0]) // Note: Since this basic block has 2 exits, the last 2 nodes must // be projections (in any order), the 3rd last node must be // the IfNode (we have excluded other 2-way exits such as // CatchNodes already). - MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach(); - ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj(); - ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj(); + MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach(); + ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj(); + ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj(); // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1]. - assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0"); - assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1"); + assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0"); + assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1"); - Block *bs1 = b->non_connector_successor(1); + Block* bs1 = block->non_connector_successor(1); // Check for neither successor block following the current // block ending in a conditional. If so, move one of the // successors after the current one, provided that the // successor was previously unscheduled, but moveable // (i.e., all paths to it involve a branch). - if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) { + if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) { // Choose the more common successor based on the probability // of the conditional branch. - Block *bx = bs0; - Block *by = bs1; + Block* bx = bs0; + Block* by = bs1; // _prob is the probability of taking the true path. Make // p the probability of taking successor #1. float p = iff->as_MachIf()->_prob; - if( proj0->Opcode() == Op_IfTrue ) { + if (proj0->Opcode() == Op_IfTrue) { p = 1.0 - p; } @@ -826,14 +810,16 @@ // succs[1]. if (bnext == bs0) { // Fall-thru case in succs[0], so flip targets in succs map - Block *tbs0 = b->_succs[0]; - Block *tbs1 = b->_succs[1]; - b->_succs.map( 0, tbs1 ); - b->_succs.map( 1, tbs0 ); + Block* tbs0 = block->_succs[0]; + Block* tbs1 = block->_succs[1]; + block->_succs.map(0, tbs1); + block->_succs.map(1, tbs0); // Flip projection for each target - { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; } + ProjNode* tmp = proj0; + proj0 = proj1; + proj1 = tmp; - } else if( bnext != bs1 ) { + } else if(bnext != bs1) { // Need a double-branch // The existing conditional branch need not change. // Add a unconditional branch to the false target. @@ -843,12 +829,12 @@ } // Make sure we TRUE branch to the target - if( proj0->Opcode() == Op_IfFalse ) { + if (proj0->Opcode() == Op_IfFalse) { iff->as_MachIf()->negate(); } - b->_nodes.pop(); // Remove IfFalse & IfTrue projections - b->_nodes.pop(); + block->pop_node(); // Remove IfFalse & IfTrue projections + block->pop_node(); } else { // Multi-exit block, e.g. 
a switch statement @@ -858,7 +844,6 @@ } -//------------------------------dump------------------------------------------- #ifndef PRODUCT void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const { const Node *x = end->is_block_proj(); @@ -884,10 +869,11 @@ } void PhaseCFG::dump( ) const { - tty->print("\n--- CFG --- %d BBs\n",_num_blocks); + tty->print("\n--- CFG --- %d BBs\n", number_of_blocks()); if (_blocks.size()) { // Did we do basic-block layout? - for (uint i = 0; i < _num_blocks; i++) { - _blocks[i]->dump(this); + for (uint i = 0; i < number_of_blocks(); i++) { + const Block* block = get_block(i); + block->dump(this); } } else { // Else do it with a DFS VectorSet visited(_block_arena); @@ -896,27 +882,26 @@ } void PhaseCFG::dump_headers() { - for( uint i = 0; i < _num_blocks; i++ ) { - if (_blocks[i]) { - _blocks[i]->dump_head(this); + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + if (block != NULL) { + block->dump_head(this); } } } -void PhaseCFG::verify( ) const { +void PhaseCFG::verify() const { #ifdef ASSERT // Verify sane CFG - for (uint i = 0; i < _num_blocks; i++) { - Block *b = _blocks[i]; - uint cnt = b->_nodes.size(); + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + uint cnt = block->number_of_nodes(); uint j; for (j = 0; j < cnt; j++) { - Node *n = b->_nodes[j]; - assert(get_block_for_node(n) == b, ""); - if (j >= 1 && n->is_Mach() && - n->as_Mach()->ideal_Opcode() == Op_CreateEx) { - assert(j == 1 || b->_nodes[j-1]->is_Phi(), - "CreateEx must be first instruction in block"); + Node *n = block->get_node(j); + assert(get_block_for_node(n) == block, ""); + if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) { + assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block"); } for (uint k = 0; k < n->req(); k++) { Node *def = n->in(k); @@ -926,8 +911,7 @@ // Uses must follow their definition if they are at the same block. // Mostly done to check that MachSpillCopy nodes are placed correctly // when CreateEx node is moved in build_ifg_physical(). 
- if (get_block_for_node(def) == b && - !(b->head()->is_Loop() && n->is_Phi()) && + if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) && // See (+++) comment in reg_split.cpp !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) { bool is_loop = false; @@ -939,29 +923,29 @@ } } } - assert(is_loop || b->find_node(def) < j, "uses must follow definitions"); + assert(is_loop || block->find_node(def) < j, "uses must follow definitions"); } } } } - j = b->end_idx(); - Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj(); - assert( bp, "last instruction must be a block proj" ); - assert( bp == b->_nodes[j], "wrong number of successors for this block" ); + j = block->end_idx(); + Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj(); + assert(bp, "last instruction must be a block proj"); + assert(bp == block->get_node(j), "wrong number of successors for this block"); if (bp->is_Catch()) { - while (b->_nodes[--j]->is_MachProj()) ; - assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call"); + while (block->get_node(--j)->is_MachProj()) { + ; + } + assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call"); } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) { - assert(b->_num_succs == 2, "Conditional branch must have two targets"); + assert(block->_num_succs == 2, "Conditional branch must have two targets"); } } #endif } #endif -//============================================================================= -//------------------------------UnionFind-------------------------------------- UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) { Copy::zero_to_bytes( _indices, sizeof(uint)*max ); } @@ -986,7 +970,6 @@ for( uint i=0; ifreq(); @@ -1087,7 +1065,6 @@ return dist1 - dist0; } -//------------------------------trace_frequency_order-------------------------- // Comparison function for edges extern "C" int trace_frequency_order(const void *p0, const void *p1) { Trace *tr0 = *(Trace **) p0; @@ -1113,17 +1090,15 @@ return diff; } -//------------------------------find_edges------------------------------------- // Find edges of interest, i.e, those which can fall through. Presumes that // edges which don't fall through are of low frequency and can be generally // ignored. Initialize the list of traces. 
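The UnionFind shown above is what lets find_edges (below) and the merge passes treat trace membership as near-constant-time queries: each block's pre-order number maps to a trace id, and appended traces are united. A minimal version with path compression in the same spirit (simplified; not the HotSpot class):

    #include <vector>

    struct UnionFindSketch {
      std::vector<unsigned> parent;
      explicit UnionFindSketch(unsigned n) : parent(n) {
        for (unsigned i = 0; i < n; i++) parent[i] = i; // identity mapping
      }
      unsigned find(unsigned i) {
        while (parent[i] != i) {
          parent[i] = parent[parent[i]]; // compress the path as we climb
          i = parent[i];
        }
        return i;
      }
      void unite(unsigned keep, unsigned merge) { parent[find(merge)] = find(keep); }
    };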
-void PhaseBlockLayout::find_edges() -{ +void PhaseBlockLayout::find_edges() { // Walk the blocks, creating edges and Traces uint i; Trace *tr = NULL; - for (i = 0; i < _cfg._num_blocks; i++) { - Block *b = _cfg._blocks[i]; + for (i = 0; i < _cfg.number_of_blocks(); i++) { + Block* b = _cfg.get_block(i); tr = new Trace(b, next, prev); traces[tr->id()] = tr; @@ -1147,7 +1122,7 @@ if (n->num_preds() != 1) break; i++; - assert(n = _cfg._blocks[i], "expecting next block"); + assert(n = _cfg.get_block(i), "expecting next block"); tr->append(n); uf->map(n->_pre_order, tr->id()); traces[n->_pre_order] = NULL; @@ -1171,8 +1146,8 @@ } // Group connector blocks into one trace - for (i++; i < _cfg._num_blocks; i++) { - Block *b = _cfg._blocks[i]; + for (i++; i < _cfg.number_of_blocks(); i++) { + Block *b = _cfg.get_block(i); assert(b->is_connector(), "connector blocks at the end"); tr->append(b); uf->map(b->_pre_order, tr->id()); @@ -1180,10 +1155,8 @@ } } -//------------------------------union_traces---------------------------------- // Union two traces together in uf, and null out the trace in the list -void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) -{ +void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) { uint old_id = old_trace->id(); uint updated_id = updated_trace->id(); @@ -1207,10 +1180,8 @@ traces[hi_id] = NULL; } -//------------------------------grow_traces------------------------------------- // Append traces together via the most frequently executed edges -void PhaseBlockLayout::grow_traces() -{ +void PhaseBlockLayout::grow_traces() { // Order the edges, and drive the growth of Traces via the most // frequently executed edges. edges->sort(edge_order); @@ -1252,11 +1223,9 @@ } } -//------------------------------merge_traces----------------------------------- // Embed one trace into another, if the fork or join points are sufficiently // balanced. -void PhaseBlockLayout::merge_traces(bool fall_thru_only) -{ +void PhaseBlockLayout::merge_traces(bool fall_thru_only) { // Walk the edge list another time, looking at unprocessed edges. // Fold in diamonds for (int i = 0; i < edges->length(); i++) { @@ -1310,7 +1279,7 @@ src_trace->insert_after(src_block, targ_trace); union_traces(src_trace, targ_trace); } else if (src_at_tail) { - if (src_trace != trace(_cfg._broot)) { + if (src_trace != trace(_cfg.get_root_block())) { e->set_state(CFGEdge::connected); targ_trace->insert_before(targ_block, src_trace); union_traces(targ_trace, src_trace); @@ -1319,7 +1288,7 @@ } else if (e->state() == CFGEdge::open) { // Append traces, even without a fall-thru connection. // But leave root entry at the beginning of the block list. - if (targ_trace != trace(_cfg._broot)) { + if (targ_trace != trace(_cfg.get_root_block())) { e->set_state(CFGEdge::connected); src_trace->append(targ_trace); union_traces(src_trace, targ_trace); @@ -1328,11 +1297,9 @@ } } -//----------------------------reorder_traces----------------------------------- // Order the sequence of the traces in some desirable way, and fixup the // jumps at the end of each block. -void PhaseBlockLayout::reorder_traces(int count) -{ +void PhaseBlockLayout::reorder_traces(int count) { ResourceArea *area = Thread::current()->resource_area(); Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count); Block_List worklist; @@ -1347,15 +1314,14 @@ } // The entry block should be first on the new trace list.
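reorder_traces pins the entry trace at slot 0 and qsorts the remainder through trace_frequency_order. A comparator sketch in the same shape (the freq and id keys are stand-ins; the real ordering may use additional tie-breaks):

    #include <cstdlib>

    struct TraceSketch { float freq; int id; };

    // qsort ascending comparator arranged so hotter traces sort first.
    extern "C" int trace_frequency_order_sketch(const void* p0, const void* p1) {
      const TraceSketch* t0 = *(TraceSketch* const*)p0; // elements are Trace*
      const TraceSketch* t1 = *(TraceSketch* const*)p1;
      if (t0->freq != t1->freq) return (t1->freq > t0->freq) ? 1 : -1;
      return t0->id - t1->id; // deterministic tie-break
    }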
- Trace *tr = trace(_cfg._broot); + Trace *tr = trace(_cfg.get_root_block()); assert(tr == new_traces[0], "entry trace misplaced"); // Sort the new trace list by frequency qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order); // Patch up the successor blocks - _cfg._blocks.reset(); - _cfg._num_blocks = 0; + _cfg.clear_blocks(); for (int i = 0; i < new_count; i++) { Trace *tr = new_traces[i]; if (tr != NULL) { @@ -1364,17 +1330,15 @@ } } -//------------------------------PhaseBlockLayout------------------------------- // Order basic blocks based on frequency -PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) : - Phase(BlockLayout), - _cfg(cfg) -{ +PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) +: Phase(BlockLayout) +, _cfg(cfg) { ResourceMark rm; ResourceArea *area = Thread::current()->resource_area(); // List of traces - int size = _cfg._num_blocks + 1; + int size = _cfg.number_of_blocks() + 1; traces = NEW_ARENA_ARRAY(area, Trace *, size); memset(traces, 0, size*sizeof(Trace*)); next = NEW_ARENA_ARRAY(area, Block *, size); @@ -1407,11 +1371,10 @@ // Re-order all the remaining traces by frequency reorder_traces(size); - assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink"); + assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink"); } -//------------------------------backedge--------------------------------------- // Edge e completes a loop in a trace. If the target block is head of the // loop, rotate the loop block so that the loop ends in a conditional branch. bool Trace::backedge(CFGEdge *e) { @@ -1463,14 +1426,12 @@ return loop_rotated; } -//------------------------------fixup_blocks----------------------------------- // push blocks onto the CFG list // ensure that blocks have the correct two-way branch sense void Trace::fixup_blocks(PhaseCFG &cfg) { Block *last = last_block(); for (Block *b = first_block(); b != NULL; b = next(b)) { - cfg._blocks.push(b); - cfg._num_blocks++; + cfg.add_block(b); if (!b->is_connector()) { int nfallthru = b->num_fall_throughs(); if (b != last) { @@ -1479,9 +1440,9 @@ Block *bnext = next(b); Block *bs0 = b->non_connector_successor(0); - MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach(); - ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj(); - ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj(); + MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach(); + ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj(); + ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj(); if (bnext == bs0) { // Fall-thru case in succs[0], should be in succs[1] @@ -1493,8 +1454,8 @@ b->_succs.map( 1, tbs0 ); // Flip projections to match targets - b->_nodes.map(b->_nodes.size()-2, proj1); - b->_nodes.map(b->_nodes.size()-1, proj0); + b->map_node(proj1, b->number_of_nodes() - 2); + b->map_node(proj0, b->number_of_nodes() - 1); } } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/block.hpp --- a/src/share/vm/opto/block.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/block.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -105,15 +105,53 @@ // any optimization pass. They are created late in the game. 
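The Block hunk that follows makes _nodes private and funnels every touch of the node list through a small accessor surface; the block.cpp rewrites above are the mechanical fallout. A stand-alone sketch of the pattern (std::vector filling in for Node_List):

    #include <vector>

    class BlockSketch {
    private:
      std::vector<int> _nodes; // stand-in for the Node_List of Node*
    public:
      int    get_node(size_t at_index) const { return _nodes[at_index]; }
      size_t number_of_nodes() const         { return _nodes.size(); }
      void   push_node(int node)             { _nodes.push_back(node); }
      int    pop_node() { int n = _nodes.back(); _nodes.pop_back(); return n; }
    };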
class Block : public CFGElement { friend class VMStructs; - public: + +private: // Nodes in this block, in order Node_List _nodes; +public: + + // Get the node at index 'at_index', if 'at_index' is out of bounds return NULL + Node* get_node(uint at_index) const { + return _nodes[at_index]; + } + + // Get the number of nodes in this block + uint number_of_nodes() const { + return _nodes.size(); + } + + // Map a node 'node' to index 'to_index' in the block, if the index is out of bounds the size of the node list is increased + void map_node(Node* node, uint to_index) { + _nodes.map(to_index, node); + } + + // Insert a node 'node' at index 'at_index', moving all nodes that are on a higher index one step, if 'at_index' is out of bounds we crash + void insert_node(Node* node, uint at_index) { + _nodes.insert(at_index, node); + } + + // Remove a node at index 'at_index' + void remove_node(uint at_index) { + _nodes.remove(at_index); + } + + // Push a node 'node' onto the node list + void push_node(Node* node) { + _nodes.push(node); + } + + // Pop the last node off the node list + Node* pop_node() { + return _nodes.pop(); + } + // Basic blocks have a Node which defines Control for all Nodes pinned in // this block. This Node is a RegionNode. Exception-causing Nodes // (division, subroutines) and Phi functions are always pinned. Later, // every Node will get pinned to some block. - Node *head() const { return _nodes[0]; } + Node *head() const { return get_node(0); } // CAUTION: num_preds() is ONE based, so that predecessor numbers match // input edges to Regions and Phis. @@ -274,29 +312,12 @@ // Add an instruction to an existing block. It must go after the head // instruction and before the end instruction. - void add_inst( Node *n ) { _nodes.insert(end_idx(),n); } + void add_inst( Node *n ) { insert_node(n, end_idx()); } // Find node in block uint find_node( const Node *n ) const; // Find and remove n from block list void find_remove( const Node *n ); - // helper function that adds caller save registers to MachProjNode - void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe); - // Schedule a call next in the block - uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray &ready_cnt, MachCallNode *mcall, VectorSet &next_call); - - // Perform basic-block local scheduling - Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray &ready_cnt, VectorSet &next_call, uint sched_slot); - void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg); - void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg); - bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray &ready_cnt, VectorSet &next_call); - // Cleanup if any code lands between a Call and his Catch - void call_catch_cleanup(PhaseCFG* cfg, Compile *C); - // Detect implicit-null-check opportunities. Basically, find NULL checks - // with suitable memory ops nearby. Use the memory op to do the NULL check. - // I can generate a memory op if there is not one nearby. - void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons); - // Return the empty status of a block enum { not_empty, empty_with_goto, completely_empty }; int is_Empty() const; @@ -328,10 +349,6 @@ // Examine block's code shape to predict if it is not commonly executed. bool has_uncommon_code() const; - // Use frequency calculations and code shape to predict if the block - // is uncommon. 
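[Editorial sketch] The block.hpp hunk above makes Block::_nodes private and routes all access through small accessors (get_node, number_of_nodes, map_node, insert_node, push_node, pop_node), which is exactly what the mechanical _nodes[...] rewrites throughout the .cpp files consume. A toy model of the same accessor surface, using std::vector in place of Node_List; the grow-on-write behavior of map_node mirrors the Node_List::map semantics described in the hunk's comments, and the bounds handling here is simplified.

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins; HotSpot's Block wraps a Node_List, not std::vector.
struct Node { int idx; };

class Block {
 private:
  std::vector<Node*> _nodes;  // now private, as in the hunk above
 public:
  Node* get_node(size_t at_index) const { return _nodes[at_index]; }
  size_t number_of_nodes() const { return _nodes.size(); }
  void push_node(Node* n) { _nodes.push_back(n); }
  // Insert at an index, shifting later nodes one step up.
  void insert_node(Node* n, size_t at_index) {
    _nodes.insert(_nodes.begin() + at_index, n);
  }
  // Overwrite in place, growing the list if needed (Node_List::map style).
  void map_node(Node* n, size_t to_index) {
    if (to_index >= _nodes.size()) _nodes.resize(to_index + 1, nullptr);
    _nodes[to_index] = n;
  }
};

int main() {
  Node a{1}, b{2}, c{3};
  Block blk;
  blk.push_node(&a);
  blk.push_node(&c);
  blk.insert_node(&b, 1);             // order: a, b, c
  assert(blk.number_of_nodes() == 3);
  blk.map_node(&a, 2);                // overwrite the last slot: a, b, a
  std::printf("last node idx = %d\n", blk.get_node(2)->idx);
  return 0;
}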
- bool is_uncommon(PhaseCFG* cfg) const; - #ifndef PRODUCT // Debugging print of basic block void dump_bidx(const Block* orig, outputStream* st = tty) const; @@ -348,20 +365,98 @@ class PhaseCFG : public Phase { friend class VMStructs; private: + + // Root of whole program + RootNode* _root; + + // The block containing the root node + Block* _root_block; + + // List of basic blocks that are created during CFG creation + Block_List _blocks; + + // Count of basic blocks + uint _number_of_blocks; + // Arena for the blocks to be stored in Arena* _block_arena; + // The matcher for this compilation + Matcher& _matcher; + // Map nodes to owning basic block Block_Array _node_to_block_mapping; + // Loop from the root + CFGLoop* _root_loop; + + // Outmost loop frequency + float _outer_loop_frequency; + + // Per node latency estimation, valid only during GCM + GrowableArray* _node_latency; + // Build a proper looking cfg. Return count of basic blocks uint build_cfg(); - // Perform DFS search. + // Build the dominator tree so that we know where we can move instructions + void build_dominator_tree(); + + // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions + void estimate_block_frequency(); + + // Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific + // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block. + // Move nodes to ensure correctness from GVN and also try to move nodes out of loops. + void global_code_motion(); + + // Schedule Nodes early in their basic blocks. + bool schedule_early(VectorSet &visited, Node_List &roots); + + // For each node, find the latest block it can be scheduled into + // and then select the cheapest block between the latest and earliest + // block to place the node. + void schedule_late(VectorSet &visited, Node_List &stack); + + // Compute the (backwards) latency of a node from a single use + int latency_from_use(Node *n, const Node *def, Node *use); + + // Compute the (backwards) latency of a node from the uses of this instruction + void partial_latency_of_defs(Node *n); + + // Compute the instruction global latency with a backwards walk + void compute_latencies_backwards(VectorSet &visited, Node_List &stack); + + // Pick a block between early and late that is a cheaper alternative + // to late. Helper for schedule_late. + Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self); + + bool schedule_local(Block* block, GrowableArray& ready_cnt, VectorSet& next_call); + void set_next_call(Block* block, Node* n, VectorSet& next_call); + void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call); + + // Perform basic-block local scheduling + Node* select(Block* block, Node_List& worklist, GrowableArray& ready_cnt, VectorSet& next_call, uint sched_slot); + + // Schedule a call next in the block + uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray& ready_cnt, MachCallNode* mcall, VectorSet& next_call); + + // Cleanup if any code lands between a Call and his Catch + void call_catch_cleanup(Block* block); + + Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx); + void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx); + + // Detect implicit-null-check opportunities. Basically, find NULL checks + // with suitable memory ops nearby. Use the memory op to do the NULL check. 
+ // I can generate a memory op if there is not one nearby. + void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons); + + // Perform a Depth First Search (DFS). // Setup 'vertex' as DFS to vertex mapping. // Setup 'semi' as vertex to DFS mapping. // Set 'parent' to DFS parent. - uint DFS( Tarjan *tarjan ); + uint do_DFS(Tarjan* tarjan, uint rpo_counter); // Helper function to insert a node into a block void schedule_node_into_block( Node *n, Block *b ); @@ -372,7 +467,8 @@ void schedule_pinned_nodes( VectorSet &visited ); // I'll need a few machine-specific GotoNodes. Clone from this one. - MachNode *_goto; + // Used when building the CFG and creating end nodes for blocks. + MachNode* _goto; Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false); void verify_anti_dependences(Block* LCA, Node* load) { @@ -380,17 +476,77 @@ insert_anti_dependences(LCA, load, true); } + bool move_to_next(Block* bx, uint b_index); + void move_to_end(Block* bx, uint b_index); + + void insert_goto_at(uint block_no, uint succ_no); + + // Check for NeverBranch at block end. This needs to become a GOTO to the + // true target. NeverBranch are treated as a conditional branch that always + // goes the same direction for most of the optimizer and are used to give a + // fake exit path to infinite loops. At this late stage they need to turn + // into Goto's so that when you enter the infinite loop you indeed hang. + void convert_NeverBranch_to_Goto(Block *b); + + CFGLoop* create_loop_tree(); + + #ifndef PRODUCT + bool _trace_opto_pipelining; // tracing flag + #endif + public: PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher); - uint _num_blocks; // Count of basic blocks - Block_List _blocks; // List of basic blocks - RootNode *_root; // Root of whole program - Block *_broot; // Basic block of root - uint _rpo_ctr; - CFGLoop* _root_loop; - float _outer_loop_freq; // Outmost loop frequency + void set_latency_for_node(Node* node, int latency) { + _node_latency->at_put_grow(node->_idx, latency); + } + + uint get_latency_for_node(Node* node) { + return _node_latency->at_grow(node->_idx); + } + + // Get the outer most frequency + float get_outer_loop_frequency() const { + return _outer_loop_frequency; + } + + // Get the root node of the CFG + RootNode* get_root_node() const { + return _root; + } + + // Get the block of the root node + Block* get_root_block() const { + return _root_block; + } + // Add a block at a position and moves the later ones one step + void add_block_at(uint pos, Block* block) { + _blocks.insert(pos, block); + _number_of_blocks++; + } + + // Adds a block to the top of the block list + void add_block(Block* block) { + _blocks.push(block); + _number_of_blocks++; + } + + // Clear the list of blocks + void clear_blocks() { + _blocks.reset(); + _number_of_blocks = 0; + } + + // Get the block at position pos in _blocks + Block* get_block(uint pos) const { + return _blocks[pos]; + } + + // Number of blocks + uint number_of_blocks() const { + return _number_of_blocks; + } // set which block this node should reside in void map_node_to_block(const Node* node, Block* block) { @@ -412,73 +568,31 @@ return (_node_to_block_mapping.lookup(node->_idx) != NULL); } - // Per node latency estimation, valid only during GCM - GrowableArray *_node_latency; - -#ifndef PRODUCT - bool _trace_opto_pipelining; // tracing flag -#endif + // Use frequency calculations and code shape to predict if the block + // is uncommon. 
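[Editorial sketch] The PhaseCFG accessors above replace the old public _blocks/_num_blocks pair; the invariant they maintain is simply that every mutation of the block list also updates the counter, so call sites can switch from _cfg._num_blocks to number_of_blocks() without behavior change. A hypothetical standalone model (std::vector instead of Block_List, unsigned instead of uint):

#include <cassert>
#include <vector>

struct Block { int pre_order; };

class PhaseCFG {
 private:
  std::vector<Block*> _blocks;       // list of basic blocks
  unsigned _number_of_blocks = 0;    // kept in sync with the list
 public:
  void add_block(Block* b) { _blocks.push_back(b); _number_of_blocks++; }
  // Insert at a position, moving later blocks one step up.
  void add_block_at(unsigned pos, Block* b) {
    _blocks.insert(_blocks.begin() + pos, b);
    _number_of_blocks++;
  }
  void clear_blocks() { _blocks.clear(); _number_of_blocks = 0; }
  Block* get_block(unsigned pos) const { return _blocks[pos]; }
  unsigned number_of_blocks() const { return _number_of_blocks; }
};

int main() {
  Block b0{0}, b1{1}, b2{2};
  PhaseCFG cfg;
  cfg.add_block(&b0);
  cfg.add_block(&b2);
  cfg.add_block_at(1, &b1);  // shifts b2 one step
  assert(cfg.number_of_blocks() == 3 && cfg.get_block(1) == &b1);
  cfg.clear_blocks();        // replaces the old _blocks.reset(); _num_blocks = 0;
  assert(cfg.number_of_blocks() == 0);
  return 0;
}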
+ bool is_uncommon(const Block* block); #ifdef ASSERT Unique_Node_List _raw_oops; #endif - // Build dominators - void Dominators(); - - // Estimate block frequencies based on IfNode probabilities - void Estimate_Block_Frequency(); - - // Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific - // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block. - void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list ); + // Do global code motion by first building dominator tree and estimate block frequency + // Returns true on success + bool do_global_code_motion(); // Compute the (backwards) latency of a node from the uses void latency_from_uses(Node *n); - // Compute the (backwards) latency of a node from a single use - int latency_from_use(Node *n, const Node *def, Node *use); - - // Compute the (backwards) latency of a node from the uses of this instruction - void partial_latency_of_defs(Node *n); - - // Schedule Nodes early in their basic blocks. - bool schedule_early(VectorSet &visited, Node_List &roots); - - // For each node, find the latest block it can be scheduled into - // and then select the cheapest block between the latest and earliest - // block to place the node. - void schedule_late(VectorSet &visited, Node_List &stack); - - // Pick a block between early and late that is a cheaper alternative - // to late. Helper for schedule_late. - Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self); - - // Compute the instruction global latency with a backwards walk - void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack); - // Set loop alignment void set_loop_alignment(); // Remove empty basic blocks - void remove_empty(); + void remove_empty_blocks(); void fixup_flow(); - bool move_to_next(Block* bx, uint b_index); - void move_to_end(Block* bx, uint b_index); - void insert_goto_at(uint block_no, uint succ_no); - // Check for NeverBranch at block end. This needs to become a GOTO to the - // true target. NeverBranch are treated as a conditional branch that always - // goes the same direction for most of the optimizer and are used to give a - // fake exit path to infinite loops. At this late stage they need to turn - // into Goto's so that when you enter the infinite loop you indeed hang. - void convert_NeverBranch_to_Goto(Block *b); - - CFGLoop* create_loop_tree(); - - // Insert a node into a block, and update the _bbs - void insert( Block *b, uint idx, Node *n ) { - b->_nodes.insert( idx, n ); + // Insert a node into a block at index and map the node to the block + void insert(Block *b, uint idx, Node *n) { + b->insert_node(n , idx); map_node_to_block(n, b); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/buildOopMap.cpp --- a/src/share/vm/opto/buildOopMap.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/buildOopMap.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -87,7 +87,6 @@ // OptoReg::Bad for not-callee-saved. 
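[Editorial sketch] Among the methods moved into PhaseCFG's private section above, schedule_late uses hoist_to_cheaper_block to pick the least frequently executed block on the dominator chain between a node's earliest and latest legal positions, which is what moves loop-invariant work out of hot loops during global code motion. A standalone sketch of that selection walk, assuming only that each block carries an idom pointer and an estimated frequency; this is the idea, not HotSpot's implementation.

#include <cstdio>

// Hypothetical block with an immediate-dominator link and a frequency.
struct Block {
  int id;
  Block* idom;   // immediate dominator
  double freq;   // estimated execution frequency
};

static Block* hoist_to_cheaper_block(Block* late, Block* early) {
  Block* best = late;
  // Walk the dominator chain from the latest legal position up to the
  // earliest one, keeping the least frequently executed block seen.
  for (Block* b = late; ; b = b->idom) {
    if (b->freq < best->freq) best = b;
    if (b == early) break;
  }
  return best;
}

int main() {
  Block entry = {0, nullptr, 1.0};
  Block loop  = {1, &entry, 100.0};   // hot loop header
  Block body  = {2, &loop, 100.0};    // hot loop body
  // A node usable as late as 'body' but legal as early as 'entry'
  // should be hoisted out of the loop entirely.
  Block* placed = hoist_to_cheaper_block(&body, &entry);
  std::printf("placed in block %d\n", placed->id);  // prints block 0
  return 0;
}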
-//------------------------------OopFlow---------------------------------------- // Structure to pass around struct OopFlow : public ResourceObj { short *_callees; // Array mapping register to callee-saved @@ -119,12 +118,11 @@ OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ); }; -//------------------------------compute_reach---------------------------------- // Given reaching-defs for this block start, compute it for this block end void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) { - for( uint i=0; i<_b->_nodes.size(); i++ ) { - Node *n = _b->_nodes[i]; + for( uint i=0; i<_b->number_of_nodes(); i++ ) { + Node *n = _b->get_node(i); if( n->jvms() ) { // Build an OopMap here? JVMState *jvms = n->jvms(); @@ -177,7 +175,6 @@ } } -//------------------------------merge------------------------------------------ // Merge the given flow into the 'this' flow void OopFlow::merge( OopFlow *flow, int max_reg ) { assert( _b == NULL, "merging into a happy flow" ); @@ -197,14 +194,12 @@ } -//------------------------------clone------------------------------------------ void OopFlow::clone( OopFlow *flow, int max_size ) { _b = flow->_b; memcpy( _callees, flow->_callees, sizeof(short)*max_size); memcpy( _defs , flow->_defs , sizeof(Node*)*max_size); } -//------------------------------make------------------------------------------- OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) { short *callees = NEW_ARENA_ARRAY(A,short,max_size+1); Node **defs = NEW_ARENA_ARRAY(A,Node*,max_size+1); @@ -215,7 +210,6 @@ return flow; } -//------------------------------bit twiddlers---------------------------------- static int get_live_bit( int *live, int reg ) { return live[reg>>LogBitsPerInt] & (1<<(reg&(BitsPerInt-1))); } static void set_live_bit( int *live, int reg ) { @@ -223,7 +217,6 @@ static void clr_live_bit( int *live, int reg ) { live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); } -//------------------------------build_oop_map---------------------------------- // Build an oopmap from the current flow info OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) { int framesize = regalloc->_framesize; @@ -412,19 +405,18 @@ return omap; } -//------------------------------do_liveness------------------------------------ // Compute backwards liveness on registers -static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *worklist, int max_reg_ints, Arena *A, Dict *safehash ) { - int *live = NEW_ARENA_ARRAY(A, int, (cfg->_num_blocks+1) * max_reg_ints); - int *tmp_live = &live[cfg->_num_blocks * max_reg_ints]; - Node *root = cfg->C->root(); +static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* worklist, int max_reg_ints, Arena* A, Dict* safehash) { + int* live = NEW_ARENA_ARRAY(A, int, (cfg->number_of_blocks() + 1) * max_reg_ints); + int* tmp_live = &live[cfg->number_of_blocks() * max_reg_ints]; + Node* root = cfg->get_root_node(); // On CISC platforms, get the node representing the stack pointer that regalloc // used for spills Node *fp = NodeSentinel; if (UseCISCSpill && root->req() > 1) { fp = root->in(1)->in(TypeFunc::FramePtr); } - memset( live, 0, cfg->_num_blocks * (max_reg_ints<number_of_blocks() * (max_reg_ints << LogBytesPerInt)); // Push preds onto worklist for (uint i = 1; i < root->req(); i++) { Block* block = cfg->get_block_for_node(root->in(i)); @@ -455,8 +447,8 @@ } // Now walk tmp_live up the block backwards, computing live - for( int 
k=b->_nodes.size()-1; k>=0; k-- ) { - Node *n = b->_nodes[k]; + for( int k=b->number_of_nodes()-1; k>=0; k-- ) { + Node *n = b->get_node(k); // KILL def'd bits int first = regalloc->get_reg_first(n); int second = regalloc->get_reg_second(n); @@ -549,29 +541,32 @@ // Scan for any missing safepoints. Happens to infinite loops // ala ZKM.jar uint i; - for( i=1; i_num_blocks; i++ ) { - Block *b = cfg->_blocks[i]; + for (i = 1; i < cfg->number_of_blocks(); i++) { + Block* block = cfg->get_block(i); uint j; - for( j=1; j_nodes.size(); j++ ) - if( b->_nodes[j]->jvms() && - (*safehash)[b->_nodes[j]] == NULL ) + for (j = 1; j < block->number_of_nodes(); j++) { + if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) { break; - if( j_nodes.size() ) break; + } + } + if (j < block->number_of_nodes()) { + break; + } } - if( i == cfg->_num_blocks ) + if (i == cfg->number_of_blocks()) { break; // Got 'em all + } #ifndef PRODUCT if( PrintOpto && Verbose ) tty->print_cr("retripping live calc"); #endif // Force the issue (expensively): recheck everybody - for( i=1; i_num_blocks; i++ ) - worklist->push(cfg->_blocks[i]); + for (i = 1; i < cfg->number_of_blocks(); i++) { + worklist->push(cfg->get_block(i)); + } } - } -//------------------------------BuildOopMaps----------------------------------- // Collect GC mask info - where are all the OOPs? void Compile::BuildOopMaps() { NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); ) @@ -592,12 +587,12 @@ OopFlow *free_list = NULL; // Free, unused // Array mapping blocks to completed oopflows - OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->_num_blocks); - memset( flows, 0, _cfg->_num_blocks*sizeof(OopFlow*) ); + OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->number_of_blocks()); + memset( flows, 0, _cfg->number_of_blocks() * sizeof(OopFlow*) ); // Do the first block 'by hand' to prime the worklist - Block *entry = _cfg->_blocks[1]; + Block *entry = _cfg->get_block(1); OopFlow *rootflow = OopFlow::make(A,max_reg,this); // Initialize to 'bottom' (not 'top') memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) ); @@ -623,7 +618,9 @@ Block *b = worklist.pop(); // Ignore root block - if( b == _cfg->_broot ) continue; + if (b == _cfg->get_root_block()) { + continue; + } // Block is already done? Happens if block has several predecessors, // he can get on the worklist more than once. 
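[Editorial sketch] do_liveness above operates on flat int arrays of live bits, one word per 32 registers, through the get/set/clr_live_bit helpers whose banner comment the hunk removes. The indexing is worth seeing in isolation; this compiles standalone with HotSpot's LogBitsPerInt/BitsPerInt values written out as plain constants:

#include <cassert>

// Register r lives in word r / 32, bit r % 32 -- the same arithmetic as the
// bit twiddlers in the hunk above.
const int LogBitsPerInt = 5;
const int BitsPerInt = 32;

static int get_live_bit(int* live, int reg) {
  return live[reg >> LogBitsPerInt] & (1 << (reg & (BitsPerInt - 1)));
}
static void set_live_bit(int* live, int reg) {
  live[reg >> LogBitsPerInt] |= (1 << (reg & (BitsPerInt - 1)));
}
static void clr_live_bit(int* live, int reg) {
  live[reg >> LogBitsPerInt] &= ~(1 << (reg & (BitsPerInt - 1)));
}

int main() {
  int live[4] = {0, 0, 0, 0};   // room for 128 registers
  set_live_bit(live, 37);       // word 1, bit 5
  assert(get_live_bit(live, 37) != 0);
  assert(get_live_bit(live, 36) == 0);
  clr_live_bit(live, 37);
  assert(get_live_bit(live, 37) == 0);
  return 0;
}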
if( flows[b->_pre_order] ) continue; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/bytecodeInfo.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -123,7 +123,7 @@ // Allows targeted inlining if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); - if (PrintInlining && Verbose) { + if (C->print_inlining() && Verbose) { CompileTask::print_inline_indent(inline_level()); tty->print_cr("Inlined method is hot: "); } @@ -137,7 +137,7 @@ if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) { wci_result->set_profit(wci_result->profit() * 100); - if (PrintInlining && Verbose) { + if (C->print_inlining() && Verbose) { CompileTask::print_inline_indent(inline_level()); tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count()); } @@ -495,7 +495,7 @@ C->log()->inline_fail(inline_msg); } } - if (PrintInlining) { + if (C->print_inlining()) { C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg); if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); if (Verbose && callee_method) { @@ -544,7 +544,7 @@ #ifndef PRODUCT if (UseOldInlining && InlineWarmCalls - && (PrintOpto || PrintOptoInlining || PrintInlining)) { + && (PrintOpto || C->print_inlining())) { bool cold = wci.is_cold(); bool hot = !cold && wci.is_hot(); bool old_cold = !success; @@ -621,7 +621,7 @@ callee_method->is_compiled_lambda_form()) { max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem } - if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) { + if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) { CompileTask::print_inline_indent(inline_level()); tty->print_cr(" \\-> discounting inline depth"); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/c2_globals.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -421,7 +421,7 @@ product(bool, UseDivMod, true, \ "Use combined DivMod instruction if available") \ \ - product(intx, MinJumpTableSize, 18, \ + product_pd(intx, MinJumpTableSize, \ "Minimum number of targets in a generated jump table") \ \ product(intx, MaxJumpTableSize, 65000, \ @@ -448,6 +448,9 @@ product(bool, EliminateAutoBox, true, \ "Control optimizations for autobox elimination") \ \ + experimental(bool, UseImplicitStableValues, false, \ + "Mark well-known stable fields as such (e.g. String.value)") \ + \ product(intx, AutoBoxCacheMax, 128, \ "Sets max value cached by the java.lang.Integer autobox cache") \ \ @@ -633,7 +636,9 @@ \ diagnostic(bool, OptimizeExpensiveOps, true, \ "Find best control for expensive operations") \ - + \ + product(bool, UseMathExactIntrinsics, true, \ + "Enables intrinsification of various java.lang.Math funcitons") C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/callGenerator.hpp --- a/src/share/vm/opto/callGenerator.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/callGenerator.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,6 +65,8 @@ virtual bool is_predicted() const { return false; } // is_trap: Does not return to the caller. (E.g., uncommon trap.) virtual bool is_trap() const { return false; } + // does_virtual_dispatch: Should try inlining as normal method first. + virtual bool does_virtual_dispatch() const { return false; } // is_late_inline: supports conversion of call into an inline virtual bool is_late_inline() const { return false; } @@ -159,8 +161,9 @@ virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); } static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) { - if (PrintInlining) + if (C->print_inlining()) { C->print_inlining(callee, inline_level, bci, msg); + } } }; @@ -260,7 +263,7 @@ // Because WarmInfo objects live over the entire lifetime of the // Compile object, they are allocated into the comp_arena, which // does not get resource marked or reset during the compile process - void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); } + void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); } void operator delete( void * ) { } // fast deallocation static WarmCallInfo* always_hot(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/callnode.cpp --- a/src/share/vm/opto/callnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/callnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -458,7 +458,7 @@ st->print("={"); uint nf = spobj->n_fields(); if (nf > 0) { - uint first_ind = spobj->first_index(); + uint first_ind = spobj->first_index(mcall->jvms()); Node* fld_node = mcall->in(first_ind); ciField* cifield; if (iklass != NULL) { @@ -1063,7 +1063,6 @@ int scloff = jvms->scloff(); int endoff = jvms->endoff(); assert(endoff == (int)req(), "no other states or debug info after me"); - assert(jvms->scl_size() == 0, "parsed code should not have scalar objects"); Node* top = Compile::current()->top(); for (uint i = 0; i < grow_by; i++) { ins_req(monoff, top); @@ -1079,32 +1078,31 @@ const int MonitorEdges = 2; assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); assert(req() == jvms()->endoff(), "correct sizing"); - assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects"); int nextmon = jvms()->scloff(); if (GenerateSynchronizationCode) { - add_req(lock->box_node()); - add_req(lock->obj_node()); + ins_req(nextmon, lock->box_node()); + ins_req(nextmon+1, lock->obj_node()); } else { Node* top = Compile::current()->top(); - add_req(top); - add_req(top); + ins_req(nextmon, top); + ins_req(nextmon, top); } - jvms()->set_scloff(nextmon+MonitorEdges); + jvms()->set_scloff(nextmon + MonitorEdges); jvms()->set_endoff(req()); } void SafePointNode::pop_monitor() { // Delete last monitor from debug info - assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects"); debug_only(int num_before_pop = jvms()->nof_monitors()); - const int MonitorEdges = (1<scloff(); int endoff = jvms()->endoff(); int new_scloff = scloff - MonitorEdges; int new_endoff = endoff - MonitorEdges; jvms()->set_scloff(new_scloff); jvms()->set_endoff(new_endoff); - while (scloff > new_scloff) del_req(--scloff); + while (scloff > new_scloff) del_req_ordered(--scloff); assert(jvms()->nof_monitors() == num_before_pop-1, 
""); } @@ -1169,13 +1167,12 @@ } SafePointScalarObjectNode* -SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const { +SafePointScalarObjectNode::clone(Dict* sosn_map) const { void* cached = (*sosn_map)[(void*)this]; if (cached != NULL) { return (SafePointScalarObjectNode*)cached; } SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); - res->_first_index += jvms_adj; sosn_map->Insert((void*)this, (void*)res); return res; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/callnode.hpp --- a/src/share/vm/opto/callnode.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/callnode.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -216,7 +216,7 @@ // Because JVMState objects live over the entire lifetime of the // Compile object, they are allocated into the comp_arena, which // does not get resource marked or reset during the compile process - void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); } + void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); } void operator delete( void * ) { } // fast deallocation // Create a new JVMState, ready for abstract interpretation. @@ -449,14 +449,17 @@ // at a safepoint. class SafePointScalarObjectNode: public TypeNode { - uint _first_index; // First input edge index of a SafePoint node where + uint _first_index; // First input edge relative index of a SafePoint node where // states of the scalarized object fields are collected. + // It is relative to the last (youngest) jvms->_scloff. uint _n_fields; // Number of non-static fields of the scalarized object. DEBUG_ONLY(AllocateNode* _alloc;) virtual uint hash() const ; // { return NO_HASH; } virtual uint cmp( const Node &n ) const; + uint first_index() const { return _first_index; } + public: SafePointScalarObjectNode(const TypeOopPtr* tp, #ifdef ASSERT @@ -469,7 +472,10 @@ virtual const RegMask &out_RegMask() const; virtual uint match_edge(uint idx) const; - uint first_index() const { return _first_index; } + uint first_index(JVMState* jvms) const { + assert(jvms != NULL, "missed JVMS"); + return jvms->scloff() + _first_index; + } uint n_fields() const { return _n_fields; } #ifdef ASSERT @@ -485,7 +491,7 @@ // corresponds appropriately to "this" in "new_call". Assumes that // "sosn_map" is a map, specific to the translation of "s" to "new_call", // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies. - SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const; + SafePointScalarObjectNode* clone(Dict* sosn_map) const; #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/cfgnode.cpp --- a/src/share/vm/opto/cfgnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/cfgnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1932,7 +1932,7 @@ #ifdef _LP64 // Push DecodeN/DecodeNKlass down through phi. // The rest of phi graph will transform by split EncodeP node though phis up. 
- if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) { + if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) { bool may_push = true; bool has_decodeN = false; bool is_decodeN = false; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/chaitin.cpp --- a/src/share/vm/opto/chaitin.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/chaitin.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -40,10 +40,8 @@ #include "opto/opcodes.hpp" #include "opto/rootnode.hpp" -//============================================================================= - #ifndef PRODUCT -void LRG::dump( ) const { +void LRG::dump() const { ttyLocker ttyl; tty->print("%d ",num_regs()); _mask.dump(); @@ -94,7 +92,6 @@ } #endif -//------------------------------score------------------------------------------ // Compute score from cost and area. Low score is best to spill. static double raw_score( double cost, double area ) { return cost - (area*RegisterCostAreaRatio) * 1.52588e-5; @@ -125,41 +122,23 @@ return score; } -//------------------------------LRG_List--------------------------------------- -LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) { - memset( _lidxs, 0, sizeof(uint)*max ); -} - -void LRG_List::extend( uint nidx, uint lidx ) { - _nesting.check(); - if( nidx >= _max ) { - uint size = 16; - while( size <= nidx ) size <<=1; - _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size ); - _max = size; - } - while( _cnt <= nidx ) - _lidxs[_cnt++] = 0; - _lidxs[nidx] = lidx; -} - #define NUMBUCKS 3 // Straight out of Tarjan's union-find algorithm uint LiveRangeMap::find_compress(uint lrg) { uint cur = lrg; - uint next = _uf_map[cur]; + uint next = _uf_map.at(cur); while (next != cur) { // Scan chain of equivalences assert( next < cur, "always union smaller"); cur = next; // until find a fixed-point - next = _uf_map[cur]; + next = _uf_map.at(cur); } // Core of union-find algorithm: update chain of // equivalences to be equal to the root. while (lrg != next) { - uint tmp = _uf_map[lrg]; - _uf_map.map(lrg, next); + uint tmp = _uf_map.at(lrg); + _uf_map.at_put(lrg, next); lrg = tmp; } return lrg; @@ -169,10 +148,10 @@ void LiveRangeMap::reset_uf_map(uint max_lrg_id) { _max_lrg_id= max_lrg_id; // Force the Union-Find mapping to be at least this large - _uf_map.extend(_max_lrg_id, 0); + _uf_map.at_put_grow(_max_lrg_id, 0); // Initialize it to be the ID mapping. for (uint i = 0; i < _max_lrg_id; ++i) { - _uf_map.map(i, i); + _uf_map.at_put(i, i); } } @@ -180,12 +159,12 @@ // the Union-Find mapping after this call. 
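[Editorial sketch] find_compress above is a classic two-pass union-find lookup: first chase the chain of equivalences to its fixed point, then rewrite every entry on the chain to point directly at the root so later lookups are near-constant time. The same routine extracted into a standalone function over std::vector<unsigned>:

#include <cassert>
#include <vector>

static unsigned find_compress(std::vector<unsigned>& uf_map, unsigned lrg) {
  unsigned cur = lrg;
  unsigned next = uf_map[cur];
  while (next != cur) {       // scan chain of equivalences
    cur = next;               // until we find a fixed point
    next = uf_map[cur];
  }
  while (lrg != next) {       // compress: point the whole chain at the root
    unsigned tmp = uf_map[lrg];
    uf_map[lrg] = next;
    lrg = tmp;
  }
  return lrg;
}

int main() {
  // 5 -> 3 -> 1 -> 1 : live range 5 was unioned into 3, and 3 into 1.
  std::vector<unsigned> uf_map = {0, 1, 2, 1, 4, 3};
  assert(find_compress(uf_map, 5) == 1);
  // After compression both 5 and 3 map straight to the root.
  assert(uf_map[5] == 1 && uf_map[3] == 1);
  return 0;
}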
void LiveRangeMap::compress_uf_map_for_nodes() { // For all Nodes, compress mapping - uint unique = _names.Size(); + uint unique = _names.length(); for (uint i = 0; i < unique; ++i) { - uint lrg = _names[i]; + uint lrg = _names.at(i); uint compressed_lrg = find(lrg); if (lrg != compressed_lrg) { - _names.map(i, compressed_lrg); + _names.at_put(i, compressed_lrg); } } } @@ -202,16 +181,15 @@ return lrg; } - uint next = _uf_map[lrg]; + uint next = _uf_map.at(lrg); while (next != lrg) { // Scan chain of equivalences assert(next < lrg, "always union smaller"); lrg = next; // until find a fixed-point - next = _uf_map[lrg]; + next = _uf_map.at(lrg); } return next; } -//------------------------------Chaitin---------------------------------------- PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) : PhaseRegAlloc(unique, cfg, matcher, #ifndef PRODUCT @@ -220,7 +198,7 @@ NULL #endif ) - , _lrg_map(unique) + , _lrg_map(Thread::current()->resource_area(), unique) , _live(0) , _spilled_once(Thread::current()->resource_area()) , _spilled_twice(Thread::current()->resource_area()) @@ -232,31 +210,31 @@ { NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); ) - _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq); + _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency()); // Build a list of basic blocks, sorted by frequency - _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); + _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks()); // Experiment with sorting strategies to speed compilation double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket Block **buckets[NUMBUCKS]; // Array of buckets uint buckcnt[NUMBUCKS]; // Array of bucket counters double buckval[NUMBUCKS]; // Array of bucket value cutoffs for (uint i = 0; i < NUMBUCKS; i++) { - buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks); + buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks()); buckcnt[i] = 0; // Bump by three orders of magnitude each time cutoff *= 0.001; buckval[i] = cutoff; - for (uint j = 0; j < _cfg._num_blocks; j++) { + for (uint j = 0; j < _cfg.number_of_blocks(); j++) { buckets[i][j] = NULL; } } // Sort blocks into buckets - for (uint i = 0; i < _cfg._num_blocks; i++) { + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { for (uint j = 0; j < NUMBUCKS; j++) { - if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) { + if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) { // Assign block to end of list for appropriate bucket - buckets[j][buckcnt[j]++] = _cfg._blocks[i]; + buckets[j][buckcnt[j]++] = _cfg.get_block(i); break; // kick out of inner loop } } @@ -269,10 +247,9 @@ } } - assert(blkcnt == _cfg._num_blocks, "Block array not totally filled"); + assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled"); } -//------------------------------Union------------------------------------------ // union 2 sets together. 
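[Editorial sketch] The PhaseChaitin constructor above bins blocks into NUMBUCKS frequency buckets whose cutoffs drop three orders of magnitude each time, so register allocation visits the hottest blocks first. A compilable sketch of the binning loop, with bare doubles standing in for Block::_freq (the bucket layout follows the hunk; the frequency values are invented):

#include <cstdio>
#include <vector>

int main() {
  const int NUMBUCKS = 3;
  std::vector<double> freqs = {0.5, 2000.0, 1e-5, 30.0, 1e-8};

  double cutoff = 1.0;               // stands in for BLOCK_FREQUENCY(1.0)
  double buckval[NUMBUCKS];
  std::vector<double> buckets[NUMBUCKS];
  for (int i = 0; i < NUMBUCKS; i++) {
    cutoff *= 0.001;                 // bump down three orders of magnitude
    buckval[i] = cutoff;
  }

  for (double f : freqs) {
    for (int j = 0; j < NUMBUCKS; j++) {
      if (j == NUMBUCKS - 1 || f > buckval[j]) {
        buckets[j].push_back(f);     // first bucket whose cutoff it clears
        break;                       // kick out of inner loop
      }
    }
  }

  // Concatenating the buckets yields a coarse high-to-low frequency order.
  for (int j = 0; j < NUMBUCKS; j++) {
    for (double f : buckets[j]) std::printf("bucket %d: %g\n", j, f);
  }
  return 0;
}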
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { uint src = _lrg_map.find(src_n); @@ -285,7 +262,6 @@ _lrg_map.uf_map(dst, src); } -//------------------------------new_lrg---------------------------------------- void PhaseChaitin::new_lrg(const Node *x, uint lrg) { // Make the Node->LRG mapping _lrg_map.extend(x->_idx,lrg); @@ -294,24 +270,28 @@ } -bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) { - Block* bcon = _cfg.get_block_for_node(con); - uint cindex = bcon->find_node(con); - Node *con_next = bcon->_nodes[cindex+1]; - if (con_next->in(0) != con || !con_next->is_MachProj()) { - return false; // No MachProj's follow +int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) { + assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections"); + DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); ) + int found_projs = 0; + uint cnt = orig->outcnt(); + for (uint i = 0; i < cnt; i++) { + Node* proj = orig->raw_out(i); + if (proj->is_MachProj()) { + assert(proj->outcnt() == 0, "only kill projections are expected here"); + assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections"); + found_projs++; + // Copy kill projections after the cloned node + Node* kills = proj->clone(); + kills->set_req(0, copy); + b->insert_node(kills, idx++); + _cfg.map_node_to_block(kills, b); + new_lrg(kills, max_lrg_id++); + } } - - // Copy kills after the cloned constant - Node *kills = con_next->clone(); - kills->set_req(0, copy); - b->_nodes.insert(idx, kills); - _cfg.map_node_to_block(kills, b); - new_lrg(kills, max_lrg_id); - return true; + return found_projs; } -//------------------------------compact---------------------------------------- // Renumber the live ranges to compact them. Makes the IFG smaller. void PhaseChaitin::compact() { // Current the _uf_map contains a series of short chains which are headed @@ -677,76 +657,79 @@ C->set_indexSet_arena(NULL); // ResourceArea is at end of scope } -//------------------------------de_ssa----------------------------------------- void PhaseChaitin::de_ssa() { // Set initial Names for all Nodes. Most Nodes get the virtual register // number. A few get the ZERO live range number. These do not // get allocated, but instead rely on correct scheduling to ensure that // only one instance is simultaneously live at a time. uint lr_counter = 1; - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; - uint cnt = b->_nodes.size(); + for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) { + Block* block = _cfg.get_block(i); + uint cnt = block->number_of_nodes(); // Handle all the normal Nodes in the block for( uint j = 0; j < cnt; j++ ) { - Node *n = b->_nodes[j]; + Node *n = block->get_node(j); // Pre-color to the zero live range, or pick virtual register const RegMask &rm = n->out_RegMask(); _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0); } } + // Reset the Union-Find mapping to be identity _lrg_map.reset_uf_map(lr_counter); } -//------------------------------gather_lrg_masks------------------------------- // Gather LiveRanGe information, including register masks. Modification of // cisc spillable in_RegMasks should not be done before AggressiveCoalesce. 
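[Editorial sketch] The rewritten clone_projs above no longer peeks only at the single instruction following a cloned constant; it walks every output of the original node, clones each kill projection in after the copy, and returns the count so the caller can bump max_lrg_id by that many new live ranges. A toy model of that loop with hypothetical Node and block types (clones are leaked here for brevity; the real code allocates from a compile-time arena):

#include <cassert>
#include <vector>

struct Node {
  std::vector<Node*> outs;   // users of this node
  bool is_proj = false;
  Node* in0 = nullptr;       // control/source input
};

static int clone_projs(std::vector<Node*>& block, size_t idx,
                       Node* orig, Node* copy) {
  int found_projs = 0;
  for (Node* out : orig->outs) {
    if (out->is_proj) {
      found_projs++;
      Node* kill = new Node(*out);  // clone the kill projection
      kill->in0 = copy;             // re-point it at the copy
      block.insert(block.begin() + idx++, kill);
    }
  }
  return found_projs;
}

int main() {
  Node orig, copy, proj1, proj2;
  proj1.is_proj = proj2.is_proj = true;
  orig.outs = {&proj1, &proj2};
  std::vector<Node*> block = {&orig, &copy};
  // Returns how many projections were cloned, rather than the old bool.
  int n = clone_projs(block, 2, &orig, &copy);
  assert(n == 2 && block.size() == 4);
  return 0;
}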
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // Nail down the frame pointer live range - uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr)); + uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr)); lrgs(fp_lrg)._cost += 1e12; // Cost is infinite // For all blocks - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); // For all instructions - for( uint j = 1; j < b->_nodes.size(); j++ ) { - Node *n = b->_nodes[j]; + for (uint j = 1; j < block->number_of_nodes(); j++) { + Node* n = block->get_node(j); uint input_edge_start =1; // Skip control most nodes - if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base(); + if (n->is_Mach()) { + input_edge_start = n->as_Mach()->oper_input_base(); + } uint idx = n->is_Copy(); // Get virtual register number, same as LiveRanGe index uint vreg = _lrg_map.live_range_id(n); - LRG &lrg = lrgs(vreg); - if( vreg ) { // No vreg means un-allocable (e.g. memory) + LRG& lrg = lrgs(vreg); + if (vreg) { // No vreg means un-allocable (e.g. memory) // Collect has-copy bit - if( idx ) { + if (idx) { lrg._has_copy = 1; uint clidx = _lrg_map.live_range_id(n->in(idx)); - LRG ©_src = lrgs(clidx); + LRG& copy_src = lrgs(clidx); copy_src._has_copy = 1; } // Check for float-vs-int live range (used in register-pressure // calculations) const Type *n_type = n->bottom_type(); - if (n_type->is_floatingpoint()) + if (n_type->is_floatingpoint()) { lrg._is_float = 1; + } // Check for twice prior spilling. Once prior spilling might have // spilled 'soft', 2nd prior spill should have spilled 'hard' and // further spilling is unlikely to make progress. - if( _spilled_once.test(n->_idx) ) { + if (_spilled_once.test(n->_idx)) { lrg._was_spilled1 = 1; - if( _spilled_twice.test(n->_idx) ) + if (_spilled_twice.test(n->_idx)) { lrg._was_spilled2 = 1; + } } #ifndef PRODUCT @@ -783,16 +766,18 @@ // Check for bound register masks const RegMask &lrgmask = lrg.mask(); - if (lrgmask.is_bound(ireg)) + if (lrgmask.is_bound(ireg)) { lrg._is_bound = 1; + } // Check for maximum frequency value - if (lrg._maxfreq < b->_freq) - lrg._maxfreq = b->_freq; + if (lrg._maxfreq < block->_freq) { + lrg._maxfreq = block->_freq; + } // Check for oop-iness, or long/double // Check for multi-kill projection - switch( ireg ) { + switch (ireg) { case MachProjNode::fat_proj: // Fat projections have size equal to number of registers killed lrg.set_num_regs(rm.Size()); @@ -962,7 +947,7 @@ // AggressiveCoalesce. This effectively pre-virtual-splits // around uncommon uses of common defs. const RegMask &rm = n->in_RegMask(k); - if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) { + if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) { // Since we are BEFORE aggressive coalesce, leave the register // mask untrimmed by the call. This encourages more coalescing. // Later, AFTER aggressive, this live range will have to spill @@ -1006,8 +991,9 @@ } // Check for maximum frequency value - if( lrg._maxfreq < b->_freq ) - lrg._maxfreq = b->_freq; + if (lrg._maxfreq < block->_freq) { + lrg._maxfreq = block->_freq; + } } // End for all allocated inputs } // end for all instructions @@ -1029,7 +1015,6 @@ } } -//------------------------------set_was_low------------------------------------ // Set the was-lo-degree bit. 
Conservative coalescing should not change the // colorability of the graph. If any live range was of low-degree before // coalescing, it should Simplify. This call sets the was-lo-degree bit. @@ -1066,7 +1051,6 @@ #define REGISTER_CONSTRAINED 16 -//------------------------------cache_lrg_info--------------------------------- // Compute cost/area ratio, in case we spill. Build the lo-degree list. void PhaseChaitin::cache_lrg_info( ) { @@ -1100,7 +1084,6 @@ } } -//------------------------------Pre-Simplify----------------------------------- // Simplify the IFG by removing LRGs of low degree that have NO copies void PhaseChaitin::Pre_Simplify( ) { @@ -1151,7 +1134,6 @@ // No more lo-degree no-copy live ranges to simplify } -//------------------------------Simplify--------------------------------------- // Simplify the IFG by removing LRGs of low degree. void PhaseChaitin::Simplify( ) { @@ -1288,7 +1270,6 @@ } -//------------------------------is_legal_reg----------------------------------- // Is 'reg' register legal for 'lrg'? static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) { if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) && @@ -1315,7 +1296,6 @@ return false; } -//------------------------------bias_color------------------------------------- // Choose a color using the biasing heuristic OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { @@ -1377,7 +1357,6 @@ return OptoReg::add( reg, chunk ); } -//------------------------------choose_color----------------------------------- // Choose a color in the current chunk OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) { assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)"); @@ -1399,7 +1378,6 @@ return lrg.mask().find_last_elem(); } -//------------------------------Select----------------------------------------- // Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted // in reverse order of removal. As long as nothing of hi-degree was yanked, // everything going back is guaranteed a color. Select that color. If some @@ -1574,8 +1552,6 @@ return spill_reg-LRG::SPILL_REG; // Return number of spills } - -//------------------------------copy_was_spilled------------------------------- // Copy 'was_spilled'-edness from the source Node to the dst Node. void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) { if( _spilled_once.test(src->_idx) ) { @@ -1588,14 +1564,12 @@ } } -//------------------------------set_was_spilled-------------------------------- // Set the 'spilled_once' or 'spilled_twice' flag on a node. void PhaseChaitin::set_was_spilled( Node *n ) { if( _spilled_once.test_set(n->_idx) ) _spilled_twice.set(n->_idx); } -//------------------------------fixup_spills----------------------------------- // Convert Ideal spill instructions into proper FramePtr + offset Loads and // Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are. 
void PhaseChaitin::fixup_spills() { @@ -1605,16 +1579,16 @@ NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); ) // Grab the Frame Pointer - Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr); + Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr); // For all blocks - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); // For all instructions in block - uint last_inst = b->end_idx(); - for( uint j = 1; j <= last_inst; j++ ) { - Node *n = b->_nodes[j]; + uint last_inst = block->end_idx(); + for (uint j = 1; j <= last_inst; j++) { + Node* n = block->get_node(j); // Dead instruction??? assert( n->outcnt() != 0 ||// Nothing dead after post alloc @@ -1651,7 +1625,7 @@ assert( cisc->oper_input_base() == 2, "Only adding one edge"); cisc->ins_req(1,src); // Requires a memory edge } - b->_nodes.map(j,cisc); // Insert into basic block + block->map_node(cisc, j); // Insert into basic block n->subsume_by(cisc, C); // Correct graph // ++_used_cisc_instructions; @@ -1677,7 +1651,6 @@ } // End of for all blocks } -//------------------------------find_base_for_derived-------------------------- // Helper to stretch above; recursively discover the base Node for a // given derived Node. Easy for AddP-related machine nodes, but needs // to be recursive for derived Phis. @@ -1707,16 +1680,16 @@ // Initialize it once and make it shared: // set control to _root and place it into Start block // (where top() node is placed). - base->init_req(0, _cfg._root); + base->init_req(0, _cfg.get_root_node()); Block *startb = _cfg.get_block_for_node(C->top()); - startb->_nodes.insert(startb->find_node(C->top()), base ); + startb->insert_node(base, startb->find_node(C->top())); _cfg.map_node_to_block(base, startb); assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet"); } if (_lrg_map.live_range_id(base) == 0) { new_lrg(base, maxlrg++); } - assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared"); + assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared"); derived_base_map[derived->_idx] = base; return base; } @@ -1754,9 +1727,9 @@ // Search the current block for an existing base-Phi Block *b = _cfg.get_block_for_node(derived); for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi - Node *phi = b->_nodes[i]; + Node *phi = b->get_node(i); if( !phi->is_Phi() ) { // Found end of Phis with no match? - b->_nodes.insert( i, base ); // Must insert created Phi here as base + b->insert_node(base, i); // Must insert created Phi here as base _cfg.map_node_to_block(base, b); new_lrg(base,maxlrg++); break; @@ -1779,8 +1752,6 @@ return base; } - -//------------------------------stretch_base_pointer_live_ranges--------------- // At each Safepoint, insert extra debug edges for each pair of derived value/ // base pointer that is live across the Safepoint for oopmap building. The // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the @@ -1792,14 +1763,14 @@ memset( derived_base_map, 0, sizeof(Node*)*C->unique() ); // For all blocks in RPO do... - for( uint i=0; i<_cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); // Note use of deep-copy constructor. 
I cannot hammer the original // liveout bits, because they are needed by the following coalesce pass. - IndexSet liveout(_live->live(b)); + IndexSet liveout(_live->live(block)); - for( uint j = b->end_idx() + 1; j > 1; j-- ) { - Node *n = b->_nodes[j-1]; + for (uint j = block->end_idx() + 1; j > 1; j--) { + Node* n = block->get_node(j - 1); // Pre-split compares of loop-phis. Loop-phis form a cycle we would // like to see in the same register. Compare uses the loop-phi and so @@ -1814,7 +1785,7 @@ Node *phi = n->in(1); if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) { Block *phi_block = _cfg.get_block_for_node(phi); - if (_cfg.get_block_for_node(phi_block->pred(2)) == b) { + if (_cfg.get_block_for_node(phi_block->pred(2)) == block) { const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI]; Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask ); insert_proj( phi_block, 1, spill, maxlrg++ ); @@ -1868,7 +1839,7 @@ if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND (_lrg_map.live_range_id(base) > 0) && // not a constant - _cfg.get_block_for_node(base) != b) { // base not def'd in blk) + _cfg.get_block_for_node(base) != block) { // base not def'd in blk) // Base pointer is not currently live. Since I stretched // the base pointer to here and it crosses basic-block // boundaries, the global live info is now incorrect. @@ -1903,15 +1874,12 @@ return must_recompute_live != 0; } - -//------------------------------add_reference---------------------------------- // Extend the node to LRG mapping void PhaseChaitin::add_reference(const Node *node, const Node *old_node) { _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node)); } -//------------------------------dump------------------------------------------- #ifndef PRODUCT void PhaseChaitin::dump(const Node *n) const { uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0; @@ -1995,8 +1963,8 @@ b->dump_head(&_cfg); // For all instructions - for( uint j = 0; j < b->_nodes.size(); j++ ) - dump(b->_nodes[j]); + for( uint j = 0; j < b->number_of_nodes(); j++ ) + dump(b->get_node(j)); // Print live-out info at end of block if( _live ) { tty->print("Liveout: "); @@ -2017,8 +1985,9 @@ _matcher._new_SP, _framesize ); // For all blocks - for( uint i = 0; i < _cfg._num_blocks; i++ ) - dump(_cfg._blocks[i]); + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + dump(_cfg.get_block(i)); + } // End of per-block dump tty->print("\n"); @@ -2059,7 +2028,6 @@ tty->print_cr(""); } -//------------------------------dump_degree_lists------------------------------ void PhaseChaitin::dump_degree_lists() const { // Dump lo-degree list tty->print("Lo degree: "); @@ -2080,7 +2048,6 @@ tty->print_cr(""); } -//------------------------------dump_simplified-------------------------------- void PhaseChaitin::dump_simplified() const { tty->print("Simplified: "); for( uint i = _simplified; i; i = lrgs(i)._next ) @@ -2099,7 +2066,6 @@ return buf+strlen(buf); } -//------------------------------dump_register---------------------------------- // Dump a register name into a buffer. Be intelligent if we get called // before allocation is complete. 
char *PhaseChaitin::dump_register( const Node *n, char *buf ) const { @@ -2133,7 +2099,6 @@ return buf+strlen(buf); } -//----------------------dump_for_spill_split_recycle-------------------------- void PhaseChaitin::dump_for_spill_split_recycle() const { if( WizardMode && (PrintCompilation || PrintOpto) ) { // Display which live ranges need to be split and the allocator's state @@ -2149,7 +2114,6 @@ } } -//------------------------------dump_frame------------------------------------ void PhaseChaitin::dump_frame() const { const char *fp = OptoReg::regname(OptoReg::c_frame_pointer); const TypeTuple *domain = C->tf()->domain(); @@ -2255,17 +2219,16 @@ tty->print_cr("#"); } -//------------------------------dump_bb---------------------------------------- void PhaseChaitin::dump_bb( uint pre_order ) const { tty->print_cr("---dump of B%d---",pre_order); - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; - if( b->_pre_order == pre_order ) - dump(b); + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); + if (block->_pre_order == pre_order) { + dump(block); + } } } -//------------------------------dump_lrg--------------------------------------- void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { tty->print_cr("---dump of L%d---",lidx); @@ -2287,17 +2250,17 @@ tty->cr(); } // For all blocks - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); int dump_once = 0; // For all instructions - for( uint j = 0; j < b->_nodes.size(); j++ ) { - Node *n = b->_nodes[j]; + for( uint j = 0; j < block->number_of_nodes(); j++ ) { + Node *n = block->get_node(j); if (_lrg_map.find_const(n) == lidx) { if (!dump_once++) { tty->cr(); - b->dump_head(&_cfg); + block->dump_head(&_cfg); } dump(n); continue; @@ -2312,7 +2275,7 @@ if (_lrg_map.find_const(m) == lidx) { if (!dump_once++) { tty->cr(); - b->dump_head(&_cfg); + block->dump_head(&_cfg); } dump(n); } @@ -2324,7 +2287,6 @@ } #endif // not PRODUCT -//------------------------------print_chaitin_statistics------------------------------- int PhaseChaitin::_final_loads = 0; int PhaseChaitin::_final_stores = 0; int PhaseChaitin::_final_memoves= 0; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/chaitin.hpp --- a/src/share/vm/opto/chaitin.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/chaitin.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -283,8 +283,8 @@ // Straight out of Tarjan's union-find algorithm uint find_compress(const Node *node) { - uint lrg_id = find_compress(_names[node->_idx]); - _names.map(node->_idx, lrg_id); + uint lrg_id = find_compress(_names.at(node->_idx)); + _names.at_put(node->_idx, lrg_id); return lrg_id; } @@ -305,40 +305,40 @@ } uint size() const { - return _names.Size(); + return _names.length(); } uint live_range_id(uint idx) const { - return _names[idx]; + return _names.at(idx); } uint live_range_id(const Node *node) const { - return _names[node->_idx]; + return _names.at(node->_idx); } uint uf_live_range_id(uint lrg_id) const { - return _uf_map[lrg_id]; + return _uf_map.at(lrg_id); } void map(uint idx, uint lrg_id) { - _names.map(idx, lrg_id); + _names.at_put(idx, lrg_id); } void uf_map(uint dst_lrg_id, uint src_lrg_id) { - _uf_map.map(dst_lrg_id, src_lrg_id); + _uf_map.at_put(dst_lrg_id, src_lrg_id); } void extend(uint idx, uint lrg_id) { - _names.extend(idx, lrg_id); + _names.at_put_grow(idx, lrg_id); } void uf_extend(uint dst_lrg_id, uint 
src_lrg_id) { - _uf_map.extend(dst_lrg_id, src_lrg_id); + _uf_map.at_put_grow(dst_lrg_id, src_lrg_id); } - LiveRangeMap(uint unique) - : _names(unique) - , _uf_map(unique) + LiveRangeMap(Arena* arena, uint unique) + : _names(arena, unique, unique, 0) + , _uf_map(arena, unique, unique, 0) , _max_lrg_id(0) {} uint find_id( const Node *n ) { @@ -355,14 +355,14 @@ void compress_uf_map_for_nodes(); uint find(uint lidx) { - uint uf_lidx = _uf_map[lidx]; + uint uf_lidx = _uf_map.at(lidx); return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx); } // Convert a Node into a Live Range Index - a lidx uint find(const Node *node) { uint lidx = live_range_id(node); - uint uf_lidx = _uf_map[lidx]; + uint uf_lidx = _uf_map.at(lidx); return (uf_lidx == lidx) ? uf_lidx : find_compress(node); } @@ -371,10 +371,10 @@ // Like Find above, but no path compress, so bad asymptotic behavior uint find_const(const Node *node) const { - if(node->_idx >= _names.Size()) { + if(node->_idx >= (uint)_names.length()) { return 0; // not mapped, usual for debug dump } - return find_const(_names[node->_idx]); + return find_const(_names.at(node->_idx)); } }; @@ -412,33 +412,22 @@ uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray splits, int slidx ); uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray splits, int slidx ); - bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) { - bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id()); - - if(found_projs) { - uint max_lrg_id = lrg_map.max_lrg_id(); - lrg_map.set_max_lrg_id(max_lrg_id + 1); - } - - return found_projs; - } - //------------------------------clone_projs------------------------------------ // After cloning some rematerialized instruction, clone any MachProj's that // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants // use G3 as an address temp. 
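[Editorial sketch] The LiveRangeMap hunks above swap the hand-rolled LRG_List for GrowableArray, trading map/extend/Size for at/at_put/at_put_grow/length. A stand-in class showing the semantics those calls are assumed to have here: checked access for at and at_put, zero-filled growth for at_put_grow (the real GrowableArray takes an explicit fill argument and an arena):

#include <cassert>
#include <vector>

class GrowableArrayU {
  std::vector<unsigned> _data;
 public:
  unsigned at(size_t i) const { assert(i < _data.size()); return _data[i]; }
  void at_put(size_t i, unsigned v) { assert(i < _data.size()); _data[i] = v; }
  // Grow on demand, padding new slots with the fill value.
  void at_put_grow(size_t i, unsigned v, unsigned fill = 0) {
    if (i >= _data.size()) _data.resize(i + 1, fill);
    _data[i] = v;
  }
  size_t length() const { return _data.size(); }
};

int main() {
  GrowableArrayU names;
  names.at_put_grow(3, 42);        // grows to length 4, pads with 0
  assert(names.length() == 4);
  assert(names.at(1) == 0 && names.at(3) == 42);
  names.at_put(1, 7);              // in-range write, no growth
  assert(names.at(1) == 7);
  return 0;
}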
- bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) { - bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id); + int clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id); - if(found_projs) { - max_lrg_id++; + int clone_projs(Block* b, uint idx, Node* orig, Node* copy, LiveRangeMap& lrg_map) { + uint max_lrg_id = lrg_map.max_lrg_id(); + int found_projs = clone_projs(b, idx, orig, copy, max_lrg_id); + if (found_projs > 0) { + // max_lrg_id is updated during call above + lrg_map.set_max_lrg_id(max_lrg_id); } - return found_projs; } - bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id); - Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru); // True if lidx is used before any real register is def'd in the block diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/classes.cpp --- a/src/share/vm/opto/classes.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/classes.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,6 +32,7 @@ #include "opto/loopnode.hpp" #include "opto/machnode.hpp" #include "opto/memnode.hpp" +#include "opto/mathexactnode.hpp" #include "opto/mulnode.hpp" #include "opto/multnode.hpp" #include "opto/node.hpp" diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/classes.hpp --- a/src/share/vm/opto/classes.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/classes.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -29,6 +29,7 @@ macro(AbsF) macro(AbsI) macro(AddD) +macro(AddExactI) macro(AddF) macro(AddI) macro(AddL) @@ -133,6 +134,7 @@ macro(ExpD) macro(FastLock) macro(FastUnlock) +macro(FlagsProj) macro(Goto) macro(Halt) macro(If) @@ -167,6 +169,7 @@ macro(LoopLimit) macro(Mach) macro(MachProj) +macro(MathExact) macro(MaxI) macro(MemBarAcquire) macro(MemBarAcquireLock) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/coalesce.cpp --- a/src/share/vm/opto/coalesce.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/coalesce.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -34,8 +34,6 @@ #include "opto/matcher.hpp" #include "opto/regmask.hpp" -//============================================================================= -//------------------------------Dump------------------------------------------- #ifndef PRODUCT void PhaseCoalesce::dump(Node *n) const { // Being a const function means I cannot use 'Find' @@ -43,12 +41,11 @@ tty->print("L%d/N%d ",r,n->_idx); } -//------------------------------dump------------------------------------------- void PhaseCoalesce::dump() const { // I know I have a block layout now, so I can print blocks in a loop - for( uint i=0; i<_phc._cfg._num_blocks; i++ ) { + for( uint i=0; i<_phc._cfg.number_of_blocks(); i++ ) { uint j; - Block *b = _phc._cfg._blocks[i]; + Block* b = _phc._cfg.get_block(i); // Print a nice block header tty->print("B%d: ",b->_pre_order); for( j=1; jnum_preds(); j++ ) @@ -57,9 +54,9 @@ for( j=0; j_num_succs; j++ ) tty->print("B%d ",b->_succs[j]->_pre_order); tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth); - uint cnt = b->_nodes.size(); + uint cnt = b->number_of_nodes(); for( j=0; j_nodes[j]; + Node *n = b->get_node(j); dump( n ); tty->print("\t%s\t",n->Name()); @@ -85,7 +82,6 @@ } #endif -//------------------------------combine_these_two------------------------------ // Combine the live ranges def'd by these 2 Nodes. N2 is an input to N1. 
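// A minimal runnable sketch of the idea behind combine_these_two() below:
// LiveRangeMap (see find_compress() in chaitin.hpp above) is a union-find
// forest, and coalescing a copy is just a union of the two live-range ids.
// ToyLiveRangeMap and the ids are made up; this is not code from the
// changeset, it only mirrors the find/union contract.
#include <cstdio>
#include <vector>
struct ToyLiveRangeMap {
  std::vector<unsigned> uf;                       // analogue of _uf_map
  explicit ToyLiveRangeMap(unsigned n) : uf(n) {
    for (unsigned i = 0; i < n; ++i) uf[i] = i;   // each lrg starts as its own root
  }
  unsigned find_compress(unsigned lrg) {          // path compression, as in chaitin.hpp
    if (uf[lrg] == lrg) return lrg;
    unsigned root = find_compress(uf[lrg]);
    uf[lrg] = root;
    return root;
  }
  void combine(unsigned lr1, unsigned lr2) {      // union lr2 into lr1
    uf[find_compress(lr2)] = find_compress(lr1);
  }
};
int main() {
  ToyLiveRangeMap map(8);
  map.combine(2, 5);  // a copy (or two-address instruction) ties lrg 5 to lrg 2
  std::printf("lrg 5 now resolves to lrg %u\n", map.find_compress(5));
  return 0;
}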
void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) { uint lr1 = _phc._lrg_map.find(n1); @@ -127,18 +123,15 @@ } } -//------------------------------coalesce_driver-------------------------------- // Copy coalescing -void PhaseCoalesce::coalesce_driver( ) { - +void PhaseCoalesce::coalesce_driver() { verify(); // Coalesce from high frequency to low - for( uint i=0; i<_phc._cfg._num_blocks; i++ ) - coalesce( _phc._blks[i] ); - + for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) { + coalesce(_phc._blks[i]); + } } -//------------------------------insert_copy_with_overlap----------------------- // I am inserting copies to come out of SSA form. In the general case, I am // doing a parallel renaming. I'm in the Named world now, so I can't do a // general parallel renaming. All the copies now use "names" (live-ranges) @@ -159,7 +152,7 @@ // after the last use. Last use is really first-use on a backwards scan. uint i = b->end_idx()-1; while(1) { - Node *n = b->_nodes[i]; + Node *n = b->get_node(i); // Check for end of virtual copies; this is also the end of the // parallel renaming effort. if (n->_idx < _unique) { @@ -181,7 +174,7 @@ // the last kill. Thus it is the first kill on a backwards scan. i = b->end_idx()-1; while (1) { - Node *n = b->_nodes[i]; + Node *n = b->get_node(i); // Check for end of virtual copies; this is also the end of the // parallel renaming effort. if (n->_idx < _unique) { @@ -207,16 +200,15 @@ tmp ->set_req(idx,copy->in(idx)); copy->set_req(idx,tmp); // Save source in temp early, before source is killed - b->_nodes.insert(kill_src_idx,tmp); + b->insert_node(tmp, kill_src_idx); _phc._cfg.map_node_to_block(tmp, b); last_use_idx++; } // Insert just after last use - b->_nodes.insert(last_use_idx+1,copy); + b->insert_node(copy, last_use_idx + 1); } -//------------------------------insert_copies---------------------------------- void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // We do LRGs compressing and fix a liveout data only here since the other // place in Split() is guarded by the assert which we never hit. @@ -225,8 +217,8 @@ for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) { uint compressed_lrg = _phc._lrg_map.find(lrg); if (lrg != compressed_lrg) { - for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) { - IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]); + for (uint bidx = 0; bidx < _phc._cfg.number_of_blocks(); bidx++) { + IndexSet *liveout = _phc._live->live(_phc._cfg.get_block(bidx)); if (liveout->member(lrg)) { liveout->remove(lrg); liveout->insert(compressed_lrg); @@ -239,14 +231,14 @@ // Nodes with index less than '_unique' are original, non-virtual Nodes. 
_unique = C->unique(); - for( uint i=0; i<_phc._cfg._num_blocks; i++ ) { + for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) { C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce"); if (C->failing()) return; - Block *b = _phc._cfg._blocks[i]; + Block *b = _phc._cfg.get_block(i); uint cnt = b->num_preds(); // Number of inputs to the Phi - for( uint l = 1; l<b->_nodes.size(); l++ ) { - Node *n = b->_nodes[l]; + for( uint l = 1; l<b->number_of_nodes(); l++ ) { + Node *n = b->get_node(l); // Do not use removed-copies, use copied value instead uint ncnt = n->req(); @@ -268,7 +260,7 @@ if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) { n->replace_by(def); n->set_req(cidx,NULL); - b->_nodes.remove(l); + b->remove_node(l); l--; continue; } @@ -329,15 +321,13 @@ m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us - b->_nodes.insert(l++, copy); - if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) { - l++; - } + b->insert_node(copy, l++); + l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; copy = new (C) MachSpillCopyNode(m, *rm, *rm); // Insert the copy in the basic block, just before us - b->_nodes.insert(l++, copy); + b->insert_node(copy, l++); } // Insert the copy in the use-def chain n->set_req(idx, copy); @@ -349,7 +339,7 @@ } // End of is two-adr // Insert a copy at a debug use for a lrg which has high frequency - if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) { + if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) { // Walk the debug inputs to the node and check for lrg freq JVMState* jvms = n->jvms(); uint debug_start = jvms ? jvms->debug_start() : 999999; @@ -386,7 +376,7 @@ // Insert the copy in the use-def chain n->set_req(inpidx, copy ); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); + b->insert_node(copy, l++); // Extend ("register allocate") the names array for the copy. uint max_lrg_id = _phc._lrg_map.max_lrg_id(); _phc.new_lrg(copy, max_lrg_id); @@ -403,8 +393,7 @@ } // End of for all blocks } -//============================================================================= -//------------------------------coalesce--------------------------------------- + // Aggressive (but pessimistic) copy coalescing of a single block // The following coalesce pass represents a single round of aggressive @@ -442,8 +431,8 @@ } // Visit all the Phis in successor block - for( uint k = 1; k<bs->_nodes.size(); k++ ) { - Node *n = bs->_nodes[k]; + for( uint k = 1; k<bs->number_of_nodes(); k++ ) { + Node *n = bs->get_node(k); if( !n->is_Phi() ) break; combine_these_two( n, n->in(j) ); } @@ -453,7 +442,7 @@ // Check _this_ block for 2-address instructions and copies.
uint cnt = b->end_idx(); for( i = 1; i<cnt; i++ ) { - Node *n = b->_nodes[i]; + Node *n = b->get_node(i); uint idx; // 2-address instructions have a virtual Copy matching their input // to their output @@ -464,20 +453,16 @@ } // End of for all instructions in block } -//============================================================================= -//------------------------------PhaseConservativeCoalesce---------------------- PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) { _ulr.initialize(_phc._lrg_map.max_lrg_id()); } -//------------------------------verify----------------------------------------- void PhaseConservativeCoalesce::verify() { #ifdef ASSERT _phc.set_was_low(); #endif } -//------------------------------union_helper----------------------------------- void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) { // Join live ranges. Merge larger into smaller. Union lr2 into lr1 in the // union-find tree @@ -505,10 +490,10 @@ dst_copy->set_req( didx, src_def ); // Add copy to free list // _phc.free_spillcopy(b->_nodes[bindex]); - assert( b->_nodes[bindex] == dst_copy, "" ); + assert( b->get_node(bindex) == dst_copy, "" ); dst_copy->replace_by( dst_copy->in(didx) ); dst_copy->set_req( didx, NULL); - b->_nodes.remove(bindex); + b->remove_node(bindex); if( bindex < b->_ihrp_index ) b->_ihrp_index--; if( bindex < b->_fhrp_index ) b->_fhrp_index--; @@ -520,7 +505,6 @@ } } -//------------------------------compute_separating_interferences--------------- // Factored code from copy_copy that computes extra interferences from // lengthening a live range by double-coalescing. uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) { @@ -539,8 +523,8 @@ bindex2 = b2->end_idx()-1; } // Get prior instruction - assert(bindex2 < b2->_nodes.size(), "index out of bounds"); - Node *x = b2->_nodes[bindex2]; + assert(bindex2 < b2->number_of_nodes(), "index out of bounds"); + Node *x = b2->get_node(bindex2); if( x == prev_copy ) { // Previous copy in copy chain? if( prev_copy == src_copy)// Found end of chain and all interferences break; // So break out of loop @@ -586,7 +570,6 @@ return reg_degree; } -//------------------------------update_ifg------------------------------------- void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) { // Some original neighbors of lr1 might have gone away // because the constrained register mask prevented them. @@ -616,7 +599,6 @@ lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) ); } -//------------------------------record_bias------------------------------------ static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) { // Tag copy bias here if( !ifg->lrgs(lr1)._copy_bias ) @@ -625,7 +607,6 @@ ifg->lrgs(lr2)._copy_bias = lr1; } -//------------------------------copy_copy-------------------------------------- // See if I can coalesce a series of multiple copies together. I need the // final dest copy and the original src copy. They can be the same Node. // Compute the compatible register masks.
@@ -785,18 +766,17 @@ return true; } -//------------------------------coalesce--------------------------------------- // Conservative (but pessimistic) copy coalescing of a single block void PhaseConservativeCoalesce::coalesce( Block *b ) { // Bail out on infrequent blocks - if (b->is_uncommon(&_phc._cfg)) { + if (_phc._cfg.is_uncommon(b)) { return; } // Check this block for copies. for( uint i = 1; i<b->end_idx(); i++ ) { // Check for actual copies on inputs. Coalesce a copy into its // input if use and copy's input are compatible. - Node *copy1 = b->_nodes[i]; + Node *copy1 = b->get_node(i); uint idx1 = copy1->is_Copy(); if( !idx1 ) continue; // Not a copy diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/coalesce.hpp --- a/src/share/vm/opto/coalesce.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/coalesce.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -29,7 +29,6 @@ class LoopTree; class LRG; -class LRG_List; class Matcher; class PhaseIFG; class PhaseCFG; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/compile.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -654,7 +654,7 @@ _inlining_progress(false), _inlining_incrementally(false), _print_inlining_list(NULL), - _print_inlining(0) { + _print_inlining_idx(0) { C = this; CompileWrapper cw(this); @@ -679,6 +679,8 @@ set_print_assembly(print_opto_assembly); set_parsed_irreducible_loop(false); #endif + set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining)); + set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics")); if (ProfileTraps) { // Make sure the method being compiled gets its own MDO, @@ -710,7 +712,7 @@ PhaseGVN gvn(node_arena(), estimated_size); set_initial_gvn(&gvn); - if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) { + if (print_inlining() || print_intrinsics()) { _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer()); } { // Scope for timing the parser @@ -937,7 +939,7 @@ _inlining_progress(false), _inlining_incrementally(false), _print_inlining_list(NULL), - _print_inlining(0) { + _print_inlining_idx(0) { C = this; #ifndef PRODUCT @@ -1297,6 +1299,10 @@ // Array pointers need some flattening const TypeAryPtr *ta = tj->isa_aryptr(); + if (ta && ta->is_stable()) { + // Erase stability property for alias analysis. + tj = ta = ta->cast_to_stable(false); + } if( ta && is_known_inst ) { if ( offset != Type::OffsetBot && offset > arrayOopDesc::length_offset_in_bytes() ) { @@ -1497,6 +1503,7 @@ _index = i; _adr_type = at; _field = NULL; + _element = NULL; _is_rewritable = true; // default const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL; if (atoop != NULL && atoop->is_known_instance()) { @@ -1615,6 +1622,16 @@ && flat->is_instptr()->klass() == env()->Class_klass()) alias_type(idx)->set_rewritable(false); } + if (flat->isa_aryptr()) { +#ifdef ASSERT + const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); + // (T_BYTE has the weakest alignment and size restrictions...)
+ assert(flat->offset() < header_size_min, "array body reference must be OffsetBot"); +#endif + if (flat->offset() == TypePtr::OffsetBot) { + alias_type(idx)->set_element(flat->is_aryptr()->elem()); + } + } if (flat->isa_klassptr()) { if (flat->offset() == in_bytes(Klass::super_check_offset_offset())) alias_type(idx)->set_rewritable(false); @@ -1677,7 +1694,7 @@ else t = TypeOopPtr::make_from_klass_raw(field->holder()); AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field); - assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct"); + assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct"); return atp; } @@ -2136,7 +2153,9 @@ //------------------------------Code_Gen--------------------------------------- // Given a graph, generate code for it void Compile::Code_Gen() { - if (failing()) return; + if (failing()) { + return; + } // Perform instruction selection. You might think we could reclaim Matcher // memory PDQ, but actually the Matcher is used in generating spill code. @@ -2148,12 +2167,11 @@ // nodes. Mapping is only valid at the root of each matched subtree. NOT_PRODUCT( verify_graph_edges(); ) - Node_List proj_list; - Matcher m(proj_list); - _matcher = &m; + Matcher matcher; + _matcher = &matcher; { TracePhase t2("matcher", &_t_matcher, true); - m.match(); + matcher.match(); } // In debug mode can dump m._nodes.dump() for mapping of ideal to machine // nodes. Mapping is only valid at the root of each matched subtree. @@ -2161,31 +2179,26 @@ // If you have too many nodes, or if matching has failed, bail out check_node_count(0, "out of nodes matching instructions"); - if (failing()) return; + if (failing()) { + return; + } // Build a proper-looking CFG - PhaseCFG cfg(node_arena(), root(), m); + PhaseCFG cfg(node_arena(), root(), matcher); _cfg = &cfg; { NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); ) - cfg.Dominators(); - if (failing()) return; - - NOT_PRODUCT( verify_graph_edges(); ) - - cfg.Estimate_Block_Frequency(); - cfg.GlobalCodeMotion(m,unique(),proj_list); - if (failing()) return; + bool success = cfg.do_global_code_motion(); + if (!success) { + return; + } print_method(PHASE_GLOBAL_CODE_MOTION, 2); - NOT_PRODUCT( verify_graph_edges(); ) - debug_only( cfg.verify(); ) } - NOT_PRODUCT( verify_graph_edges(); ) - - PhaseChaitin regalloc(unique(), cfg, m); + + PhaseChaitin regalloc(unique(), cfg, matcher); _regalloc = ®alloc; { TracePhase t2("regalloc", &_t_registerAllocation, true); @@ -2206,7 +2219,7 @@ // can now safely remove it. 
{ NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); ) - cfg.remove_empty(); + cfg.remove_empty_blocks(); if (do_freq_based_layout()) { PhaseBlockLayout layout(cfg); } else { @@ -2253,38 +2266,50 @@ _regalloc->dump_frame(); Node *n = NULL; - for( uint i=0; i<_cfg->_num_blocks; i++ ) { - if (VMThread::should_terminate()) { cut_short = true; break; } - Block *b = _cfg->_blocks[i]; - if (b->is_connector() && !Verbose) continue; - n = b->_nodes[0]; - if (pcs && n->_idx < pc_limit) + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { + if (VMThread::should_terminate()) { + cut_short = true; + break; + } + Block* block = _cfg->get_block(i); + if (block->is_connector() && !Verbose) { + continue; + } + n = block->head(); + if (pcs && n->_idx < pc_limit) { tty->print("%3.3x ", pcs[n->_idx]); - else + } else { tty->print(" "); - b->dump_head(_cfg); - if (b->is_connector()) { + } + block->dump_head(_cfg); + if (block->is_connector()) { tty->print_cr(" # Empty connector block"); - } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) { + } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) { tty->print_cr(" # Block is sole successor of call"); } // For all instructions Node *delay = NULL; - for( uint j = 0; j<b->_nodes.size(); j++ ) { - if (VMThread::should_terminate()) { cut_short = true; break; } - n = b->_nodes[j]; + for (uint j = 0; j < block->number_of_nodes(); j++) { + if (VMThread::should_terminate()) { + cut_short = true; + break; + } + n = block->get_node(j); if (valid_bundle_info(n)) { - Bundle *bundle = node_bundling(n); + Bundle* bundle = node_bundling(n); if (bundle->used_in_unconditional_delay()) { delay = n; continue; } - if (bundle->starts_bundle()) + if (bundle->starts_bundle()) { starts_bundle = '+'; + } } - if (WizardMode) n->dump(); + if (WizardMode) { + n->dump(); + } if( !n->is_Region() && // Dont print in the Assembly !n->is_Phi() && // a few noisely useless nodes @@ -2623,7 +2648,7 @@ addp->in(AddPNode::Base) == n->in(AddPNode::Base), "Base pointers must match" ); #ifdef _LP64 - if ((UseCompressedOops || UseCompressedKlassPointers) && + if ((UseCompressedOops || UseCompressedClassPointers) && addp->Opcode() == Op_ConP && addp == n->in(AddPNode::Base) && n->in(AddPNode::Offset)->is_Con()) { @@ -3010,7 +3035,7 @@ // Skip next transformation if compressed oops are not used. if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) || - (!UseCompressedOops && !UseCompressedKlassPointers)) + (!UseCompressedOops && !UseCompressedClassPointers)) return; // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
@@ -3588,7 +3613,7 @@ } void Compile::dump_inlining() { - if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) { + if (print_inlining() || print_intrinsics()) { // Print inlining message for candidates that we couldn't inline // for lack of space or non constant receiver for (int i = 0; i < _late_inlines.length(); i++) { @@ -3612,7 +3637,7 @@ } } for (int i = 0; i < _print_inlining_list->length(); i++) { - tty->print(_print_inlining_list->at(i).ss()->as_string()); + tty->print(_print_inlining_list->adr_at(i)->ss()->as_string()); } } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/compile.hpp --- a/src/share/vm/opto/compile.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/compile.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -72,6 +72,7 @@ class StartNode; class SafePointNode; class JVMState; +class Type; class TypeData; class TypePtr; class TypeOopPtr; @@ -119,6 +120,7 @@ int _index; // unique index, used with MergeMemNode const TypePtr* _adr_type; // normalized address type ciField* _field; // relevant instance field, or null if none + const Type* _element; // relevant array element type, or null if none bool _is_rewritable; // false if the memory is write-once only int _general_index; // if this is type is an instance, the general // type that this is an instance of @@ -129,6 +131,7 @@ int index() const { return _index; } const TypePtr* adr_type() const { return _adr_type; } ciField* field() const { return _field; } + const Type* element() const { return _element; } bool is_rewritable() const { return _is_rewritable; } bool is_volatile() const { return (_field ? _field->is_volatile() : false); } int general_index() const { return (_general_index != 0) ? _general_index : _index; } @@ -137,7 +140,14 @@ void set_field(ciField* f) { assert(!_field,""); _field = f; - if (f->is_final()) _is_rewritable = false; + if (f->is_final() || f->is_stable()) { + // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops. + _is_rewritable = false; + } + } + void set_element(const Type* e) { + assert(_element == NULL, ""); + _element = e; } void print_on(outputStream* st) PRODUCT_RETURN; @@ -302,6 +312,8 @@ bool _do_method_data_update; // True if we generate code to update MethodData*s int _AliasLevel; // Locally-adjusted version of AliasLevel flag. 
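// A minimal sketch of the rule introduced by the set_field() change above:
// the write-once test widens from final fields to final-or-@Stable fields,
// which is what lets loads through such an alias category be constant
// folded. ToyField/ToyAliasType are hypothetical stand-ins, not code from
// this changeset.
#include <cstdio>
struct ToyField { bool is_final; bool is_stable; };
struct ToyAliasType {
  bool is_rewritable;
  ToyAliasType() : is_rewritable(true) {}
  void set_field(const ToyField& f) {
    // @Stable allows multiple writes, but they are assumed to be no-ops,
    // so the category is treated as write-once just like a final field.
    if (f.is_final || f.is_stable) {
      is_rewritable = false;
    }
  }
};
int main() {
  ToyAliasType at;
  ToyField f = { false, true };  // @Stable but not final
  at.set_field(f);
  std::printf("rewritable: %s\n", at.is_rewritable ? "yes" : "no");  // prints "no"
  return 0;
}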
bool _print_assembly; // True if we should dump assembly code for this compilation + bool _print_inlining; // True if we should print inlining for this compilation + bool _print_intrinsics; // True if we should print intrinsics for this compilation #ifndef PRODUCT bool _trace_opto_output; bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing @@ -404,7 +416,7 @@ }; GrowableArray<PrintInliningBuffer>* _print_inlining_list; - int _print_inlining; + int _print_inlining_idx; // Only keep nodes in the expensive node list that need to be optimized void cleanup_expensive_nodes(PhaseIterGVN &igvn); @@ -416,24 +428,24 @@ public: outputStream* print_inlining_stream() const { - return _print_inlining_list->at(_print_inlining).ss(); + return _print_inlining_list->adr_at(_print_inlining_idx)->ss(); } void print_inlining_skip(CallGenerator* cg) { - if (PrintInlining) { - _print_inlining_list->at(_print_inlining).set_cg(cg); - _print_inlining++; - _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer()); + if (_print_inlining) { + _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg); + _print_inlining_idx++; + _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer()); } } void print_inlining_insert(CallGenerator* cg) { - if (PrintInlining) { + if (_print_inlining) { for (int i = 0; i < _print_inlining_list->length(); i++) { - if (_print_inlining_list->at(i).cg() == cg) { + if (_print_inlining_list->adr_at(i)->cg() == cg) { _print_inlining_list->insert_before(i+1, PrintInliningBuffer()); - _print_inlining = i+1; - _print_inlining_list->at(i).set_cg(NULL); + _print_inlining_idx = i+1; + _print_inlining_list->adr_at(i)->set_cg(NULL); return; } } @@ -562,6 +574,10 @@ int AliasLevel() const { return _AliasLevel; } bool print_assembly() const { return _print_assembly; } void set_print_assembly(bool z) { _print_assembly = z; } + bool print_inlining() const { return _print_inlining; } + void set_print_inlining(bool z) { _print_inlining = z; } + bool print_intrinsics() const { return _print_intrinsics; } + void set_print_intrinsics(bool z) { _print_intrinsics = z; } // check the CompilerOracle for special behaviours for this compile bool method_has_option(const char * option) { return method() != NULL && method()->has_option(option); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/connode.cpp --- a/src/share/vm/opto/connode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/connode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -630,7 +630,7 @@ if (t == Type::TOP) return Type::TOP; assert (t != TypePtr::NULL_PTR, "null klass?"); - assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here"); + assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here"); return t->make_narrowklass(); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/doCall.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -41,9 +41,9 @@ #include "runtime/sharedRuntime.hpp" void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) { - if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) { + if (TraceTypeProfile || C->print_inlining()) { outputStream* out = tty; - if (!PrintInlining) { + if (!C->print_inlining()) { if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) { method->print_short_name(); tty->cr(); @@ -110,6 +110,7 @@ // then
we return it as the inlined version of the call. // We do this before the strict f.p. check below because the // intrinsics handle strict f.p. correctly. + CallGenerator* cg_intrinsic = NULL; if (allow_inline && allow_intrinsics) { CallGenerator* cg = find_intrinsic(callee, call_does_dispatch); if (cg != NULL) { @@ -121,7 +122,16 @@ cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); } } - return cg; + + // If intrinsic does the virtual dispatch, we try to use the type profile + // first, and hopefully inline it as the regular virtual call below. + // We will retry the intrinsic if nothing had claimed it afterwards. + if (cg->does_virtual_dispatch()) { + cg_intrinsic = cg; + cg = NULL; + } else { + return cg; + } } } @@ -266,6 +276,13 @@ } } + // Nothing claimed the intrinsic, we go with straight-forward inlining + // for already discovered intrinsic. + if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) { + assert(cg_intrinsic->does_virtual_dispatch(), "sanity"); + return cg_intrinsic; + } + // There was no special inlining tactic, or it bailed out. // Use a more generic tactic, like a simple call. if (call_does_dispatch) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/domgraph.cpp --- a/src/share/vm/opto/domgraph.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/domgraph.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,9 +32,6 @@ // Portions of code courtesy of Clifford Click -// Optimization - Graph Style - -//------------------------------Tarjan----------------------------------------- // A data structure that holds all the information needed to find dominators. struct Tarjan { Block *_block; // Basic block for this info @@ -60,23 +57,21 @@ }; -//------------------------------Dominator-------------------------------------- // Compute the dominator tree of the CFG. The CFG must already have been // constructed. This is the Lengauer & Tarjan O(E-alpha(E,V)) algorithm. -void PhaseCFG::Dominators( ) { +void PhaseCFG::build_dominator_tree() { // Pre-grow the blocks array, prior to the ResourceMark kicking in - _blocks.map(_num_blocks,0); + _blocks.map(number_of_blocks(), 0); ResourceMark rm; // Setup mappings from my Graph to Tarjan's stuff and back // Note: Tarjan uses 1-based arrays - Tarjan *tarjan = NEW_RESOURCE_ARRAY(Tarjan,_num_blocks+1); + Tarjan* tarjan = NEW_RESOURCE_ARRAY(Tarjan, number_of_blocks() + 1); // Tarjan's algorithm, almost verbatim: // Step 1: - _rpo_ctr = _num_blocks; - uint dfsnum = DFS( tarjan ); - if( dfsnum-1 != _num_blocks ) {// Check for unreachable loops! + uint dfsnum = do_DFS(tarjan, number_of_blocks()); + if (dfsnum - 1 != number_of_blocks()) { // Check for unreachable loops! // If the returned dfsnum does not match the number of blocks, then we // must have some unreachable loops. These can be made at any time by // IterGVN. 
They are cleaned up by CCP or the loop opts, but the last @@ -93,14 +88,13 @@ C->record_method_not_compilable("unreachable loop"); return; } - _blocks._cnt = _num_blocks; + _blocks._cnt = number_of_blocks(); // Tarjan is using 1-based arrays, so these are some initialize flags tarjan[0]._size = tarjan[0]._semi = 0; tarjan[0]._label = &tarjan[0]; - uint i; - for( i=_num_blocks; i>=2; i-- ) { // For all vertices in DFS order + for (uint i = number_of_blocks(); i >= 2; i--) { // For all vertices in DFS order Tarjan *w = &tarjan[i]; // Get vertex from DFS // Step 2: @@ -130,19 +124,19 @@ } // Step 4: - for( i=2; i <= _num_blocks; i++ ) { + for (uint i = 2; i <= number_of_blocks(); i++) { Tarjan *w = &tarjan[i]; if( w->_dom != &tarjan[w->_semi] ) w->_dom = w->_dom->_dom; w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later } // No immediate dominator for the root - Tarjan *w = &tarjan[_broot->_pre_order]; + Tarjan *w = &tarjan[get_root_block()->_pre_order]; w->_dom = NULL; w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later // Convert the dominator tree array into my kind of graph - for( i=1; i<=_num_blocks;i++){// For all Tarjan vertices + for(uint i = 1; i <= number_of_blocks(); i++){ // For all Tarjan vertices Tarjan *t = &tarjan[i]; // Handy access Tarjan *tdom = t->_dom; // Handy access to immediate dominator if( tdom ) { // Root has no immediate dominator @@ -152,11 +146,10 @@ } else t->_block->_idom = NULL; // Root } - w->setdepth( _num_blocks+1 ); // Set depth in dominator tree + w->setdepth(number_of_blocks() + 1); // Set depth in dominator tree } -//----------------------------Block_Stack-------------------------------------- class Block_Stack { private: struct Block_Descr { @@ -214,26 +207,25 @@ } }; -//-------------------------most_frequent_successor----------------------------- // Find the index into the b->succs[] array of the most frequent successor. uint Block_Stack::most_frequent_successor( Block *b ) { uint freq_idx = 0; int eidx = b->end_idx(); - Node *n = b->_nodes[eidx]; + Node *n = b->get_node(eidx); int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode(); switch( op ) { case Op_CountedLoopEnd: case Op_If: { // Split frequency amongst children float prob = n->as_MachIf()->_prob; // Is succ[0] the TRUE branch or the FALSE branch? - if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse ) + if( b->get_node(eidx+1)->Opcode() == Op_IfFalse ) prob = 1.0f - prob; freq_idx = prob < PROB_FAIR; // freq=1 for succ[0] < 0.5 prob break; } case Op_Catch: // Split frequency amongst children for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ ) - if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index ) + if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index ) break; // Handle case of no fall-thru (e.g., check-cast MUST throw an exception) if( freq_idx == b->_num_succs ) freq_idx = 0; @@ -258,40 +250,38 @@ return freq_idx; } -//------------------------------DFS-------------------------------------------- // Perform DFS search. Setup 'vertex' as DFS to vertex mapping. Setup // 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent. 
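// A runnable toy sketch of what the renamed do_DFS() below computes: it
// hands Lengauer-Tarjan its pre-order numbering and lays blocks out in
// reverse post-order by filling slots from a decrementing counter at
// post-visit time. The adjacency-list CFG here is hypothetical and the
// code is an illustration, not part of the changeset.
#include <cstdio>
#include <utility>
#include <vector>
int main() {
  // Tiny CFG: 0 -> {1, 2}, 1 -> {3}, 2 -> {3}
  std::vector<std::vector<int> > succs(4);
  succs[0].push_back(1); succs[0].push_back(2);
  succs[1].push_back(3); succs[2].push_back(3);
  const int n = (int)succs.size();
  std::vector<int> pre(n, 0), rpo(n, -1);
  int pre_order = 1, rpo_counter = n;
  // Explicit stack of (block, next successor index), like Block_Stack.
  std::vector<std::pair<int, unsigned> > stack;
  stack.push_back(std::make_pair(0, 0u));
  pre[0] = pre_order++;
  while (!stack.empty()) {
    int b = stack.back().first;
    unsigned i = stack.back().second;
    if (i < succs[b].size()) {
      stack.back().second++;
      int s = succs[b][i];
      if (pre[s] == 0) {                 // pre_order == 0 means not visited yet
        pre[s] = pre_order++;
        stack.push_back(std::make_pair(s, 0u));
      }
    } else {
      rpo[b] = --rpo_counter;            // post-visit: reverse post-order slot
      stack.pop_back();
    }
  }
  for (int b = 0; b < n; ++b)
    std::printf("block %d: pre=%d rpo=%d\n", b, pre[b], rpo[b]);
  return 0;
}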
-uint PhaseCFG::DFS( Tarjan *tarjan ) { - Block *b = _broot; +uint PhaseCFG::do_DFS(Tarjan *tarjan, uint rpo_counter) { + Block* root_block = get_root_block(); uint pre_order = 1; - // Allocate stack of size _num_blocks+1 to avoid frequent realloc - Block_Stack bstack(tarjan, _num_blocks+1); + // Allocate stack of size number_of_blocks() + 1 to avoid frequent realloc + Block_Stack bstack(tarjan, number_of_blocks() + 1); // Push on stack the state for the first block - bstack.push(pre_order, b); + bstack.push(pre_order, root_block); ++pre_order; while (bstack.is_nonempty()) { if (!bstack.last_successor()) { // Walk over all successors in pre-order (DFS). - Block *s = bstack.next_successor(); - if (s->_pre_order == 0) { // Check for no-pre-order, not-visited + Block* next_block = bstack.next_successor(); + if (next_block->_pre_order == 0) { // Check for no-pre-order, not-visited // Push on stack the state of successor - bstack.push(pre_order, s); + bstack.push(pre_order, next_block); ++pre_order; } } else { // Build a reverse post-order in the CFG _blocks array Block *stack_top = bstack.pop(); - stack_top->_rpo = --_rpo_ctr; + stack_top->_rpo = --rpo_counter; _blocks.map(stack_top->_rpo, stack_top); } } return pre_order; } -//------------------------------COMPRESS--------------------------------------- void Tarjan::COMPRESS() { assert( _ancestor != 0, "" ); @@ -303,14 +293,12 @@ } } -//------------------------------EVAL------------------------------------------- Tarjan *Tarjan::EVAL() { if( !_ancestor ) return _label; COMPRESS(); return (_ancestor->_label->_semi >= _label->_semi) ? _label : _ancestor->_label; } -//------------------------------LINK------------------------------------------- void Tarjan::LINK( Tarjan *w, Tarjan *tarjan0 ) { Tarjan *s = w; while( w->_label->_semi < s->_child->_label->_semi ) { @@ -333,7 +321,6 @@ } } -//------------------------------setdepth--------------------------------------- void Tarjan::setdepth( uint stack_size ) { Tarjan **top = NEW_RESOURCE_ARRAY(Tarjan*, stack_size); Tarjan **next = top; @@ -362,8 +349,7 @@ } while (last < top); } -//*********************** DOMINATORS ON THE SEA OF NODES*********************** -//------------------------------NTarjan---------------------------------------- +// Compute dominators on the Sea of Nodes form // A data structure that holds all the information needed to find dominators. struct NTarjan { Node *_control; // Control node associated with this info @@ -396,7 +382,6 @@ #endif }; -//------------------------------Dominator-------------------------------------- // Compute the dominator tree of the sea of nodes. This version walks all CFG // nodes (using the is_CFG() call) and places them in a dominator tree. Thus, // it needs a count of the CFG nodes for the mapping table. This is the @@ -517,7 +502,6 @@ } } -//------------------------------DFS-------------------------------------------- // Perform DFS search. Setup 'vertex' as DFS to vertex mapping. Setup // 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent. int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) { @@ -560,7 +544,6 @@ return dfsnum; } -//------------------------------COMPRESS--------------------------------------- void NTarjan::COMPRESS() { assert( _ancestor != 0, "" ); @@ -572,14 +555,12 @@ } } -//------------------------------EVAL------------------------------------------- NTarjan *NTarjan::EVAL() { if( !_ancestor ) return _label; COMPRESS(); return (_ancestor->_label->_semi >= _label->_semi) ? 
_label : _ancestor->_label; } -//------------------------------LINK------------------------------------------- void NTarjan::LINK( NTarjan *w, NTarjan *ntarjan0 ) { NTarjan *s = w; while( w->_label->_semi < s->_child->_label->_semi ) { @@ -602,7 +583,6 @@ } } -//------------------------------setdepth--------------------------------------- void NTarjan::setdepth( uint stack_size, uint *dom_depth ) { NTarjan **top = NEW_RESOURCE_ARRAY(NTarjan*, stack_size); NTarjan **next = top; @@ -631,7 +611,6 @@ } while (last < top); } -//------------------------------dump------------------------------------------- #ifndef PRODUCT void NTarjan::dump(int offset) const { // Dump the data from this node diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/gcm.cpp --- a/src/share/vm/opto/gcm.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/gcm.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -102,12 +102,12 @@ uint j = 0; if (pb->_num_succs != 1) { // More then 1 successor? // Search for successor - uint max = pb->_nodes.size(); + uint max = pb->number_of_nodes(); assert( max > 1, "" ); uint start = max - pb->_num_succs; // Find which output path belongs to projection for (j = start; j < max; j++) { - if( pb->_nodes[j] == in0 ) + if( pb->get_node(j) == in0 ) break; } assert( j < max, "must find" ); @@ -121,27 +121,30 @@ //------------------------------schedule_pinned_nodes-------------------------- // Set the basic block for Nodes pinned into blocks -void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) { +void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { // Allocate node stack of size C->unique()+8 to avoid frequent realloc - GrowableArray<Node*> spstack(C->unique()+8); + GrowableArray<Node*> spstack(C->unique() + 8); spstack.push(_root); - while ( spstack.is_nonempty() ) { - Node *n = spstack.pop(); - if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited - if( n->pinned() && !has_block(n)) { // Pinned? Nail it down! - assert( n->in(0), "pinned Node must have Control" ); + while (spstack.is_nonempty()) { + Node* node = spstack.pop(); + if (!visited.test_set(node->_idx)) { // Test node and flag it as visited + if (node->pinned() && !has_block(node)) { // Pinned? Nail it down! + assert(node->in(0), "pinned Node must have Control"); // Before setting block replace block_proj control edge - replace_block_proj_ctrl(n); - Node *input = n->in(0); + replace_block_proj_ctrl(node); + Node* input = node->in(0); while (!input->is_block_start()) { input = input->in(0); } - Block *b = get_block_for_node(input); // Basic block of controlling input - schedule_node_into_block(n, b); + Block* block = get_block_for_node(input); // Basic block of controlling input + schedule_node_into_block(node, block); } - for( int i = n->req() - 1; i >= 0; --i ) { // For all inputs - if( n->in(i) != NULL ) - spstack.push(n->in(i)); + + // process all inputs that are non NULL + for (int i = node->req() - 1; i >= 0; --i) { + if (node->in(i) != NULL) { + spstack.push(node->in(i)); + } } } } @@ -205,32 +208,29 @@ // which all their inputs occur.
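// A minimal sketch of the placement rule used by schedule_early() below:
// every input's block dominates an unpinned node, so the earliest legal
// block is the input block with the greatest dominator-tree depth. The
// tables and find_deepest_input() stand-in here are hypothetical toy code,
// not the changeset's implementation.
#include <cstdio>
#include <vector>
static int find_deepest_input(const std::vector<int>& input_blocks,
                              const std::vector<int>& dom_depth) {
  int best = input_blocks[0];
  for (unsigned i = 1; i < input_blocks.size(); ++i) {
    if (dom_depth[input_blocks[i]] > dom_depth[best]) {
      best = input_blocks[i];  // a strictly deeper input block wins
    }
  }
  return best;
}
int main() {
  std::vector<int> dom_depth(4);
  dom_depth[0] = 0; dom_depth[1] = 1; dom_depth[2] = 2; dom_depth[3] = 2;
  std::vector<int> inputs;
  inputs.push_back(1); inputs.push_back(3);  // blocks defining the node's inputs
  std::printf("earliest block: %d\n", find_deepest_input(inputs, dom_depth));
  return 0;
}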
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) { // Allocate stack with enough space to avoid frequent realloc - Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats - // roots.push(_root); _root will be processed among C->top() inputs + Node_Stack nstack(roots.Size() + 8); + // _root will be processed among C->top() inputs roots.push(C->top()); visited.set(C->top()->_idx); while (roots.size() != 0) { // Use local variables nstack_top_n & nstack_top_i to cache values // on stack's top. - Node *nstack_top_n = roots.pop(); - uint nstack_top_i = 0; -//while_nstack_nonempty: + Node* parent_node = roots.pop(); + uint input_index = 0; + while (true) { - // Get parent node and next input's index from stack's top. - Node *n = nstack_top_n; - uint i = nstack_top_i; - - if (i == 0) { + if (input_index == 0) { // Fixup some control. Constants without control get attached // to root and nodes that use is_block_proj() nodes should be attached // to the region that starts their block. - const Node *in0 = n->in(0); - if (in0 != NULL) { // Control-dependent? - replace_block_proj_ctrl(n); - } else { // n->in(0) == NULL - if (n->req() == 1) { // This guy is a constant with NO inputs? - n->set_req(0, _root); + const Node* control_input = parent_node->in(0); + if (control_input != NULL) { + replace_block_proj_ctrl(parent_node); + } else { + // Is a constant with NO inputs? + if (parent_node->req() == 1) { + parent_node->set_req(0, _root); } } } @@ -239,37 +239,47 @@ // input is already in a block we quit following inputs (to avoid // cycles). Instead we put that Node on a worklist to be handled // later (since IT'S inputs may not have a block yet). - bool done = true; // Assume all n's inputs will be processed - while (i < n->len()) { // For all inputs - Node *in = n->in(i); // Get input - ++i; - if (in == NULL) continue; // Ignore NULL, missing inputs + + // Assume all n's inputs will be processed + bool done = true; + + while (input_index < parent_node->len()) { + Node* in = parent_node->in(input_index++); + if (in == NULL) { + continue; + } + int is_visited = visited.test_set(in->_idx); - if (!has_block(in)) { // Missing block selection? + if (!has_block(in)) { if (is_visited) { - // assert( !visited.test(in->_idx), "did not schedule early" ); return false; } - nstack.push(n, i); // Save parent node and next input's index. - nstack_top_n = in; // Process current input now. - nstack_top_i = 0; - done = false; // Not all n's inputs processed. - break; // continue while_nstack_nonempty; - } else if (!is_visited) { // Input not yet visited? - roots.push(in); // Visit this guy later, using worklist + // Save parent node and next input's index. + nstack.push(parent_node, input_index); + // Process current input now. + parent_node = in; + input_index = 0; + // Not all n's inputs processed. + done = false; + break; + } else if (!is_visited) { + // Visit this guy later, using worklist + roots.push(in); } } + if (done) { // All of n's inputs have been processed, complete post-processing. // Some instructions are pinned into a block. These include Region, // Phi, Start, Return, and other control-dependent instructions and // any projections which depend on them. - if (!n->pinned()) { + if (!parent_node->pinned()) { // Set earliest legal block. 
- map_node_to_block(n, find_deepest_input(n, this)); + Block* earliest_block = find_deepest_input(parent_node, this); + map_node_to_block(parent_node, earliest_block); } else { - assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge"); + assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge"); } if (nstack.is_empty()) { @@ -278,12 +288,12 @@ break; } // Get saved parent node and next input's index. - nstack_top_n = nstack.node(); - nstack_top_i = nstack.index(); + parent_node = nstack.node(); + input_index = nstack.index(); nstack.pop(); - } // if (done) - } // while (true) - } // while (roots.size() != 0) + } + } + } return true; } @@ -847,7 +857,7 @@ //------------------------------ComputeLatenciesBackwards---------------------- // Compute the latency of all the instructions. -void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) { +void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) { #ifndef PRODUCT if (trace_opto_pipelining()) tty->print("\n#---- ComputeLatenciesBackwards ----\n"); @@ -870,31 +880,34 @@ // Set the latency for this instruction #ifndef PRODUCT if (trace_opto_pipelining()) { - tty->print("# latency_to_inputs: node_latency[%d] = %d for node", - n->_idx, _node_latency->at_grow(n->_idx)); + tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); dump(); } #endif - if (n->is_Proj()) + if (n->is_Proj()) { n = n->in(0); + } - if (n->is_Root()) + if (n->is_Root()) { return; + } uint nlen = n->len(); - uint use_latency = _node_latency->at_grow(n->_idx); + uint use_latency = get_latency_for_node(n); uint use_pre_order = get_block_for_node(n)->_pre_order; - for ( uint j=0; j<nlen; j++ ) { - Node *def = n->in(j); - if (!def || def == n) + for (uint j = 0; j < nlen; j++) { + Node* def = n->in(j); + if (!def || def == n) { continue; + } // Walk backwards thru projections - if (def->is_Proj()) + if (def->is_Proj()) { def = def->in(0); + } #ifndef PRODUCT if (trace_opto_pipelining()) { @@ -907,22 +920,20 @@ Block *def_block = get_block_for_node(def); uint def_pre_order = def_block ?
def_block->_pre_order : 0; - if ( (use_pre_order < def_pre_order) || - (use_pre_order == def_pre_order && n->is_Phi()) ) + if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) { continue; + } uint delta_latency = n->latency(j); uint current_latency = delta_latency + use_latency; - if (_node_latency->at_grow(def->_idx) < current_latency) { - _node_latency->at_put_grow(def->_idx, current_latency); + if (get_latency_for_node(def) < current_latency) { + set_latency_for_node(def, current_latency); } #ifndef PRODUCT if (trace_opto_pipelining()) { - tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", - use_latency, j, delta_latency, current_latency, def->_idx, - _node_latency->at_grow(def->_idx)); + tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def)); } #endif } @@ -957,7 +968,7 @@ return 0; uint nlen = use->len(); - uint nl = _node_latency->at_grow(use->_idx); + uint nl = get_latency_for_node(use); for ( uint j=0; j<nlen; j++ ) { if (use->in(j) == n) { @@ -992,8 +1003,7 @@ // Set the latency for this instruction #ifndef PRODUCT if (trace_opto_pipelining()) { - tty->print("# latency_from_outputs: node_latency[%d] = %d for node", - n->_idx, _node_latency->at_grow(n->_idx)); + tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); dump(); } #endif @@ -1006,7 +1016,7 @@ if (latency < l) latency = l; } - _node_latency->at_put_grow(n->_idx, latency); + set_latency_for_node(n, latency); } //------------------------------hoist_to_cheaper_block------------------------- @@ -1016,9 +1026,9 @@ const double delta = 1+PROB_UNLIKELY_MAG(4); Block* least = LCA; double least_freq = least->_freq; - uint target = _node_latency->at_grow(self->_idx); - uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx); - uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx); + uint target = get_latency_for_node(self); + uint start_latency = get_latency_for_node(LCA->head()); + uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx())); bool in_latency = (target <= start_latency); const Block* root_block = get_block_for_node(_root); @@ -1035,14 +1045,13 @@ #ifndef PRODUCT if (trace_opto_pipelining()) { - tty->print("# Find cheaper block for latency %d: ", - _node_latency->at_grow(self->_idx)); + tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self)); self->dump(); tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", LCA->_pre_order, - LCA->_nodes[0]->_idx, + LCA->head()->_idx, start_latency, - LCA->_nodes[LCA->end_idx()]->_idx, + LCA->get_node(LCA->end_idx())->_idx, end_latency, least_freq); } @@ -1065,14 +1074,14 @@ if (mach && LCA == root_block) break; - uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx); + uint start_lat = get_latency_for_node(LCA->head()); uint end_idx = LCA->end_idx(); - uint end_lat = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx); + uint end_lat = get_latency_for_node(LCA->get_node(end_idx)); double LCA_freq = LCA->_freq; #ifndef PRODUCT if (trace_opto_pipelining()) { tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", - LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq); + LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq); } #endif cand_cnt++; @@ -1109,7 +1118,7 @@ tty->print_cr("# Change latency for [%4d] from
%d to %d", self->_idx, target, end_latency); } #endif - _node_latency->at_put_grow(self->_idx, end_latency); + set_latency_for_node(self, end_latency); partial_latency_of_defs(self); } @@ -1255,7 +1264,7 @@ } // end ScheduleLate //------------------------------GlobalCodeMotion------------------------------- -void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) { +void PhaseCFG::global_code_motion() { ResourceMark rm; #ifndef PRODUCT @@ -1265,21 +1274,22 @@ #endif // Initialize the node to block mapping for things on the proj_list - for (uint i = 0; i < proj_list.size(); i++) { - unmap_node_from_block(proj_list[i]); + for (uint i = 0; i < _matcher.number_of_projections(); i++) { + unmap_node_from_block(_matcher.get_projection(i)); } // Set the basic block for Nodes pinned into blocks - Arena *a = Thread::current()->resource_area(); - VectorSet visited(a); - schedule_pinned_nodes( visited ); + Arena* arena = Thread::current()->resource_area(); + VectorSet visited(arena); + schedule_pinned_nodes(visited); // Find the earliest Block any instruction can be placed in. Some // instructions are pinned into Blocks. Unpinned instructions can // appear in last block in which all their inputs occur. visited.Clear(); - Node_List stack(a); - stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list + Node_List stack(arena); + // Pre-grow the list + stack.map((C->unique() >> 1) + 16, NULL); if (!schedule_early(visited, stack)) { // Bailout without retry C->record_method_not_compilable("early schedule failed"); @@ -1287,29 +1297,25 @@ } // Build Def-Use edges. - proj_list.push(_root); // Add real root as another root - proj_list.pop(); - // Compute the latency information (via backwards walk) for all the // instructions in the graph _node_latency = new GrowableArray<uint>(); // resource_area allocation - if( C->do_scheduling() ) - ComputeLatenciesBackwards(visited, stack); + if (C->do_scheduling()) { + compute_latencies_backwards(visited, stack); + } // Now schedule all codes as LATE as possible. This is the LCA in the // dominator tree of all USES of a value. Pick the block with the least // loop nesting depth that is lowest in the dominator tree. // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() ) schedule_late(visited, stack); - if( C->failing() ) { + if (C->failing()) { // schedule_late fails only when graph is incorrect. assert(!VerifyGraphEdges, "verification should have failed"); return; } - unique = C->unique(); - #ifndef PRODUCT if (trace_opto_pipelining()) { tty->print("\n---- Detect implicit null checks ----\n"); @@ -1332,10 +1338,11 @@ // By reversing the loop direction we get a very minor gain on mpegaudio. // Feel free to revert to a forward loop for clarity. // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) { - for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) { - Node *proj = matcher._null_check_tests[i ]; - Node *val = matcher._null_check_tests[i+1]; - get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons); + for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) { + Node* proj = _matcher._null_check_tests[i]; + Node* val = _matcher._null_check_tests[i + 1]; + Block* block = get_block_for_node(proj); + implicit_null_check(block, proj, val, allowed_reasons); // The implicit_null_check will only perform the transformation // if the null branch is truly uncommon, *and* it leads to an // uncommon trap.
Combined with the too_many_traps guards @@ -1352,11 +1359,11 @@ // Schedule locally. Right now a simple topological sort. // Later, do a real latency aware scheduler. - uint max_idx = C->unique(); - GrowableArray<int> ready_cnt(max_idx, max_idx, -1); + GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1); visited.Clear(); - for (uint i = 0; i < _num_blocks; i++) { - if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) { + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + if (!schedule_local(block, ready_cnt, visited)) { if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { C->record_method_not_compilable("local schedule failed"); } @@ -1366,15 +1373,17 @@ // If we inserted any instructions between a Call and his CatchNode, // clone the instructions on all paths below the Catch. - for (uint i = 0; i < _num_blocks; i++) { - _blocks[i]->call_catch_cleanup(this, C); + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + call_catch_cleanup(block); } #ifndef PRODUCT if (trace_opto_pipelining()) { tty->print("\n---- After GlobalCodeMotion ----\n"); - for (uint i = 0; i < _num_blocks; i++) { - _blocks[i]->dump(); + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + block->dump(); } } #endif @@ -1382,10 +1391,29 @@ _node_latency = (GrowableArray<uint> *)0xdeadbeef; } +bool PhaseCFG::do_global_code_motion() { + + build_dominator_tree(); + if (C->failing()) { + return false; + } + + NOT_PRODUCT( C->verify_graph_edges(); ) + + estimate_block_frequency(); + + global_code_motion(); + + if (C->failing()) { + return false; + } + + return true; +} //------------------------------Estimate_Block_Frequency----------------------- // Estimate block frequencies based on IfNode probabilities. -void PhaseCFG::Estimate_Block_Frequency() { +void PhaseCFG::estimate_block_frequency() { // Force conditional branches leading to uncommon traps to be unlikely, // not because we get to the uncommon_trap with less relative frequency, // but because an uncommon_trap will cause a deopt, so we only take the trap // there once.
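// A runnable toy sketch of the worklist that follows: it walks backwards
// from blocks holding uncommon traps and, through predecessors with a
// single successor, marks the whole straight-line chain, so the branch
// entering the chain can be treated as unlikely. The predecessor table is
// hypothetical; this is an illustration, not the changeset's code.
#include <cstdio>
#include <vector>
int main() {
  // preds[b]: predecessors of block b; succ_count[b]: successors of b.
  std::vector<std::vector<int> > preds(4);
  preds[1].push_back(0); preds[2].push_back(1); preds[3].push_back(2);
  int succ_count[4] = {2, 1, 1, 0};
  std::vector<bool> unlikely(4, false);
  std::vector<int> worklist;
  worklist.push_back(3);                   // block 3 ends in an uncommon trap
  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();
    unlikely[b] = true;
    for (unsigned i = 0; i < preds[b].size(); ++i) {
      int p = preds[b][i];
      // Only straight-line code is pulled in: a predecessor with one
      // successor has no way to avoid the trap once entered.
      if (succ_count[p] == 1 && !unlikely[p]) worklist.push_back(p);
    }
  }
  for (int b = 0; b < 4; ++b)
    std::printf("block %d unlikely: %s\n", b, unlikely[b] ? "yes" : "no");
  return 0;
}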
if (C->do_freq_based_layout()) { Block_List worklist; - Block* root_blk = _blocks[0]; + Block* root_blk = get_block(0); for (uint i = 1; i < root_blk->num_preds(); i++) { Block *pb = get_block_for_node(root_blk->pred(i)); if (pb->has_uncommon_code()) { @@ -1402,7 +1430,9 @@ } while (worklist.size() > 0) { Block* uct = worklist.pop(); - if (uct == _broot) continue; + if (uct == get_root_block()) { + continue; + } for (uint i = 1; i < uct->num_preds(); i++) { Block *pb = get_block_for_node(uct->pred(i)); if (pb->_num_succs == 1) { @@ -1426,12 +1456,12 @@ _root_loop->scale_freq(); // Save outmost loop frequency for LRG frequency threshold - _outer_loop_freq = _root_loop->outer_loop_freq(); + _outer_loop_frequency = _root_loop->outer_loop_freq(); // force paths ending at uncommon traps to be infrequent if (!C->do_freq_based_layout()) { Block_List worklist; - Block* root_blk = _blocks[0]; + Block* root_blk = get_block(0); for (uint i = 1; i < root_blk->num_preds(); i++) { Block *pb = get_block_for_node(root_blk->pred(i)); if (pb->has_uncommon_code()) { @@ -1451,8 +1481,8 @@ } #ifdef ASSERT - for (uint i = 0; i < _num_blocks; i++ ) { - Block *b = _blocks[i]; + for (uint i = 0; i < number_of_blocks(); i++) { + Block* b = get_block(i); assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); } #endif @@ -1476,16 +1506,16 @@ CFGLoop* PhaseCFG::create_loop_tree() { #ifdef ASSERT - assert( _blocks[0] == _broot, "" ); - for (uint i = 0; i < _num_blocks; i++ ) { - Block *b = _blocks[i]; + assert(get_block(0) == get_root_block(), "first block should be root block"); + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); // Check that _loop field are clear...we could clear them if not. - assert(b->_loop == NULL, "clear _loop expected"); + assert(block->_loop == NULL, "clear _loop expected"); // Sanity check that the RPO numbering is reflected in the _blocks array. // It doesn't have to be for the loop tree to be built, but if it is not, // then the blocks have been reordered since dom graph building...which // may question the RPO numbering - assert(b->_rpo == i, "unexpected reverse post order number"); + assert(block->_rpo == i, "unexpected reverse post order number"); } #endif @@ -1495,11 +1525,11 @@ Block_List worklist; // Assign blocks to loops - for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block - Block *b = _blocks[i]; + for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block + Block* block = get_block(i); - if (b->head()->is_Loop()) { - Block* loop_head = b; + if (block->head()->is_Loop()) { + Block* loop_head = block; assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); Block* tail = get_block_for_node(tail_n); @@ -1533,23 +1563,23 @@ // Create a member list for each loop consisting // of both blocks and (immediate child) loops. - for (uint i = 0; i < _num_blocks; i++) { - Block *b = _blocks[i]; - CFGLoop* lp = b->_loop; + for (uint i = 0; i < number_of_blocks(); i++) { + Block* block = get_block(i); + CFGLoop* lp = block->_loop; if (lp == NULL) { // Not assigned to a loop. Add it to the method's pseudo loop. 
- b->_loop = root_loop; + block->_loop = root_loop; lp = root_loop; } - if (lp == root_loop || b != lp->head()) { // loop heads are already members - lp->add_member(b); + if (lp == root_loop || block != lp->head()) { // loop heads are already members + lp->add_member(block); } if (lp != root_loop) { if (lp->parent() == NULL) { // Not a nested loop. Make it a child of the method's pseudo loop. root_loop->add_nested_loop(lp); } - if (b == lp->head()) { + if (block == lp->head()) { // Add nested loop to member list of parent loop. lp->parent()->add_member(lp); } @@ -1696,7 +1726,7 @@ // Determine the probability of reaching successor 'i' from the receiver block. float Block::succ_prob(uint i) { int eidx = end_idx(); - Node *n = _nodes[eidx]; // Get ending Node + Node *n = get_node(eidx); // Get ending Node int op = n->Opcode(); if (n->is_Mach()) { @@ -1731,7 +1761,7 @@ float prob = n->as_MachIf()->_prob; assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); // If succ[i] is the FALSE branch, invert path info - if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) { + if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) { return 1.0f - prob; // not taken } else { return prob; // taken @@ -1743,7 +1773,7 @@ return 1.0f/_num_succs; case Op_Catch: { - const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); + const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); if (ci->_con == CatchProjNode::fall_through_index) { // Fall-thru path gets the lion's share. return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs; @@ -1780,7 +1810,7 @@ // Return the number of fall-through candidates for a block int Block::num_fall_throughs() { int eidx = end_idx(); - Node *n = _nodes[eidx]; // Get ending Node + Node *n = get_node(eidx); // Get ending Node int op = n->Opcode(); if (n->is_Mach()) { @@ -1804,7 +1834,7 @@ case Op_Catch: { for (uint i = 0; i < _num_succs; i++) { - const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); + const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); if (ci->_con == CatchProjNode::fall_through_index) { return 1; } @@ -1832,14 +1862,14 @@ // Return true if a specific successor could be fall-through target. bool Block::succ_fall_through(uint i) { int eidx = end_idx(); - Node *n = _nodes[eidx]; // Get ending Node + Node *n = get_node(eidx); // Get ending Node int op = n->Opcode(); if (n->is_Mach()) { if (n->is_MachNullCheck()) { // In theory, either side can fall-thru, for simplicity sake, // let's say only the false branch can now. 
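As an aside on the branch-probability logic in Block::succ_prob above: the rule is simply that an IfFalse projection inverts the MachIf probability, while a Catch gives its fall-through successor almost everything and each exceptional successor a vanishingly small share. A minimal standalone sketch follows, assuming plain floats in place of HotSpot's node types and 1e-5 as an illustrative value for PROB_UNLIKELY_MAG(5):

  #include <cstdio>

  static const float prob_unlikely_mag5 = 1e-5f;  // assumed stand-in value

  // 'p' is the MachIf probability of the taken (IfTrue) path; if successor i
  // turns out to be the IfFalse projection, the path probability is inverted.
  float if_succ_prob(float p, bool succ_is_if_false) {
    return succ_is_if_false ? 1.0f - p : p;
  }

  // For a Catch, the fall-through successor gets the lion's share and every
  // exceptional successor a tiny fixed probability.
  float catch_succ_prob(bool is_fall_through, unsigned num_succs) {
    return is_fall_through ? 1.0f - prob_unlikely_mag5 * num_succs
                           : prob_unlikely_mag5;
  }

  int main() {
    std::printf("IfTrue  at p=0.9: %f\n", if_succ_prob(0.9f, false)); // 0.900000
    std::printf("IfFalse at p=0.9: %f\n", if_succ_prob(0.9f, true));  // 0.100000
    std::printf("Catch fall-through, 2 succs: %f\n", catch_succ_prob(true, 2));
    return 0;
  }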
- return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse; + return get_node(i + eidx + 1)->Opcode() == Op_IfFalse; } op = n->as_Mach()->ideal_Opcode(); } @@ -1853,7 +1883,7 @@ return true; case Op_Catch: { - const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); + const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); return ci->_con == CatchProjNode::fall_through_index; } @@ -1877,7 +1907,7 @@ // Update the probability of a two-branch to be uncommon void Block::update_uncommon_branch(Block* ub) { int eidx = end_idx(); - Node *n = _nodes[eidx]; // Get ending Node + Node *n = get_node(eidx); // Get ending Node int op = n->as_Mach()->ideal_Opcode(); @@ -1893,7 +1923,7 @@ // If ub is the true path, make the proability small, else // ub is the false path, and make the probability large - bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse); + bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse); // Get existing probability float p = n->as_MachIf()->_prob; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/generateOptoStub.cpp --- a/src/share/vm/opto/generateOptoStub.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/generateOptoStub.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -61,6 +61,7 @@ JVMState* jvms = new (C) JVMState(0); jvms->set_bci(InvocationEntryBci); jvms->set_monoff(max_map); + jvms->set_scloff(max_map); jvms->set_endoff(max_map); { SafePointNode *map = new (C) SafePointNode( max_map, jvms ); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/graphKit.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1501,6 +1501,25 @@ } } +bool GraphKit::can_move_pre_barrier() const { + BarrierSet* bs = Universe::heap()->barrier_set(); + switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + return true; // Can move it if no safepoint + + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + return true; // There is no pre-barrier + + case BarrierSet::Other: + default : + ShouldNotReachHere(); + } + return false; +} + void GraphKit::post_barrier(Node* ctl, Node* store, Node* obj, @@ -3551,6 +3570,8 @@ } else { // In this case both val_type and alias_idx are unused. assert(pre_val != NULL, "must be loaded already"); + // Nothing to be done if pre_val is null. + if (pre_val->bottom_type() == TypePtr::NULL_PTR) return; assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here"); } assert(bt == T_OBJECT, "or we shouldn't be here"); @@ -3595,7 +3616,7 @@ if (do_load) { // load original value // alias_idx correct?? - pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx); + pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx); } // if (pre_val != NULL) @@ -3804,8 +3825,13 @@ TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0); int value_field_idx = C->get_alias_index(value_field_type); - return make_load(ctrl, basic_plus_adr(str, str, value_offset), - value_type, T_OBJECT, value_field_idx); + Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset), + value_type, T_OBJECT, value_field_idx); + // String.value field is known to be @Stable. 
+ if (UseImplicitStableValues) { + load = cast_array_to_stable(load, value_type); + } + return load; } void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) { @@ -3823,12 +3849,9 @@ const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), false, NULL, 0); const TypePtr* value_field_type = string_type->add_offset(value_offset); - const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, - TypeAry::make(TypeInt::CHAR,TypeInt::POS), - ciTypeArrayKlass::make(T_CHAR), true, 0); - int value_field_idx = C->get_alias_index(value_field_type); - store_to_memory(ctrl, basic_plus_adr(str, value_offset), - value, T_OBJECT, value_field_idx); + + store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type, + value, TypeAryPtr::CHARS, T_OBJECT); } void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) { @@ -3840,3 +3863,9 @@ store_to_memory(ctrl, basic_plus_adr(str, count_offset), value, T_INT, count_field_idx); } + +Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { + // Reify the property as a CastPP node in Ideal graph to comply with monotonicity + // assumption of CCP analysis. + return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true))); +} diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/graphKit.hpp --- a/src/share/vm/opto/graphKit.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/graphKit.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -695,6 +695,10 @@ void write_barrier_post(Node *store, Node* obj, Node* adr, uint adr_idx, Node* val, bool use_precise); + // Allow reordering of pre-barrier with oop store and/or post-barrier. + // Used for load_store operations which loads old value. + bool can_move_pre_barrier() const; + // G1 pre/post barriers void g1_write_barrier_pre(bool do_load, Node* obj, @@ -832,6 +836,9 @@ // Insert a loop predicate into the graph void add_predicate(int nargs = 0); void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs); + + // Produce new array node of stable type + Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type); }; // Helper class to support building of control flow branches. Upon diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/idealGraphPrinter.cpp --- a/src/share/vm/opto/idealGraphPrinter.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -416,7 +416,7 @@ if (C->cfg() != NULL) { Block* block = C->cfg()->get_block_for_node(node); if (block == NULL) { - print_prop("block", C->cfg()->_blocks[0]->_pre_order); + print_prop("block", C->cfg()->get_block(0)->_pre_order); } else { print_prop("block", block->_pre_order); } @@ -637,10 +637,10 @@ if (C->cfg() != NULL) { // once we have a CFG there are some nodes that aren't really // reachable but are in the CFG so add them here. 
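The GraphKit::can_move_pre_barrier hunk earlier in graphKit.cpp encodes a per-collector rule: G1's SATB pre-barrier only needs the old value, so it may be reordered past the oop store as long as no safepoint intervenes, and the card-table collectors have no pre-barrier at all. A standalone sketch of that dispatch, using an assumed stand-in enum rather than HotSpot's BarrierSet::Name:

  #include <cassert>

  // Stand-in for BarrierSet::Name; values mirror the cases in the hunk above.
  enum BarrierKind {
    G1SATBCT, G1SATBCTLogging,            // G1 SATB variants
    CardTableModRef, CardTableExtension,  // card-table collectors
    ModRef
  };

  bool can_move_pre_barrier(BarrierKind kind) {
    switch (kind) {
      case G1SATBCT:
      case G1SATBCTLogging:
        return true;  // movable, provided no safepoint between load and store
      case CardTableModRef:
      case CardTableExtension:
      case ModRef:
        return true;  // trivially movable: there is no pre-barrier at all
    }
    assert(false && "unknown barrier kind");
    return false;
  }

  int main() { return can_move_pre_barrier(G1SATBCTLogging) ? 0 : 1; }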
- for (uint i = 0; i < C->cfg()->_blocks.size(); i++) { - Block *b = C->cfg()->_blocks[i]; - for (uint s = 0; s < b->_nodes.size(); s++) { - nodeStack.push(b->_nodes[s]); + for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { + Block* block = C->cfg()->get_block(i); + for (uint s = 0; s < block->number_of_nodes(); s++) { + nodeStack.push(block->get_node(s)); } } } @@ -698,24 +698,24 @@ tail(EDGES_ELEMENT); if (C->cfg() != NULL) { head(CONTROL_FLOW_ELEMENT); - for (uint i = 0; i < C->cfg()->_blocks.size(); i++) { - Block *b = C->cfg()->_blocks[i]; + for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { + Block* block = C->cfg()->get_block(i); begin_head(BLOCK_ELEMENT); - print_attr(BLOCK_NAME_PROPERTY, b->_pre_order); + print_attr(BLOCK_NAME_PROPERTY, block->_pre_order); end_head(); head(SUCCESSORS_ELEMENT); - for (uint s = 0; s < b->_num_succs; s++) { + for (uint s = 0; s < block->_num_succs; s++) { begin_elem(SUCCESSOR_ELEMENT); - print_attr(BLOCK_NAME_PROPERTY, b->_succs[s]->_pre_order); + print_attr(BLOCK_NAME_PROPERTY, block->_succs[s]->_pre_order); end_elem(); } tail(SUCCESSORS_ELEMENT); head(NODES_ELEMENT); - for (uint s = 0; s < b->_nodes.size(); s++) { + for (uint s = 0; s < block->number_of_nodes(); s++) { begin_elem(NODE_ELEMENT); - print_attr(NODE_ID_PROPERTY, get_node_id(b->_nodes[s])); + print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s))); end_elem(); } tail(NODES_ELEMENT); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/ifg.cpp --- a/src/share/vm/opto/ifg.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/ifg.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -37,12 +37,9 @@ #include "opto/memnode.hpp" #include "opto/opcodes.hpp" -//============================================================================= -//------------------------------IFG-------------------------------------------- PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) { } -//------------------------------init------------------------------------------- void PhaseIFG::init( uint maxlrg ) { _maxlrg = maxlrg; _yanked = new (_arena) VectorSet(_arena); @@ -59,7 +56,6 @@ } } -//------------------------------add-------------------------------------------- // Add edge between vertices a & b. These are sorted (triangular matrix), // then the smaller number is inserted in the larger numbered array. int PhaseIFG::add_edge( uint a, uint b ) { @@ -71,7 +67,6 @@ return _adjs[a].insert( b ); } -//------------------------------add_vector------------------------------------- // Add an edge between 'a' and everything in the vector. void PhaseIFG::add_vector( uint a, IndexSet *vec ) { // IFG is triangular, so do the inserts where 'a' < 'b'. @@ -86,7 +81,6 @@ } } -//------------------------------test------------------------------------------- // Is there an edge between a and b? 
int PhaseIFG::test_edge( uint a, uint b ) const { // Sort a and b, so that a is larger @@ -95,7 +89,6 @@ return _adjs[a].member(b); } -//------------------------------SquareUp--------------------------------------- // Convert triangular matrix to square matrix void PhaseIFG::SquareUp() { assert( !_is_square, "only on triangular" ); @@ -111,7 +104,6 @@ _is_square = true; } -//------------------------------Compute_Effective_Degree----------------------- // Compute effective degree in bulk void PhaseIFG::Compute_Effective_Degree() { assert( _is_square, "only on square" ); @@ -120,7 +112,6 @@ lrgs(i).set_degree(effective_degree(i)); } -//------------------------------test_edge_sq----------------------------------- int PhaseIFG::test_edge_sq( uint a, uint b ) const { assert( _is_square, "only on square" ); // Swap, so that 'a' has the lesser count. Then binary search is on @@ -130,7 +121,6 @@ return _adjs[a].member(b); } -//------------------------------Union------------------------------------------ // Union edges of B into A void PhaseIFG::Union( uint a, uint b ) { assert( _is_square, "only on square" ); @@ -146,7 +136,6 @@ } } -//------------------------------remove_node------------------------------------ // Yank a Node and all connected edges from the IFG. Return a // list of neighbors (edges) yanked. IndexSet *PhaseIFG::remove_node( uint a ) { @@ -165,7 +154,6 @@ return neighbors(a); } -//------------------------------re_insert-------------------------------------- // Re-insert a yanked Node. void PhaseIFG::re_insert( uint a ) { assert( _is_square, "only on square" ); @@ -180,7 +168,6 @@ } } -//------------------------------compute_degree--------------------------------- // Compute the degree between 2 live ranges. If both live ranges are // aligned-adjacent powers-of-2 then we use the MAX size. If either is // mis-aligned (or for Fat-Projections, not-adjacent) then we have to @@ -196,7 +183,6 @@ return tmp; } -//------------------------------effective_degree------------------------------- // Compute effective degree for this live range. If both live ranges are // aligned-adjacent powers-of-2 then we use the MAX size. If either is // mis-aligned (or for Fat-Projections, not-adjacent) then we have to @@ -221,7 +207,6 @@ #ifndef PRODUCT -//------------------------------dump------------------------------------------- void PhaseIFG::dump() const { tty->print_cr("-- Interference Graph --%s--", _is_square ? "square" : "triangular" ); @@ -260,7 +245,6 @@ tty->print("\n"); } -//------------------------------stats------------------------------------------ void PhaseIFG::stats() const { ResourceMark rm; int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2); @@ -276,7 +260,6 @@ tty->print_cr(""); } -//------------------------------verify----------------------------------------- void PhaseIFG::verify( const PhaseChaitin *pc ) const { // IFG is square, sorted and no need for Find for( uint i = 0; i < _maxlrg; i++ ) { @@ -298,7 +281,6 @@ } #endif -//------------------------------interfere_with_live---------------------------- // Interfere this register with everything currently live. Use the RegMasks // to trim the set of possible interferences. Return a count of register-only // interferences as an estimate of register pressure. @@ -315,7 +297,6 @@ _ifg->add_edge( r, l ); } -//------------------------------build_ifg_virtual------------------------------ // Actually build the interference graph. Uses virtual registers only, no // physical register masks. 
This allows me to be very aggressive when // coalescing copies. Some of this aggressiveness will have to be undone @@ -325,9 +306,9 @@ void PhaseChaitin::build_ifg_virtual( ) { // For all blocks (in any order) do... - for( uint i=0; i<_cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; - IndexSet *liveout = _live->live(b); + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); + IndexSet* liveout = _live->live(block); // The IFG is built by a single reverse pass over each basic block. // Starting with the known live-out set, we remove things that get @@ -337,8 +318,8 @@ // The defined value interferes with everything currently live. The // value is then removed from the live-ness set and it's inputs are // added to the live-ness set. - for( uint j = b->end_idx() + 1; j > 1; j-- ) { - Node *n = b->_nodes[j-1]; + for (uint j = block->end_idx() + 1; j > 1; j--) { + Node* n = block->get_node(j - 1); // Get value being defined uint r = _lrg_map.live_range_id(n); @@ -408,7 +389,6 @@ } // End of forall blocks } -//------------------------------count_int_pressure----------------------------- uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) { IndexSetIterator elements(liveout); uint lidx; @@ -424,7 +404,6 @@ return cnt; } -//------------------------------count_float_pressure--------------------------- uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) { IndexSetIterator elements(liveout); uint lidx; @@ -438,7 +417,6 @@ return cnt; } -//------------------------------lower_pressure--------------------------------- // Adjust register pressure down by 1. Capture last hi-to-low transition, static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) { if (lrg->mask().is_UP() && lrg->mask_size()) { @@ -460,40 +438,41 @@ } } -//------------------------------build_ifg_physical----------------------------- // Build the interference graph using physical registers when available. // That is, if 2 live ranges are simultaneously alive but in their acceptable // register sets do not overlap, then they do not interfere. uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); ) - uint spill_reg = LRG::SPILL_REG; uint must_spill = 0; // For all blocks (in any order) do... - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); // Clone (rather than smash in place) the liveout info, so it is alive // for the "collect_gc_info" phase later. - IndexSet liveout(_live->live(b)); - uint last_inst = b->end_idx(); + IndexSet liveout(_live->live(block)); + uint last_inst = block->end_idx(); // Compute first nonphi node index uint first_inst; - for( first_inst = 1; first_inst < last_inst; first_inst++ ) - if( !b->_nodes[first_inst]->is_Phi() ) + for (first_inst = 1; first_inst < last_inst; first_inst++) { + if (!block->get_node(first_inst)->is_Phi()) { break; + } + } // Spills could be inserted before CreateEx node which should be // first instruction in block after Phis. Move CreateEx up. 
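The PhaseIFG routines above (add_edge, test_edge) keep the interference graph triangular: an edge is stored only under the larger live-range index, and queries sort their arguments the same way, halving the storage. A minimal sketch of that scheme, with std::set as an assumed stand-in for HotSpot's IndexSet:

  #include <algorithm>
  #include <cassert>
  #include <set>
  #include <vector>

  struct TriangularIFG {
    std::vector<std::set<unsigned> > adj;  // adj[a] holds only neighbors < a

    explicit TriangularIFG(unsigned maxlrg) : adj(maxlrg) {}

    // Sort so a > b, then store b in a's set (cf. PhaseIFG::add_edge).
    void add_edge(unsigned a, unsigned b) {
      if (a < b) std::swap(a, b);
      assert(a != b && "no self interference");
      adj[a].insert(b);
    }

    // The same sorting trick answers membership (cf. PhaseIFG::test_edge).
    bool test_edge(unsigned a, unsigned b) const {
      if (a < b) std::swap(a, b);
      return adj[a].count(b) != 0;
    }
  };

  int main() {
    TriangularIFG ifg(8);
    ifg.add_edge(3, 5);           // stored under the larger index, 5
    assert(ifg.test_edge(5, 3));  // lookup is order-insensitive
    assert(!ifg.test_edge(2, 4));
    return 0;
  }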
- for( uint insidx = first_inst; insidx < last_inst; insidx++ ) { - Node *ex = b->_nodes[insidx]; - if( ex->is_SpillCopy() ) continue; - if( insidx > first_inst && ex->is_Mach() && - ex->as_Mach()->ideal_Opcode() == Op_CreateEx ) { + for (uint insidx = first_inst; insidx < last_inst; insidx++) { + Node *ex = block->get_node(insidx); + if (ex->is_SpillCopy()) { + continue; + } + if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) { // If the CreateEx isn't above all the MachSpillCopies // then move it to the top. - b->_nodes.remove(insidx); - b->_nodes.insert(first_inst, ex); + block->remove_node(insidx); + block->insert_node(ex, first_inst); } // Stop once a CreateEx or any other node is found break; @@ -503,12 +482,12 @@ uint pressure[2], hrp_index[2]; pressure[0] = pressure[1] = 0; hrp_index[0] = hrp_index[1] = last_inst+1; - b->_reg_pressure = b->_freg_pressure = 0; + block->_reg_pressure = block->_freg_pressure = 0; // Liveout things are presumed live for the whole block. We accumulate // 'area' accordingly. If they get killed in the block, we'll subtract // the unused part of the block from the area. int inst_count = last_inst - first_inst; - double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count); + double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count); assert(!(cost < 0.0), "negative spill cost" ); IndexSetIterator elements(&liveout); uint lidx; @@ -519,13 +498,15 @@ if (lrg.mask().is_UP() && lrg.mask_size()) { if (lrg._is_float || lrg._is_vector) { // Count float pressure pressure[1] += lrg.reg_pressure(); - if( pressure[1] > b->_freg_pressure ) - b->_freg_pressure = pressure[1]; + if (pressure[1] > block->_freg_pressure) { + block->_freg_pressure = pressure[1]; + } // Count int pressure, but do not count the SP, flags - } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) { + } else if(lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) { pressure[0] += lrg.reg_pressure(); - if( pressure[0] > b->_reg_pressure ) - b->_reg_pressure = pressure[0]; + if (pressure[0] > block->_reg_pressure) { + block->_reg_pressure = pressure[0]; + } } } } @@ -541,8 +522,8 @@ // value is then removed from the live-ness set and it's inputs are added // to the live-ness set. uint j; - for( j = last_inst + 1; j > 1; j-- ) { - Node *n = b->_nodes[j - 1]; + for (j = last_inst + 1; j > 1; j--) { + Node* n = block->get_node(j - 1); // Get value being defined uint r = _lrg_map.live_range_id(n); @@ -551,7 +532,7 @@ if(r) { // A DEF normally costs block frequency; rematerialized values are // removed from the DEF sight, so LOWER costs here. - lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq; + lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq; // If it is not live, then this instruction is dead. Probably caused // by spilling and rematerialization. Who cares why, yank this baby. @@ -560,7 +541,7 @@ if( !n->is_Proj() || // Could also be a flags-projection of a dead ADD or such. 
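Both build_ifg_virtual and build_ifg_physical rest on the same single reverse pass per block: starting from the live-out set, each definition interferes with everything still live, is then removed from the live set, and its inputs become live. A simplified standalone sketch of that pass, with a toy Instr record assumed in place of HotSpot's nodes and live-range machinery:

  #include <algorithm>
  #include <set>
  #include <utility>
  #include <vector>

  struct Instr {
    unsigned def;                // live-range id defined here (0 = none)
    std::vector<unsigned> uses;  // live-range ids consumed here
    Instr() : def(0) {}
  };

  typedef std::set<std::pair<unsigned, unsigned> > EdgeSet;

  // Walk the block bottom-up: the def interferes with everything still live,
  // then dies (it is not live above its definition), then the uses go live.
  void build_block_interference(const std::vector<Instr>& block,
                                std::set<unsigned> liveout,  // by value: mutated
                                EdgeSet& edges) {
    for (size_t i = block.size(); i-- > 0; ) {
      const Instr& n = block[i];
      if (n.def != 0) {
        for (std::set<unsigned>::iterator it = liveout.begin();
             it != liveout.end(); ++it) {
          if (*it != n.def) {
            edges.insert(std::make_pair(std::min(*it, n.def),
                                        std::max(*it, n.def)));
          }
        }
        liveout.erase(n.def);
      }
      for (size_t u = 0; u < n.uses.size(); ++u) {
        liveout.insert(n.uses[u]);
      }
    }
  }

  int main() {
    // v1 = ...; v2 = ...; use(v1, v2): v1 and v2 are simultaneously live.
    std::vector<Instr> block(3);
    block[0].def = 1;
    block[1].def = 2;
    block[2].uses.push_back(1);
    block[2].uses.push_back(2);
    EdgeSet edges;
    build_block_interference(block, std::set<unsigned>(), edges);
    return edges.count(std::make_pair(1u, 2u)) == 1 ? 0 : 1;
  }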
(_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) { - b->_nodes.remove(j - 1); + block->remove_node(j - 1); if (lrgs(r)._def == n) { lrgs(r)._def = 0; } @@ -580,21 +561,21 @@ RegMask itmp = lrgs(r).mask(); itmp.AND(*Matcher::idealreg2regmask[Op_RegI]); int iregs = itmp.Size(); - if( pressure[0]+iregs > b->_reg_pressure ) - b->_reg_pressure = pressure[0]+iregs; - if( pressure[0] <= (uint)INTPRESSURE && - pressure[0]+iregs > (uint)INTPRESSURE ) { - hrp_index[0] = j-1; + if (pressure[0]+iregs > block->_reg_pressure) { + block->_reg_pressure = pressure[0] + iregs; + } + if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) { + hrp_index[0] = j - 1; } // Count the float-only registers RegMask ftmp = lrgs(r).mask(); ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]); int fregs = ftmp.Size(); - if( pressure[1]+fregs > b->_freg_pressure ) - b->_freg_pressure = pressure[1]+fregs; - if( pressure[1] <= (uint)FLOATPRESSURE && - pressure[1]+fregs > (uint)FLOATPRESSURE ) { - hrp_index[1] = j-1; + if (pressure[1] + fregs > block->_freg_pressure) { + block->_freg_pressure = pressure[1] + fregs; + } + if(pressure[1] <= (uint)FLOATPRESSURE && pressure[1]+fregs > (uint)FLOATPRESSURE) { + hrp_index[1] = j - 1; } } @@ -607,7 +588,7 @@ if( n->is_SpillCopy() && lrgs(r).is_singledef() // MultiDef live range can still split && n->outcnt() == 1 // and use must be in this block - && _cfg.get_block_for_node(n->unique_out()) == b ) { + && _cfg.get_block_for_node(n->unique_out()) == block) { // All single-use MachSpillCopy(s) that immediately precede their // use must color early. If a longer live range steals their // color, the spill copy will split and may push another spill copy @@ -617,14 +598,16 @@ // Node *single_use = n->unique_out(); - assert( b->find_node(single_use) >= j, "Use must be later in block"); + assert(block->find_node(single_use) >= j, "Use must be later in block"); // Use can be earlier in block if it is a Phi, but then I should be a MultiDef // Find first non SpillCopy 'm' that follows the current instruction // (j - 1) is index for current instruction 'n' Node *m = n; - for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; } - if( m == single_use ) { + for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) { + m = block->get_node(i); + } + if (m == single_use) { lrgs(r)._area = 0.0; } } @@ -633,7 +616,7 @@ if( liveout.remove(r) ) { // Adjust register pressure. // Capture last hi-to-lo pressure transition - lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index ); + lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index); assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } @@ -646,7 +629,7 @@ if (liveout.remove(x)) { lrgs(x)._area -= cost; // Adjust register pressure. - lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index); + lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index); assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } @@ -718,7 +701,7 @@ // Area remaining in the block inst_count--; - cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count); + cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count); // Make all inputs live if( !n->is_Phi() ) { // Phi function uses come from prior block @@ -743,7 +726,7 @@ if (k < debug_start) { // A USE costs twice block frequency (once for the Load, once // for a Load-delay). 
Rematerialized uses only cost once. - lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq)); + lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq)); } // It is live now if (liveout.insert(x)) { @@ -753,12 +736,14 @@ if (lrg.mask().is_UP() && lrg.mask_size()) { if (lrg._is_float || lrg._is_vector) { pressure[1] += lrg.reg_pressure(); - if( pressure[1] > b->_freg_pressure ) - b->_freg_pressure = pressure[1]; + if (pressure[1] > block->_freg_pressure) { + block->_freg_pressure = pressure[1]; + } } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) { pressure[0] += lrg.reg_pressure(); - if( pressure[0] > b->_reg_pressure ) - b->_reg_pressure = pressure[0]; + if (pressure[0] > block->_reg_pressure) { + block->_reg_pressure = pressure[0]; + } } } assert( pressure[0] == count_int_pressure (&liveout), "" ); @@ -772,44 +757,47 @@ // If we run off the top of the block with high pressure and // never see a hi-to-low pressure transition, just record that // the whole block is high pressure. - if( pressure[0] > (uint)INTPRESSURE ) { + if (pressure[0] > (uint)INTPRESSURE) { hrp_index[0] = 0; - if( pressure[0] > b->_reg_pressure ) - b->_reg_pressure = pressure[0]; + if (pressure[0] > block->_reg_pressure) { + block->_reg_pressure = pressure[0]; + } } - if( pressure[1] > (uint)FLOATPRESSURE ) { + if (pressure[1] > (uint)FLOATPRESSURE) { hrp_index[1] = 0; - if( pressure[1] > b->_freg_pressure ) - b->_freg_pressure = pressure[1]; + if (pressure[1] > block->_freg_pressure) { + block->_freg_pressure = pressure[1]; + } } // Compute high pressure indice; avoid landing in the middle of projnodes j = hrp_index[0]; - if( j < b->_nodes.size() && j < b->end_idx()+1 ) { - Node *cur = b->_nodes[j]; - while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) { + if (j < block->number_of_nodes() && j < block->end_idx() + 1) { + Node* cur = block->get_node(j); + while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) { j--; - cur = b->_nodes[j]; + cur = block->get_node(j); } } - b->_ihrp_index = j; + block->_ihrp_index = j; j = hrp_index[1]; - if( j < b->_nodes.size() && j < b->end_idx()+1 ) { - Node *cur = b->_nodes[j]; - while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) { + if (j < block->number_of_nodes() && j < block->end_idx() + 1) { + Node* cur = block->get_node(j); + while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) { j--; - cur = b->_nodes[j]; + cur = block->get_node(j); } } - b->_fhrp_index = j; + block->_fhrp_index = j; #ifndef PRODUCT // Gather Register Pressure Statistics if( PrintOptoStatistics ) { - if( b->_reg_pressure > (uint)INTPRESSURE || b->_freg_pressure > (uint)FLOATPRESSURE ) + if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) { _high_pressure++; - else + } else { _low_pressure++; + } } #endif } // End of for all blocks diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/ifnode.cpp --- a/src/share/vm/opto/ifnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/ifnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -76,6 +76,7 @@ if( !i1->is_Bool() ) return NULL; BoolNode *b = i1->as_Bool(); Node *cmp = b->in(1); + if( cmp->is_FlagsProj() ) return NULL; if( !cmp->is_Cmp() ) return NULL; i1 = cmp->in(1); if( i1 == NULL || !i1->is_Phi() ) return NULL; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/lcm.cpp --- a/src/share/vm/opto/lcm.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/lcm.cpp Fri Oct 11 
10:38:03 2013 +0200 @@ -58,14 +58,14 @@ // The proj is the control projection for the not-null case. // The val is the pointer being checked for nullness or // decodeHeapOop_not_null node if it did not fold into address. -void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) { +void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) { // Assume if null check need for 0 offset then always needed // Intel solaris doesn't support any null checks yet and no // mechanism exists (yet) to set the switches at an os_cpu level if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return; // Make sure the ptr-is-null path appears to be uncommon! - float f = end()->as_MachIf()->_prob; + float f = block->end()->as_MachIf()->_prob; if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f; if( f > PROB_UNLIKELY_MAG(4) ) return; @@ -75,13 +75,13 @@ // Get the successor block for if the test ptr is non-null Block* not_null_block; // this one goes with the proj Block* null_block; - if (_nodes[_nodes.size()-1] == proj) { - null_block = _succs[0]; - not_null_block = _succs[1]; + if (block->get_node(block->number_of_nodes()-1) == proj) { + null_block = block->_succs[0]; + not_null_block = block->_succs[1]; } else { - assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other"); - not_null_block = _succs[0]; - null_block = _succs[1]; + assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other"); + not_null_block = block->_succs[0]; + null_block = block->_succs[1]; } while (null_block->is_Empty() == Block::empty_with_goto) { null_block = null_block->_succs[0]; @@ -93,8 +93,8 @@ // detect failure of this optimization, as in 6366351.) { bool found_trap = false; - for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) { - Node* nn = null_block->_nodes[i1]; + for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) { + Node* nn = null_block->get_node(i1); if (nn->is_MachCall() && nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) { const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type(); @@ -237,20 +237,20 @@ } // Check ctrl input to see if the null-check dominates the memory op - Block *cb = cfg->get_block_for_node(mach); + Block *cb = get_block_for_node(mach); cb = cb->_idom; // Always hoist at least 1 block if( !was_store ) { // Stores can be hoisted only one block - while( cb->_dom_depth > (_dom_depth + 1)) + while( cb->_dom_depth > (block->_dom_depth + 1)) cb = cb->_idom; // Hoist loads as far as we want // The non-null-block should dominate the memory op, too. Live // range spilling will insert a spill in the non-null-block if it is // needs to spill the memory op for an implicit null check. 
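The hoisting walk in implicit_null_check above climbs the dominator tree from the candidate memory op toward the null-check block, with an asymmetry worth noting: a store may be hoisted only one block, while a load may climb until it sits just below the check. A compact sketch of that walk, with a toy Block struct assumed in place of HotSpot's:

  // Toy stand-in for HotSpot's Block; only what the walk needs.
  struct Block {
    Block* idom;    // immediate dominator
    int dom_depth;  // depth in the dominator tree
  };

  // Climb from the candidate's block toward the null-check block 'check':
  // always at least one block, and for loads as far as the level just below
  // the check (stores must stop after the single mandatory step).
  Block* hoist_walk(Block* cb, const Block* check, bool was_store) {
    cb = cb->idom;                  // always hoist at least 1 block
    if (!was_store) {               // loads may keep climbing
      while (cb->dom_depth > check->dom_depth + 1) {
        cb = cb->idom;
      }
    }
    return cb;
  }

  int main() {
    Block root  = { 0, 0 };
    Block check = { &root, 1 };   // the null-check block
    Block mid   = { &check, 2 };  // its not-null successor
    Block leaf  = { &mid, 3 };
    Block deep  = { &leaf, 4 };   // where the candidate memory op lives
    return (hoist_walk(&deep, &check, false) == &mid &&   // load climbs far
            hoist_walk(&deep, &check, true)  == &leaf)    // store: one block
               ? 0 : 1;
  }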
- if (cb->_dom_depth == (_dom_depth + 1)) { + if (cb->_dom_depth == (block->_dom_depth + 1)) { if (cb != not_null_block) continue; cb = cb->_idom; } } - if( cb != this ) continue; + if( cb != block ) continue; // Found a memory user; see if it can be hoisted to check-block uint vidx = 0; // Capture index of value into memop @@ -262,8 +262,8 @@ if( is_decoden ) continue; } // Block of memory-op input - Block *inb = cfg->get_block_for_node(mach->in(j)); - Block *b = this; // Start from nul check + Block *inb = get_block_for_node(mach->in(j)); + Block *b = block; // Start from nul check while( b != inb && b->_dom_depth > inb->_dom_depth ) b = b->_idom; // search upwards for input // See if input dominates null check @@ -272,28 +272,28 @@ } if( j > 0 ) continue; - Block *mb = cfg->get_block_for_node(mach); + Block *mb = get_block_for_node(mach); // Hoisting stores requires more checks for the anti-dependence case. // Give up hoisting if we have to move the store past any load. if( was_store ) { Block *b = mb; // Start searching here for a local load // mach use (faulting) trying to hoist // n might be blocker to hoisting - while( b != this ) { + while( b != block ) { uint k; - for( k = 1; k < b->_nodes.size(); k++ ) { - Node *n = b->_nodes[k]; + for( k = 1; k < b->number_of_nodes(); k++ ) { + Node *n = b->get_node(k); if( n->needs_anti_dependence_check() && n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) ) break; // Found anti-dependent load } - if( k < b->_nodes.size() ) + if( k < b->number_of_nodes() ) break; // Found anti-dependent load // Make sure control does not do a merge (would have to check allpaths) if( b->num_preds() != 2 ) break; - b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block + b = get_block_for_node(b->pred(1)); // Move up to predecessor block } - if( b != this ) continue; + if( b != block ) continue; } // Make sure this memory op is not already being used for a NullCheck @@ -303,7 +303,7 @@ // Found a candidate! Pick one with least dom depth - the highest // in the dom tree should be closest to the null check. - if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) { + if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) { best = mach; bidx = vidx; } @@ -319,46 +319,45 @@ if( is_decoden ) { // Check if we need to hoist decodeHeapOop_not_null first. - Block *valb = cfg->get_block_for_node(val); - if( this != valb && this->_dom_depth < valb->_dom_depth ) { + Block *valb = get_block_for_node(val); + if( block != valb && block->_dom_depth < valb->_dom_depth ) { // Hoist it up to the end of the test block. valb->find_remove(val); - this->add_inst(val); - cfg->map_node_to_block(val, this); + block->add_inst(val); + map_node_to_block(val, block); // DecodeN on x86 may kill flags. Check for flag-killing projections // that also need to be hoisted. for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { Node* n = val->fast_out(j); if( n->is_MachProj() ) { - cfg->get_block_for_node(n)->find_remove(n); - this->add_inst(n); - cfg->map_node_to_block(n, this); + get_block_for_node(n)->find_remove(n); + block->add_inst(n); + map_node_to_block(n, block); } } } } // Hoist the memory candidate up to the end of the test block. 
- Block *old_block = cfg->get_block_for_node(best); + Block *old_block = get_block_for_node(best); old_block->find_remove(best); - add_inst(best); - cfg->map_node_to_block(best, this); + block->add_inst(best); + map_node_to_block(best, block); // Move the control dependence - if (best->in(0) && best->in(0) == old_block->_nodes[0]) - best->set_req(0, _nodes[0]); + if (best->in(0) && best->in(0) == old_block->head()) + best->set_req(0, block->head()); // Check for flag-killing projections that also need to be hoisted // Should be DU safe because no edge updates. for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) { Node* n = best->fast_out(j); if( n->is_MachProj() ) { - cfg->get_block_for_node(n)->find_remove(n); - add_inst(n); - cfg->map_node_to_block(n, this); + get_block_for_node(n)->find_remove(n); + block->add_inst(n); + map_node_to_block(n, block); } } - Compile *C = cfg->C; // proj==Op_True --> ne test; proj==Op_False --> eq test. // One of two graph shapes got matched: // (IfTrue (If (Bool NE (CmpP ptr NULL)))) @@ -368,10 +367,10 @@ // We need to flip the projections to keep the same semantics. if( proj->Opcode() == Op_IfTrue ) { // Swap order of projections in basic block to swap branch targets - Node *tmp1 = _nodes[end_idx()+1]; - Node *tmp2 = _nodes[end_idx()+2]; - _nodes.map(end_idx()+1, tmp2); - _nodes.map(end_idx()+2, tmp1); + Node *tmp1 = block->get_node(block->end_idx()+1); + Node *tmp2 = block->get_node(block->end_idx()+2); + block->map_node(tmp2, block->end_idx()+1); + block->map_node(tmp1, block->end_idx()+2); Node *tmp = new (C) Node(C->top()); // Use not NULL input tmp1->replace_by(tmp); tmp2->replace_by(tmp1); @@ -384,8 +383,8 @@ // it as well. Node *old_tst = proj->in(0); MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx); - _nodes.map(end_idx(),nul_chk); - cfg->map_node_to_block(nul_chk, this); + block->map_node(nul_chk, block->end_idx()); + map_node_to_block(nul_chk, block); // Redirect users of old_test to nul_chk for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2) old_tst->last_out(i2)->set_req(0, nul_chk); @@ -393,8 +392,8 @@ for (uint i3 = 0; i3 < old_tst->req(); i3++) old_tst->set_req(i3, NULL); - cfg->latency_from_uses(nul_chk); - cfg->latency_from_uses(best); + latency_from_uses(nul_chk); + latency_from_uses(best); } @@ -408,7 +407,7 @@ // remaining cases (most), choose the instruction with the greatest latency // (that is, the most number of pseudo-cycles required to the end of the // routine). If there is a tie, choose the instruction with the most inputs. 
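The comment above summarizes select()'s tie-breaking rule: among ready nodes, take the one with the greatest latency to the end of the routine, and break latency ties in favor of the node with more inputs. A standalone sketch of just that comparison, over an assumed Candidate record rather than real machine nodes:

  #include <cstddef>
  #include <vector>

  struct Candidate {
    unsigned latency;  // pseudo-cycles to the end of the routine
    unsigned inputs;   // input count, used as the tie-breaking score
  };

  // Prefer the greatest latency; on a tie, prefer more inputs.
  std::size_t select_best(const std::vector<Candidate>& ready) {
    std::size_t best = 0;
    for (std::size_t i = 1; i < ready.size(); ++i) {
      if (ready[i].latency > ready[best].latency ||
          (ready[i].latency == ready[best].latency &&
           ready[i].inputs > ready[best].inputs)) {
        best = i;
      }
    }
    return best;
  }

  int main() {
    std::vector<Candidate> ready;
    Candidate a = { 3, 2 }, b = { 5, 1 }, c = { 5, 4 };
    ready.push_back(a); ready.push_back(b); ready.push_back(c);
    return select_best(ready) == 2 ? 0 : 1;  // c ties b on latency, wins on inputs
  }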
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) { +Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) { // If only a single entry on the stack, use it uint cnt = worklist.size(); @@ -442,7 +441,7 @@ } // Final call in a block must be adjacent to 'catch' - Node *e = end(); + Node *e = block->end(); if( e->is_Catch() && e->in(0)->in(0) == n ) continue; @@ -468,7 +467,14 @@ Node* use = n->fast_out(j); // The use is a conditional branch, make them adjacent - if (use->is_MachIf() && cfg->get_block_for_node(use) == this) { + if (use->is_MachIf() && get_block_for_node(use) == block) { + found_machif = true; + break; + } + + // For nodes that produce a FlagsProj, make the node adjacent to the + // use of the FlagsProj + if (use->is_FlagsProj() && get_block_for_node(use) == block) { found_machif = true; break; } @@ -501,7 +507,7 @@ n_choice = 1; } - uint n_latency = cfg->_node_latency->at_grow(n->_idx); + uint n_latency = get_latency_for_node(n); uint n_score = n->req(); // Many inputs get high score to break ties // Keep best latency found @@ -529,13 +535,13 @@ //------------------------------set_next_call---------------------------------- -void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) { +void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) { if( next_call.test_set(n->_idx) ) return; for( uint i=0; i<n->len(); i++ ) { Node *m = n->in(i); if( !m ) continue; // must see all nodes in block that precede call - if (cfg->get_block_for_node(m) == this) { - set_next_call(m, next_call, cfg); + if (get_block_for_node(m) == block) { + set_next_call(block, m, next_call); } } } @@ -546,24 +552,26 @@ // next subroutine call get priority - basically it moves things NOT needed // for the next call till after the call. This prevents me from trying to // carry lots of stuff live across a call.
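The comment above describes needed_for_next_call, which delegates to the set_next_call walk rewritten earlier in this hunk: a transitive, block-local marking of everything the next call needs, so the scheduler can push unrelated work past the call. A minimal sketch of that walk, with a toy Node type assumed in place of HotSpot's:

  #include <cstddef>
  #include <set>
  #include <vector>

  struct Node {
    int id;
    int block;                  // id of the block the node lives in
    std::vector<Node*> inputs;
  };

  // Transitively mark the call and everything it depends on, staying within
  // the given block (inputs from other blocks are already scheduled).
  void set_next_call(const Node* n, int block, std::set<int>& next_call) {
    if (!next_call.insert(n->id).second) return;  // already marked
    for (std::size_t i = 0; i < n->inputs.size(); ++i) {
      const Node* m = n->inputs[i];
      if (m != 0 && m->block == block) {
        set_next_call(m, block, next_call);
      }
    }
  }

  int main() {
    Node a = { 1, 0 }, b = { 2, 0 }, c = { 3, 1 };  // c lives in another block
    Node call = { 4, 0 };
    call.inputs.push_back(&a);
    a.inputs.push_back(&b);
    a.inputs.push_back(&c);
    std::set<int> marked;
    set_next_call(&call, 0, marked);
    return (marked.count(2) == 1 && marked.count(3) == 0) ? 0 : 1;
  }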
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) { +void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) { // Find the next control-defining Node in this block Node* call = NULL; for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) { Node* m = this_call->fast_out(i); - if(cfg->get_block_for_node(m) == this && // Local-block user + if (get_block_for_node(m) == block && // Local-block user m != this_call && // Not self-start node - m->is_MachCall() ) + m->is_MachCall()) { call = m; break; + } } if (call == NULL) return; // No next call (e.g., block end is near) // Set next-call for all inputs to this call - set_next_call(call, next_call, cfg); + set_next_call(block, call, next_call); } //------------------------------add_call_kills------------------------------------- -void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) { +// helper function that adds caller save registers to MachProjNode +static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) { // Fill in the kill mask for the call for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) { if( !regs.Member(r) ) { // Not already defined by the call @@ -579,7 +587,7 @@ //------------------------------sched_call------------------------------------- -uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) { +uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) { RegMask regs; // Schedule all the users of the call right now. All the users are @@ -592,18 +600,18 @@ ready_cnt.at_put(n->_idx, n_cnt); assert( n_cnt == 0, "" ); // Schedule next to call - _nodes.map(node_cnt++, n); + block->map_node(n, node_cnt++); // Collect defined registers regs.OR(n->out_RegMask()); // Check for scheduling the next control-definer if( n->bottom_type() == Type::CONTROL ) // Warm up next pile of heuristic bits - needed_for_next_call(n, next_call, cfg); + needed_for_next_call(block, n, next_call); // Children of projections are now all ready for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { Node* m = n->fast_out(j); // Get user - if(cfg->get_block_for_node(m) != this) { + if(get_block_for_node(m) != block) { continue; } if( m->is_Phi() ) continue; @@ -617,14 +625,14 @@ // Act as if the call defines the Frame Pointer. // Certainly the FP is alive and well after the call. - regs.Insert(matcher.c_frame_pointer()); + regs.Insert(_matcher.c_frame_pointer()); // Set all registers killed and not already defined by the call. uint r_cnt = mcall->tf()->range()->cnt(); int op = mcall->ideal_Opcode(); - MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); - cfg->map_node_to_block(proj, this); - _nodes.insert(node_cnt++, proj); + MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); + map_node_to_block(proj, block); + block->insert_node(proj, node_cnt++); // Select the right register save policy.
const char * save_policy; @@ -633,13 +641,13 @@ case Op_CallLeaf: case Op_CallLeafNoFP: // Calling C code so use C calling convention - save_policy = matcher._c_reg_save_policy; + save_policy = _matcher._c_reg_save_policy; break; case Op_CallStaticJava: case Op_CallDynamicJava: // Calling Java code so use Java calling convention - save_policy = matcher._register_save_policy; + save_policy = _matcher._register_save_policy; break; default: @@ -674,44 +682,46 @@ //------------------------------schedule_local--------------------------------- // Topological sort within a block. Someday become a real scheduler. -bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) { +bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) { // Already "sorted" are the block start Node (as the first entry), and // the block-ending Node and any trailing control projections. We leave // these alone. PhiNodes and ParmNodes are made to follow the block start // Node. Everything else gets topo-sorted. #ifndef PRODUCT - if (cfg->trace_opto_pipelining()) { - tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order); - for (uint i = 0;i < _nodes.size();i++) { + if (trace_opto_pipelining()) { + tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order); + for (uint i = 0;i < block->number_of_nodes(); i++) { tty->print("# "); - _nodes[i]->fast_dump(); + block->get_node(i)->fast_dump(); } tty->print_cr("#"); } #endif // RootNode is already sorted - if( _nodes.size() == 1 ) return true; + if (block->number_of_nodes() == 1) { + return true; + } // Move PhiNodes and ParmNodes from 1 to cnt up to the start - uint node_cnt = end_idx(); + uint node_cnt = block->end_idx(); uint phi_cnt = 1; uint i; for( i = 1; i<node_cnt; i++ ) { // Scan for Phi - Node *n = _nodes[i]; + Node* n = block->get_node(i); if( n->is_Phi() || // Found a PhiNode or ParmNode - (n->is_Proj() && n->in(0) == head()) ) { + (n->is_Proj() && n->in(0) == block->head()) ) { // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt - _nodes.map(i,_nodes[phi_cnt]); - _nodes.map(phi_cnt++,n); // swap Phi/Parm up front + block->map_node(block->get_node(phi_cnt), i); + block->map_node(n, phi_cnt++); // swap Phi/Parm up front } else { // All others // Count block-local inputs to 'n' uint cnt = n->len(); // Input count uint local = 0; for( uint j=0; j<cnt; j++ ) { Node *m = n->in(j); - if( m && cfg->get_block_for_node(m) == this && !m->is_top() ) + if( m && get_block_for_node(m) == block && !m->is_top() ) local++; // One more block-local input } ready_cnt.at_put(n->_idx, local); // Count em up @@ -723,7 +733,7 @@ for (uint prec = n->req(); prec < n->len(); prec++) { Node* oop_store = n->in(prec); if (oop_store != NULL) { - assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); + assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark"); } } } @@ -747,16 +757,16 @@ } } } - for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count - ready_cnt.at_put(_nodes[i2]->_idx, 0); + for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count + ready_cnt.at_put(block->get_node(i2)->_idx, 0); // All the prescheduled guys do not hold back internal nodes uint i3; for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled - Node *n = _nodes[i3]; // Get pre-scheduled + Node *n = block->get_node(i3); // Get pre-scheduled for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { Node* m = n->fast_out(j); - if (cfg->get_block_for_node(m) == this) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1; ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count } } @@ -767,7 +777,7 @@ // Make a worklist Node_List worklist; for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist - Node *m = _nodes[i4]; + Node *m = block->get_node(i4); if( !ready_cnt.at(m->_idx) ) { // Zero ready count? if (m->is_iteratively_computed()) { // Push induction variable increments last to allow other uses @@ -789,15 +799,15 @@ } // Warm up the 'next_call' heuristic bits - needed_for_next_call(_nodes[0], next_call, cfg); + needed_for_next_call(block, block->head(), next_call); #ifndef PRODUCT - if (cfg->trace_opto_pipelining()) { - for (uint j=0; j<_nodes.size(); j++) { - Node *n = _nodes[j]; + if (trace_opto_pipelining()) { + for (uint j=0; j< block->number_of_nodes(); j++) { + Node *n = block->get_node(j); int idx = n->_idx; tty->print("# ready cnt:%3d ", ready_cnt.at(idx)); - tty->print("latency:%3d ", cfg->_node_latency->at_grow(idx)); + tty->print("latency:%3d ", get_latency_for_node(n)); tty->print("%4d: %s\n", idx, n->Name()); } } @@ -808,7 +818,7 @@ while( worklist.size() ) { // Worklist is not ready #ifndef PRODUCT - if (cfg->trace_opto_pipelining()) { + if (trace_opto_pipelining()) { tty->print("# ready list:"); for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist Node *n = worklist[i]; // Get Node on worklist tty->print(" %d", n->_idx); } tty->cr(); } #endif // Select and pop a ready guy from worklist - Node *n = select(cfg, worklist, ready_cnt, next_call, phi_cnt); - _nodes.map(phi_cnt++,n); // Schedule him next + Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt); + block->map_node(n, phi_cnt++); // Schedule him next #ifndef PRODUCT - if (cfg->trace_opto_pipelining()) { + if (trace_opto_pipelining()) { tty->print("# select %d: %s", n->_idx, n->Name()); - tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx)); + tty->print(", latency:%d", get_latency_for_node(n)); n->dump(); if (Verbose) { tty->print("# ready list:"); @@ -840,26 +850,26 @@ #endif if( n->is_MachCall() ) { MachCallNode *mcall = n->as_MachCall(); - phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call); + phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call); continue; } if (n->is_Mach() && n->as_Mach()->has_call()) { RegMask regs; - regs.Insert(matcher.c_frame_pointer()); + regs.Insert(_matcher.c_frame_pointer()); regs.OR(n->out_RegMask()); - MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj ); - cfg->map_node_to_block(proj, this); - _nodes.insert(phi_cnt++, proj); + MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj ); + map_node_to_block(proj, block); + block->insert_node(proj, phi_cnt++); - add_call_kills(proj, regs, matcher._c_reg_save_policy, false); + add_call_kills(proj, regs, _matcher._c_reg_save_policy, false); } // Children are now all ready for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) { Node* m = n->fast_out(i5); // Get user - if (cfg->get_block_for_node(m) != this) { + if (get_block_for_node(m) != block) { continue; } if( m->is_Phi() ) continue; @@ -874,9 +884,8 @@ } } - if( phi_cnt != end_idx() ) { + if( phi_cnt != block->end_idx() ) { // did not schedule all.
Retry, Bailout, or Die - Compile* C = matcher.C; if (C->subsume_loads() == true && !C->failing()) { // Retry with subsume_loads == false // If this is the first failure, the sentinel string will "stick" @@ -888,12 +897,12 @@ } #ifndef PRODUCT - if (cfg->trace_opto_pipelining()) { + if (trace_opto_pipelining()) { tty->print_cr("#"); tty->print_cr("# after schedule_local"); - for (uint i = 0;i < _nodes.size();i++) { + for (uint i = 0;i < block->number_of_nodes();i++) { tty->print("# "); - _nodes[i]->fast_dump(); + block->get_node(i)->fast_dump(); } tty->cr(); } @@ -919,7 +928,7 @@ } //------------------------------catch_cleanup_find_cloned_def------------------ -static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) { +Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) { assert( use_blk != def_blk, "Inter-block cleanup only"); // The use is some block below the Catch. Find and return the clone of the def @@ -945,14 +954,14 @@ // PhiNode, the PhiNode uses from the def and IT's uses need fixup. Node_Array inputs = new Node_List(Thread::current()->resource_area()); for(uint k = 1; k < use_blk->num_preds(); k++) { - Block* block = cfg->get_block_for_node(use_blk->pred(k)); - inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx)); + Block* block = get_block_for_node(use_blk->pred(k)); + inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx)); } // Check to see if the use_blk already has an identical phi inserted. // If it exists, it will be at the first position since all uses of a // def are processed together. - Node *phi = use_blk->_nodes[1]; + Node *phi = use_blk->get_node(1); if( phi->is_Phi() ) { fixup = phi; for (uint k = 1; k < use_blk->num_preds(); k++) { @@ -967,8 +976,8 @@ // If an existing PhiNode was not found, make a new one. if (fixup == NULL) { Node *new_phi = PhiNode::make(use_blk->head(), def); - use_blk->_nodes.insert(1, new_phi); - cfg->map_node_to_block(new_phi, use_blk); + use_blk->insert_node(new_phi, 1); + map_node_to_block(new_phi, use_blk); for (uint k = 1; k < use_blk->num_preds(); k++) { new_phi->set_req(k, inputs[k]); } @@ -977,7 +986,7 @@ } else { // Found the use just below the Catch. Make it use the clone. - fixup = use_blk->_nodes[n_clone_idx]; + fixup = use_blk->get_node(n_clone_idx); } return fixup; @@ -997,36 +1006,36 @@ for( uint k = 0; k < blk->_num_succs; k++ ) { // Get clone in each successor block Block *sb = blk->_succs[k]; - Node *clone = sb->_nodes[offset_idx+1]; + Node *clone = sb->get_node(offset_idx+1); assert( clone->Opcode() == use->Opcode(), "" ); // Make use-clone reference the def-clone - catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]); + catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx)); } } //------------------------------catch_cleanup_inter_block--------------------- // Fix all input edges in use that reference "def". The use is in a different // block than the def. 
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) { +void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) { if( !use_blk ) return; // Can happen if the use is a precedence edge - Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx); + Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx); catch_cleanup_fix_all_inputs(use, def, new_def); } //------------------------------call_catch_cleanup----------------------------- // If we inserted any instructions between a Call and his CatchNode, // clone the instructions on all paths below the Catch. -void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) { +void PhaseCFG::call_catch_cleanup(Block* block) { // End of region to clone - uint end = end_idx(); - if( !_nodes[end]->is_Catch() ) return; + uint end = block->end_idx(); + if( !block->get_node(end)->is_Catch() ) return; // Start of region to clone uint beg = end; - while(!_nodes[beg-1]->is_MachProj() || - !_nodes[beg-1]->in(0)->is_MachCall() ) { + while(!block->get_node(beg-1)->is_MachProj() || + !block->get_node(beg-1)->in(0)->is_MachCall() ) { beg--; assert(beg > 0,"Catch cleanup walking beyond block boundary"); } @@ -1035,15 +1044,15 @@ // Clone along all Catch output paths. Clone area between the 'beg' and // 'end' indices. - for( uint i = 0; i < _num_succs; i++ ) { - Block *sb = _succs[i]; + for( uint i = 0; i < block->_num_succs; i++ ) { + Block *sb = block->_succs[i]; // Clone the entire area; ignoring the edge fixup for now. for( uint j = end; j > beg; j-- ) { // It is safe here to clone a node with anti_dependence // since clones dominate on each path. - Node *clone = _nodes[j-1]->clone(); - sb->_nodes.insert( 1, clone ); - cfg->map_node_to_block(clone, sb); + Node *clone = block->get_node(j-1)->clone(); + sb->insert_node(clone, 1); + map_node_to_block(clone, sb); } } @@ -1051,7 +1060,7 @@ // Fixup edges. Check the def-use info per cloned Node for(uint i2 = beg; i2 < end; i2++ ) { uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block - Node *n = _nodes[i2]; // Node that got cloned + Node *n = block->get_node(i2); // Node that got cloned // Need DU safe iterator because of edge manipulation in calls. 
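The cloning step in call_catch_cleanup just above relies on a small indexing invariant: the region [beg, end) is copied, back to front, to position 1 of every successor block (right after the block head), so the clone of the node at index i2 is always found at n_clone_idx = i2 - beg + 1 in each successor. A standalone sketch of that bookkeeping, using plain int ids as an assumed stand-in for real nodes:

  #include <cassert>
  #include <vector>

  typedef std::vector<int> NodeArray;  // node ids; index 0 is the block head

  // Walk backwards and insert at 1, preserving the original region order
  // (cf. the j loop over [beg, end) in call_catch_cleanup).
  void clone_region(const NodeArray& block, size_t beg, size_t end,
                    NodeArray& succ) {
    for (size_t j = end; j > beg; --j) {
      succ.insert(succ.begin() + 1, block[j - 1]);
    }
  }

  int main() {
    NodeArray block, succ;
    block.push_back(-1); succ.push_back(-1);       // block heads
    for (int id = 10; id < 16; ++id) block.push_back(id);
    size_t beg = 2, end = 5;
    clone_region(block, beg, end, succ);
    for (size_t i2 = beg; i2 < end; ++i2) {
      size_t n_clone_idx = i2 - beg + 1;
      assert(succ[n_clone_idx] == block[i2]);  // clone sits at a fixed offset
    }
    return 0;
  }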
Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area()); for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) { @@ -1060,19 +1069,19 @@ uint max = out->size(); for (uint j = 0; j < max; j++) {// For all users Node *use = out->pop(); - Block *buse = cfg->get_block_for_node(use); + Block *buse = get_block_for_node(use); if( use->is_Phi() ) { for( uint k = 1; k < use->req(); k++ ) if( use->in(k) == n ) { - Block* block = cfg->get_block_for_node(buse->pred(k)); - Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx); + Block* b = get_block_for_node(buse->pred(k)); + Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx); use->set_req(k, fixup); } } else { - if (this == buse) { - catch_cleanup_intra_block(use, n, this, beg, n_clone_idx); + if (block == buse) { + catch_cleanup_intra_block(use, n, block, beg, n_clone_idx); } else { - catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx); + catch_cleanup_inter_block(use, buse, n, block, n_clone_idx); } } } // End for all users @@ -1081,30 +1090,30 @@ // Remove the now-dead cloned ops for(uint i3 = beg; i3 < end; i3++ ) { - _nodes[beg]->disconnect_inputs(NULL, C); - _nodes.remove(beg); + block->get_node(beg)->disconnect_inputs(NULL, C); + block->remove_node(beg); } // If the successor blocks have a CreateEx node, move it back to the top - for(uint i4 = 0; i4 < _num_succs; i4++ ) { - Block *sb = _succs[i4]; + for(uint i4 = 0; i4 < block->_num_succs; i4++ ) { + Block *sb = block->_succs[i4]; uint new_cnt = end - beg; // Remove any newly created, but dead, nodes. for( uint j = new_cnt; j > 0; j-- ) { - Node *n = sb->_nodes[j]; + Node *n = sb->get_node(j); if (n->outcnt() == 0 && (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){ n->disconnect_inputs(NULL, C); - sb->_nodes.remove(j); + sb->remove_node(j); new_cnt--; } } // If any newly created nodes remain, move the CreateEx node to the top if (new_cnt > 0) { - Node *cex = sb->_nodes[1+new_cnt]; + Node *cex = sb->get_node(1+new_cnt); if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) { - sb->_nodes.remove(1+new_cnt); - sb->_nodes.insert(1,cex); + sb->remove_node(1+new_cnt); + sb->insert_node(cex, 1); } } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/library_call.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -32,6 +32,7 @@ #include "opto/callGenerator.hpp" #include "opto/cfgnode.hpp" #include "opto/idealKit.hpp" +#include "opto/mathexactnode.hpp" #include "opto/mulnode.hpp" #include "opto/parse.hpp" #include "opto/runtime.hpp" @@ -46,19 +47,22 @@ private: bool _is_virtual; bool _is_predicted; + bool _does_virtual_dispatch; vmIntrinsics::ID _intrinsic_id; public: - LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, vmIntrinsics::ID id) + LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id) : InlineCallGenerator(m), _is_virtual(is_virtual), _is_predicted(is_predicted), + _does_virtual_dispatch(does_virtual_dispatch), _intrinsic_id(id) { } virtual bool is_intrinsic() const { return true; } virtual bool is_virtual() const { return _is_virtual; } virtual bool is_predicted() const { return _is_predicted; } + virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; } virtual JVMState* generate(JVMState* jvms); virtual Node* generate_predicate(JVMState* jvms); vmIntrinsics::ID 
intrinsic_id() const { return _intrinsic_id; } @@ -199,6 +203,8 @@ bool inline_math_native(vmIntrinsics::ID id); bool inline_trig(vmIntrinsics::ID id); bool inline_math(vmIntrinsics::ID id); + bool inline_math_mathExact(Node* math); + bool inline_math_addExact(); bool inline_exp(); bool inline_pow(); void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); @@ -213,6 +219,7 @@ void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); + static bool klass_needs_init_guard(Node* kls); bool inline_unsafe_allocate(); bool inline_unsafe_copyMemory(); bool inline_native_currentThread(); @@ -351,6 +358,7 @@ } bool is_predicted = false; + bool does_virtual_dispatch = false; switch (id) { case vmIntrinsics::_compareTo: @@ -377,8 +385,10 @@ break; case vmIntrinsics::_hashCode: if (!InlineObjectHash) return NULL; + does_virtual_dispatch = true; break; case vmIntrinsics::_clone: + does_virtual_dispatch = true; case vmIntrinsics::_copyOf: case vmIntrinsics::_copyOfRange: if (!InlineObjectCopy) return NULL; @@ -497,6 +507,15 @@ if (!UseCRC32Intrinsics) return NULL; break; + case vmIntrinsics::_addExact: + if (!Matcher::match_rule_supported(Op_AddExactI)) { + return NULL; + } + if (!UseMathExactIntrinsics) { + return NULL; + } + break; + default: assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); @@ -528,7 +547,7 @@ if (!InlineUnsafeOps) return NULL; } - return new LibraryIntrinsic(m, is_virtual, is_predicted, (vmIntrinsics::ID) id); + return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id); } //----------------------register_library_intrinsics----------------------- @@ -542,7 +561,7 @@ Compile* C = kit.C; int nodes = C->unique(); #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { char buf[1000]; const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); tty->print_cr("Intrinsic %s", str); @@ -553,7 +572,7 @@ // Try to inline the intrinsic. if (kit.try_to_inline()) { - if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { + if (C->print_intrinsics() || C->print_inlining()) { C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); @@ -569,7 +588,7 @@ } // The intrinsic bailed out - if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { + if (C->print_intrinsics() || C->print_inlining()) { if (jvms->has_method()) { // Not a root compile. const char* msg = is_virtual() ? 
"failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)"; @@ -591,7 +610,7 @@ int nodes = C->unique(); #ifndef PRODUCT assert(is_predicted(), "sanity"); - if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { char buf[1000]; const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); tty->print_cr("Predicate for intrinsic %s", str); @@ -602,7 +621,7 @@ Node* slow_ctl = kit.try_to_predicate(); if (!kit.failing()) { - if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { + if (C->print_intrinsics() || C->print_inlining()) { C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); @@ -616,7 +635,7 @@ } // The intrinsic bailed out - if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { + if (C->print_intrinsics() || C->print_inlining()) { if (jvms->has_method()) { // Not a root compile. const char* msg = "failed to generate predicate for intrinsic"; @@ -667,6 +686,8 @@ case vmIntrinsics::_min: case vmIntrinsics::_max: return inline_min_max(intrinsic_id()); + case vmIntrinsics::_addExact: return inline_math_addExact(); + case vmIntrinsics::_arraycopy: return inline_arraycopy(); case vmIntrinsics::_compareTo: return inline_string_compareTo(); @@ -1279,6 +1300,11 @@ const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); + // String.value field is known to be @Stable. + if (UseImplicitStableValues) { + target = cast_array_to_stable(target, target_type); + } + IdealKit kit(this, false, true); #define __ kit. 
Node* zero = __ ConI(0); @@ -1905,6 +1931,45 @@ return true; } +bool LibraryCallKit::inline_math_mathExact(Node* math) { + Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node)); + Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node)); + + Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) ); + IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); + Node* fast_path = _gvn.transform( new (C) IfFalseNode(check)); + Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) ); + + { + PreserveJVMState pjvms(this); + PreserveReexecuteState preexecs(this); + jvms()->set_should_reexecute(true); + + set_control(slow_path); + set_i_o(i_o()); + + uncommon_trap(Deoptimization::Reason_intrinsic, + Deoptimization::Action_none); + } + + set_control(fast_path); + set_result(result); + return true; +} + +bool LibraryCallKit::inline_math_addExact() { + Node* arg1 = argument(0); + Node* arg2 = argument(1); + + Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) ); + if (add->Opcode() == Op_AddExactI) { + return inline_math_mathExact(add); + } else { + set_result(add); + } + return true; +} + Node* LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { // These are the candidate return value: @@ -2293,7 +2358,7 @@ const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass); #ifndef PRODUCT - if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { + if (C->print_intrinsics() || C->print_inlining()) { tty->print(" from base type: "); adr_type->dump(); tty->print(" sharpened value: "); tjp->dump(); } @@ -2755,10 +2820,28 @@ newval = _gvn.makecon(TypePtr::NULL_PTR); // Reference stores need a store barrier. - pre_barrier(true /* do_load*/, - control(), base, adr, alias_idx, newval, value_type->make_oopptr(), - NULL /* pre_val*/, - T_OBJECT); + if (kind == LS_xchg) { + // If pre-barrier must execute before the oop store, old value will require do_load here. + if (!can_move_pre_barrier()) { + pre_barrier(true /* do_load*/, + control(), base, adr, alias_idx, newval, value_type->make_oopptr(), + NULL /* pre_val*/, + T_OBJECT); + } // Else move pre_barrier to use load_store value, see below. + } else if (kind == LS_cmpxchg) { + // Same as for newval above: + if (_gvn.type(oldval) == TypePtr::NULL_PTR) { + oldval = _gvn.makecon(TypePtr::NULL_PTR); + } + // The only known value which might get overwritten is oldval. + pre_barrier(false /* do_load */, + control(), NULL, NULL, max_juint, NULL, NULL, + oldval /* pre_val */, + T_OBJECT); + } else { + ShouldNotReachHere(); + } + #ifdef _LP64 if (adr->bottom_type()->is_ptr_to_narrowoop()) { Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); @@ -2794,16 +2877,27 @@ Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store)); set_memory(proj, alias_idx); + if (type == T_OBJECT && kind == LS_xchg) { +#ifdef _LP64 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); + } +#endif + if (can_move_pre_barrier()) { + // Don't need to load pre_val. The old value is returned by load_store. + // The pre_barrier can execute after the xchg as long as no safepoint + // gets inserted between them. 
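// --- Editorial illustration; not part of the changeset. Why the comment
// above allows moving the pre-barrier: a SATB (snapshot-at-the-beginning)
// pre-barrier only needs the field's old value, and for an atomic xchg
// that old value is exactly the instruction's result. So rather than
// loading it up front (do_load=true), the enqueue may run on the returned
// value afterwards, as long as no safepoint can separate the two steps.
// Standalone sketch; satb_enqueue() is a hypothetical stand-in for the
// collector's enqueue hook.
#include <atomic>

static void satb_enqueue(void* old_val) { (void)old_val; /* record pre-image */ }

static void* xchg_with_deferred_pre_barrier(std::atomic<void*>& field,
                                            void* new_val) {
  void* old_val = field.exchange(new_val);  // xchg returns the pre-image
  satb_enqueue(old_val);                    // pre-barrier runs on the result
  return old_val;
}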
+ pre_barrier(false /* do_load */, + control(), NULL, NULL, max_juint, NULL, NULL, + load_store /* pre_val */, + T_OBJECT); + } + } + // Add the trailing membar surrounding the access insert_mem_bar(Op_MemBarCPUOrder); insert_mem_bar(Op_MemBarAcquire); -#ifdef _LP64 - if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) { - load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); - } -#endif - assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); set_result(load_store); return true; @@ -2892,8 +2986,21 @@ } } +bool LibraryCallKit::klass_needs_init_guard(Node* kls) { + if (!kls->is_Con()) { + return true; + } + const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr(); + if (klsptr == NULL) { + return true; + } + ciInstanceKlass* ik = klsptr->klass()->as_instance_klass(); + // don't need a guard for a klass that is already initialized + return !ik->is_initialized(); +} + //----------------------------inline_unsafe_allocate--------------------------- -// public native Object sun.mics.Unsafe.allocateInstance(Class cls); +// public native Object sun.misc.Unsafe.allocateInstance(Class cls); bool LibraryCallKit::inline_unsafe_allocate() { if (callee()->is_static()) return false; // caller must have the capability! @@ -2905,16 +3012,19 @@ kls = null_check(kls); if (stopped()) return true; // argument was like int.class - // Note: The argument might still be an illegal value like - // Serializable.class or Object[].class. The runtime will handle it. - // But we must make an explicit check for initialization. - Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); - // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler - // can generate code to load it as unsigned byte. - Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); - Node* bits = intcon(InstanceKlass::fully_initialized); - Node* test = _gvn.transform(new (C) SubINode(inst, bits)); - // The 'test' is non-zero if we need to take a slow path. + Node* test = NULL; + if (LibraryCallKit::klass_needs_init_guard(kls)) { + // Note: The argument might still be an illegal value like + // Serializable.class or Object[].class. The runtime will handle it. + // But we must make an explicit check for initialization. + Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); + // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler + // can generate code to load it as unsigned byte. + Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); + Node* bits = intcon(InstanceKlass::fully_initialized); + test = _gvn.transform(new (C) SubINode(inst, bits)); + // The 'test' is non-zero if we need to take a slow path. + } Node* obj = new_instance(kls, test); set_result(obj); @@ -3209,7 +3319,7 @@ if (mirror_con == NULL) return false; // cannot happen? #ifndef PRODUCT - if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { + if (C->print_intrinsics() || C->print_inlining()) { ciType* k = mirror_con->java_mirror_type(); if (k) { tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id())); @@ -3683,6 +3793,8 @@ RegionNode* slow_region) { ciMethod* method = callee(); int vtable_index = method->vtable_index(); + assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, + err_msg_res("bad index %d", vtable_index)); // Get the Method* out of the appropriate vtable entry. 
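// --- Editorial note; not part of the changeset. The address arithmetic
// performed just below, spelled out: the Method* for a virtual call sits
// inside the receiver klass's embedded vtable at
//   entry_offset = (vtable_start_offset + vtable_index * vtableEntry::size())
//                  * wordSize + vtableEntry::method_offset_in_bytes()
// With illustrative numbers only (vtable starting at word 52, one-word
// entries, 8-byte words, method offset 0), index 5 gives (52 + 5) * 8 =
// 456 bytes past the klass pointer. The asserts added above guarantee the
// index is either a real vtable slot (>= 0) or the nonvirtual sentinel.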
int entry_offset = (InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size()) * wordSize + @@ -3733,6 +3845,8 @@ // so the vtable index is fixed. // No need to use the linkResolver to get it. vtable_index = method->vtable_index(); + assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, + err_msg_res("bad index %d", vtable_index)); } slow_call = new(C) CallDynamicJavaNode(tf, SharedRuntime::get_resolve_virtual_call_stub(), @@ -3897,14 +4011,14 @@ // caller sensitive methods. bool LibraryCallKit::inline_native_Reflection_getCallerClass() { #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); } #endif if (!jvms()->has_method()) { #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { tty->print_cr(" Bailing out because intrinsic was inlined at top level"); } #endif @@ -3928,7 +4042,7 @@ // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass). if (!m->caller_sensitive()) { #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n); } #endif @@ -3944,7 +4058,7 @@ set_result(makecon(TypeInstPtr::make(caller_mirror))); #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth()); tty->print_cr(" JVM state at this point:"); for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { @@ -3960,7 +4074,7 @@ } #ifndef PRODUCT - if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { + if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth()); tty->print_cr(" JVM state at this point:"); for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { @@ -4153,7 +4267,7 @@ // 12 - 64-bit VM, compressed klass // 16 - 64-bit VM, normal klass if (base_off % BytesPerLong != 0) { - assert(UseCompressedKlassPointers, ""); + assert(UseCompressedClassPointers, ""); if (is_array) { // Exclude length to copy by 8 bytes words. base_off += sizeof(int); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/live.cpp --- a/src/share/vm/opto/live.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/live.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -30,9 +30,6 @@ #include "opto/machnode.hpp" - -//============================================================================= -//------------------------------PhaseLive-------------------------------------- // Compute live-in/live-out. We use a totally incremental algorithm. The LIVE // problem is monotonic. The steady-state solution looks like this: pull a // block from the worklist. It has a set of delta's - values which are newly @@ -53,9 +50,9 @@ // Init the sparse live arrays. This data is live on exit from here! // The _live info is the live-out info. 
- _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*_cfg._num_blocks); + _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks()); uint i; - for( i=0; i<_cfg._num_blocks; i++ ) { + for (i = 0; i < _cfg.number_of_blocks(); i++) { _live[i].initialize(_maxlrg); } @@ -65,14 +62,14 @@ // Does the memory used by _defs and _deltas get reclaimed? Does it matter? TT // Array of values defined locally in blocks - _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg._num_blocks); - for( i=0; i<_cfg._num_blocks; i++ ) { + _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks()); + for (i = 0; i < _cfg.number_of_blocks(); i++) { _defs[i].initialize(_maxlrg); } // Array of delta-set pointers, indexed by block pre_order-1. - _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg._num_blocks); - memset( _deltas, 0, sizeof(IndexSet*)* _cfg._num_blocks); + _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks()); + memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks()); _free_IndexSet = NULL; @@ -80,31 +77,32 @@ VectorSet first_pass(Thread::current()->resource_area()); // Outer loop: must compute local live-in sets and push into predecessors. - uint iters = _cfg._num_blocks; // stat counters - for( uint j=_cfg._num_blocks; j>0; j-- ) { - Block *b = _cfg._blocks[j-1]; + for (uint j = _cfg.number_of_blocks(); j > 0; j--) { + Block* block = _cfg.get_block(j - 1); // Compute the local live-in set. Start with any new live-out bits. - IndexSet *use = getset( b ); - IndexSet *def = &_defs[b->_pre_order-1]; + IndexSet* use = getset(block); + IndexSet* def = &_defs[block->_pre_order-1]; DEBUG_ONLY(IndexSet *def_outside = getfreeset();) uint i; - for( i=b->_nodes.size(); i>1; i-- ) { - Node *n = b->_nodes[i-1]; - if( n->is_Phi() ) break; + for (i = block->number_of_nodes(); i > 1; i--) { + Node* n = block->get_node(i-1); + if (n->is_Phi()) { + break; + } - uint r = _names[n->_idx]; + uint r = _names.at(n->_idx); assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block"); def->insert( r ); use->remove( r ); uint cnt = n->req(); - for( uint k=1; k<cnt; k++ ) { + for (uint k = 1; k < cnt; k++) { Node *nk = n->in(k); uint nkidx = nk->_idx; - if (_cfg.get_block_for_node(nk) != b) { - uint u = _names[nkidx]; - use->insert( u ); - DEBUG_ONLY(def_outside->insert( u );) + if (_cfg.get_block_for_node(nk) != block) { + uint u = _names.at(nkidx); + use->insert(u); + DEBUG_ONLY(def_outside->insert(u);) } } } @@ -113,41 +111,38 @@ _free_IndexSet = def_outside; // Drop onto free list #endif // Remove anything defined by Phis and the block start instruction - for( uint k=i; k>0; k-- ) { - uint r = _names[b->_nodes[k-1]->_idx]; - def->insert( r ); - use->remove( r ); + for (uint k = i; k > 0; k--) { + uint r = _names.at(block->get_node(k - 1)->_idx); + def->insert(r); + use->remove(r); } // Push these live-in things to predecessors - for( uint l=1; l<b->num_preds(); l++ ) { - Block *p = _cfg.get_block_for_node(b->pred(l)); - add_liveout( p, use, first_pass ); + for (uint l = 1; l < block->num_preds(); l++) { + Block* p = _cfg.get_block_for_node(block->pred(l)); + add_liveout(p, use, first_pass); // PhiNode uses go in the live-out set of prior blocks. 
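// --- Editorial note; not part of the changeset. The subtlety behind the
// comment above: a Phi's l-th input is not used "inside" the Phi's block,
// it is consumed on the edge from predecessor l. It therefore belongs in
// that one predecessor's live-out set and must not become live-in of the
// Phi's own block. For example, with
//   B3:  x = Phi(a from B1, b from B2)
// 'a' is added to live-out(B1) and 'b' to live-out(B2) only; that is what
// the per-predecessor add_liveout(p, ...in(l)...) loop below implements.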
- for( uint k=i; k>0; k-- ) - add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass ); + for (uint k = i; k > 0; k--) { + add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass); + } } - freeset( b ); - first_pass.set(b->_pre_order); + freeset(block); + first_pass.set(block->_pre_order); // Inner loop: blocks that picked up new live-out values to be propagated - while( _worklist->size() ) { - // !!!!! -// #ifdef ASSERT - iters++; -// #endif - Block *b = _worklist->pop(); - IndexSet *delta = getset(b); + while (_worklist->size()) { + Block* block = _worklist->pop(); + IndexSet *delta = getset(block); assert( delta->count(), "missing delta set" ); // Add new-live-in to predecessors live-out sets - for (uint l = 1; l < b->num_preds(); l++) { - Block* block = _cfg.get_block_for_node(b->pred(l)); - add_liveout(block, delta, first_pass); + for (uint l = 1; l < block->num_preds(); l++) { + Block* predecessor = _cfg.get_block_for_node(block->pred(l)); + add_liveout(predecessor, delta, first_pass); } - freeset(b); + freeset(block); } // End of while-worklist-not-empty } // End of for-all-blocks-outer-loop @@ -155,7 +150,7 @@ // We explicitly clear all of the IndexSets which we are about to release. // This allows us to recycle their internal memory into IndexSet's free list. - for( i=0; i<_cfg._num_blocks; i++ ) { + for (i = 0; i < _cfg.number_of_blocks(); i++) { _defs[i].clear(); if (_deltas[i]) { // Is this always true? @@ -171,13 +166,11 @@ } -//------------------------------stats------------------------------------------ #ifndef PRODUCT void PhaseLive::stats(uint iters) const { } #endif -//------------------------------getset----------------------------------------- // Get an IndexSet for a block. Return existing one, if any. Make a new // empty one if a prior one does not exist. IndexSet *PhaseLive::getset( Block *p ) { @@ -188,7 +181,6 @@ return delta; // Return set of new live-out items } -//------------------------------getfreeset------------------------------------- // Pull from free list, or allocate. Internal allocation on the returned set // is always from thread local storage. IndexSet *PhaseLive::getfreeset( ) { @@ -207,7 +199,6 @@ return f; } -//------------------------------freeset---------------------------------------- // Free an IndexSet from a block. void PhaseLive::freeset( const Block *p ) { IndexSet *f = _deltas[p->_pre_order-1]; @@ -216,7 +207,6 @@ _deltas[p->_pre_order-1] = NULL; } -//------------------------------add_liveout------------------------------------ // Add a live-out value to a given blocks live-out set. If it is new, then // also add it to the delta set and stick the block on the worklist. void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) { @@ -233,8 +223,6 @@ } } - -//------------------------------add_liveout------------------------------------ // Add a vector of live-out values to a given blocks live-out set. 
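// --- Editorial illustration; not part of the changeset. The outer/inner
// loops above compute the classic backward dataflow fixpoint
//   live-in(b)   = use(b) | (live-out(b) & ~def(b))
//   live-out(p) |= live-in(b)   for every predecessor p of b
// re-queueing a predecessor only when its live-out actually grew (the
// "delta"). A compact standalone version over bitsets; every name is
// hypothetical and kMaxLRG is an arbitrary bound.
#include <bitset>
#include <deque>
#include <vector>

static const size_t kMaxLRG = 256;
using LiveSet = std::bitset<kMaxLRG>;

struct BBlock {
  LiveSet use, def;        // used-before-def / defined in the block
  std::vector<int> preds;  // predecessor block ids
  LiveSet live_out;
};

static void compute_liveness(std::vector<BBlock>& blocks) {
  std::deque<int> worklist;
  for (int i = (int)blocks.size() - 1; i >= 0; i--) worklist.push_back(i);
  while (!worklist.empty()) {
    int b = worklist.front();
    worklist.pop_front();
    LiveSet live_in = blocks[b].use | (blocks[b].live_out & ~blocks[b].def);
    for (int p : blocks[b].preds) {
      LiveSet grown = blocks[p].live_out | live_in;
      if (grown != blocks[p].live_out) {  // only a real delta requeues p
        blocks[p].live_out = grown;
        worklist.push_back(p);
      }
    }
  }
}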
void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) { IndexSet *live = &_live[p->_pre_order-1]; @@ -262,31 +250,31 @@ } #ifndef PRODUCT -//------------------------------dump------------------------------------------- // Dump the live-out set for a block void PhaseLive::dump( const Block *b ) const { tty->print("Block %d: ",b->_pre_order); tty->print("LiveOut: "); _live[b->_pre_order-1].dump(); - uint cnt = b->_nodes.size(); + uint cnt = b->number_of_nodes(); for( uint i=0; i<cnt; i++ ) { - tty->print("L%d/", _names[b->_nodes[i]->_idx] ); - b->_nodes[i]->dump(); + tty->print("L%d/", _names.at(b->get_node(i)->_idx)); + b->get_node(i)->dump(); } tty->print("\n"); } -//------------------------------verify_base_ptrs------------------------------- // Verify that base pointers and derived pointers are still sane. void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const { #ifdef ASSERT Unique_Node_List worklist(a); - for( uint i = 0; i < _cfg._num_blocks; i++ ) { - Block *b = _cfg._blocks[i]; - for( uint j = b->end_idx() + 1; j > 1; j-- ) { - Node *n = b->_nodes[j-1]; - if( n->is_Phi() ) break; + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + Block* block = _cfg.get_block(i); + for (uint j = block->end_idx() + 1; j > 1; j--) { + Node* n = block->get_node(j-1); + if (n->is_Phi()) { + break; + } // Found a safepoint? - if( n->is_MachSafePoint() ) { + if (n->is_MachSafePoint()) { MachSafePointNode *sfpt = n->as_MachSafePoint(); JVMState* jvms = sfpt->jvms(); if (jvms != NULL) { @@ -333,7 +321,7 @@ #ifdef _LP64 UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP || UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN || - UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass || + UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass || #endif check->as_Mach()->ideal_Opcode() == Op_LoadP || check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) { @@ -358,7 +346,6 @@ #endif } -//------------------------------verify------------------------------------- // Verify that graphs and base pointers are still sane. void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const { #ifdef ASSERT diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/live.hpp --- a/src/share/vm/opto/live.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/live.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -40,27 +40,7 @@ //------------------------------LRG_List--------------------------------------- // Map Node indices to Live RanGe indices. // Array lookup in the optimized case. 
-class LRG_List : public ResourceObj { - friend class VMStructs; - uint _cnt, _max; - uint* _lidxs; - ReallocMark _nesting; // assertion check for reallocations -public: - LRG_List( uint max ); - - uint lookup( uint nidx ) const { - return _lidxs[nidx]; - } - uint operator[] (uint nidx) const { return lookup(nidx); } - - void map( uint nidx, uint lidx ) { - assert( nidx < _cnt, "oob" ); - _lidxs[nidx] = lidx; - } - void extend( uint nidx, uint lidx ); - - uint Size() const { return _cnt; } -}; +typedef GrowableArray<uint> LRG_List; //------------------------------PhaseLive-------------------------------------- // Compute live-in/live-out diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/loopTransform.cpp --- a/src/share/vm/opto/loopTransform.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/loopTransform.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -776,6 +776,9 @@ continue; // not RC Node *cmp = bol->in(1); + if (cmp->is_FlagsProj()) { + continue; + } Node *rc_exp = cmp->in(1); Node *limit = cmp->in(2); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/loopopts.cpp --- a/src/share/vm/opto/loopopts.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/loopopts.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -2355,7 +2355,8 @@ opc == Op_Catch || opc == Op_CatchProj || opc == Op_Jump || - opc == Op_JumpProj) { + opc == Op_JumpProj || + opc == Op_FlagsProj) { #if !defined(PRODUCT) if (TracePartialPeeling) { tty->print_cr("\nExit control too complex: lp: %d", head->_idx); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/machnode.cpp --- a/src/share/vm/opto/machnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/machnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -341,7 +341,7 @@ return TypePtr::BOTTOM; } // %%% make offset be intptr_t - assert(!Universe::heap()->is_in_reserved((oop)offset), "must be a raw ptr"); + assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr"); return TypeRawPtr::BOTTOM; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/machnode.hpp --- a/src/share/vm/opto/machnode.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/machnode.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ class MachOper : public ResourceObj { public: // Allocate right next to the MachNodes in the same arena - void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); } + void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); } // Opcode virtual uint opcode() const = 0; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/macro.cpp --- a/src/share/vm/opto/macro.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/macro.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -72,6 +72,8 @@ int jvms_adj = new_dbg_start - old_dbg_start; assert (new_dbg_start == newcall->req(), "argument count mismatch"); + // SafePointScalarObject node could be referenced several times in debug info. + // Use Dict to record cloned nodes. Dict* sosn_map = new Dict(cmpkey,hashkey); for (uint i = old_dbg_start; i < oldcall->req(); i++) { Node* old_in = oldcall->in(i); @@ -79,8 +81,8 @@ if (old_in != NULL && old_in->is_SafePointScalarObject()) { SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); uint old_unique = C->unique(); - Node* new_in = old_sosn->clone(jvms_adj, sosn_map); - if (old_unique != C->unique()) { + Node* new_in = old_sosn->clone(sosn_map); + if (old_unique != C->unique()) { // New node? new_in->set_req(0, C->root()); // reset control edge new_in = transform_later(new_in); // Register new node. } @@ -725,7 +727,11 @@ while (safepoints.length() > 0) { SafePointNode* sfpt = safepoints.pop(); Node* mem = sfpt->memory(); - uint first_ind = sfpt->req(); + assert(sfpt->jvms() != NULL, "missed JVMS"); + // Fields of scalar objs are referenced only at the end + // of regular debuginfo at the last (youngest) JVMS. + // Record relative start index. 
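// --- Editorial note; not part of the changeset. What the relative index
// recorded below buys: the fields of a scalar-replaced object are
// appended after all regular debug info, i.e. starting at sfpt->req().
// Storing
//   rel = sfpt->req() - jvms->scloff()
// instead of the absolute position means the fields can be located again
// as scloff() + rel against whichever safepoint/JVMS the node is later
// read from, which is why the uses below change to first_index(jvms).
// Illustratively: with req() == 14 and scloff() == 10, rel is 4; a
// safepoint whose scalar section starts at 12 finds the same fields at
// input index 16.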
+ uint first_ind = (sfpt->req() - sfpt->jvms()->scloff()); SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type, #ifdef ASSERT alloc, @@ -799,7 +805,7 @@ for (int i = start; i < end; i++) { if (sfpt_done->in(i)->is_SafePointScalarObject()) { SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject(); - if (scobj->first_index() == sfpt_done->req() && + if (scobj->first_index(jvms) == sfpt_done->req() && scobj->n_fields() == (uint)nfields) { assert(scobj->alloc() == alloc, "sanity"); sfpt_done->set_req(i, res); @@ -2185,7 +2191,7 @@ Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) ); #ifdef _LP64 - if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) { + if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) { assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity"); klass_node->in(1)->init_req(0, ctrl); } else diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/matcher.cpp --- a/src/share/vm/opto/matcher.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/matcher.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -67,8 +67,8 @@ const uint Matcher::_end_rematerialize = _END_REMATERIALIZE; //---------------------------Matcher------------------------------------------- -Matcher::Matcher( Node_List &proj_list ) : - PhaseTransform( Phase::Ins_Select ), +Matcher::Matcher() +: PhaseTransform( Phase::Ins_Select ), #ifdef ASSERT _old2new_map(C->comp_arena()), _new2old_map(C->comp_arena()), @@ -78,7 +78,7 @@ _swallowed(swallowed), _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE), _end_inst_chain_rule(_END_INST_CHAIN_RULE), - _must_clone(must_clone), _proj_list(proj_list), + _must_clone(must_clone), _register_save_policy(register_save_policy), _c_reg_save_policy(c_reg_save_policy), _register_save_type(register_save_type), @@ -1304,8 +1304,9 @@ for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++) proj->_rout.Insert(OptoReg::Name(i)); } - if( proj->_rout.is_NotEmpty() ) - _proj_list.push(proj); + if (proj->_rout.is_NotEmpty()) { + push_projection(proj); + } } // Transfer the safepoint information from the call to the mcall // Move the JVMState list @@ -1685,14 +1686,15 @@ } // If the _leaf is an AddP, insert the base edge - if( leaf->is_AddP() ) + if (leaf->is_AddP()) { mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base)); + } - uint num_proj = _proj_list.size(); + uint number_of_projections_prior = number_of_projections(); // Perform any 1-to-many expansions required - MachNode *ex = mach->Expand(s,_proj_list, mem); - if( ex != mach ) { + MachNode *ex = mach->Expand(s, _projection_list, mem); + if (ex != mach) { assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match"); if( ex->in(1)->is_Con() ) ex->in(1)->set_req(0, C->root()); @@ -1713,7 +1715,7 @@ // generated belatedly during spill code generation. 
if (_allocation_started) { guarantee(ex == mach, "no expand rules during spill generation"); - guarantee(_proj_list.size() == num_proj, "no allocation during spill generation"); + guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation"); } if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) { @@ -1962,6 +1964,7 @@ case Op_Catch: case Op_CatchProj: case Op_CProj: + case Op_FlagsProj: case Op_JumpProj: case Op_JProj: case Op_NeverBranch: diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/matcher.hpp --- a/src/share/vm/opto/matcher.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/matcher.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -88,7 +88,7 @@ Node *transform( Node *dummy ); - Node_List &_proj_list; // For Machine nodes killing many values + Node_List _projection_list; // For Machine nodes killing many values Node_Array _shared_nodes; @@ -183,10 +183,30 @@ void collect_null_checks( Node *proj, Node *orig_proj ); void validate_null_checks( ); - Matcher( Node_List &proj_list ); + Matcher(); + + // Get a projection node at position pos + Node* get_projection(uint pos) { + return _projection_list[pos]; + } + + // Push a projection node onto the projection list + void push_projection(Node* node) { + _projection_list.push(node); + } + + Node* pop_projection() { + return _projection_list.pop(); + } + + // Number of nodes in the projection list + uint number_of_projections() const { + return _projection_list.size(); + } // Select instructions for entire method - void match( ); + void match(); + // Helper for match OptoReg::Name warp_incoming_stk_arg( VMReg reg ); @@ -317,6 +337,9 @@ // Register for MODL projection of divmodL static RegMask modL_proj_mask(); + static const RegMask mathExactI_result_proj_mask(); + static const RegMask mathExactI_flags_proj_mask(); + // Use hardware DIV instruction when it is faster than // a code which use multiply for division by constant. static bool use_asm_for_ldiv_by_con( jlong divisor ); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/mathexactnode.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/mathexactnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "memory/allocation.inline.hpp" +#include "opto/addnode.hpp" +#include "opto/machnode.hpp" +#include "opto/mathexactnode.hpp" +#include "opto/matcher.hpp" +#include "opto/subnode.hpp" + +MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) { + init_req(0, ctrl); + init_req(1, n1); + init_req(2, n2); +} + +Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) { + uint ideal_reg = proj->ideal_reg(); + RegMask rm; + if (proj->_con == result_proj_node) { + rm = m->mathExactI_result_proj_mask(); + } else { + assert(proj->_con == flags_proj_node, "must be result or flags"); + assert(ideal_reg == Op_RegFlags, "sanity"); + rm = m->mathExactI_flags_proj_mask(); + } + return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg); +} + +// If the MathExactNode won't overflow we have to replace the +// FlagsProjNode and ProjNode that is generated by the MathExactNode +Node* MathExactNode::no_overflow(PhaseGVN *phase, Node* new_result) { + PhaseIterGVN *igvn = phase->is_IterGVN(); + if (igvn) { + ProjNode* result = result_node(); + ProjNode* flags = flags_node(); + + if (result != NULL) { + igvn->replace_node(result, new_result); + } + + if (flags != NULL) { + BoolNode* bolnode = (BoolNode *) flags->unique_out(); + switch (bolnode->_test._test) { + case BoolTest::overflow: + // if the check is for overflow - never taken + igvn->replace_node(bolnode, phase->intcon(0)); + break; + case BoolTest::no_overflow: + // if the check is for no overflow - always taken + igvn->replace_node(bolnode, phase->intcon(1)); + break; + default: + fatal("Unexpected value of BoolTest"); + break; + } + flags->del_req(0); + } + } + return new_result; +} + +Node *AddExactINode::Ideal(PhaseGVN *phase, bool can_reshape) { + Node *arg1 = in(1); + Node *arg2 = in(2); + + const Type* type1 = phase->type(arg1); + const Type* type2 = phase->type(arg2); + + if (type1 != Type::TOP && type1->singleton() && + type2 != Type::TOP && type2->singleton()) { + jint val1 = arg1->get_int(); + jint val2 = arg2->get_int(); + jint result = val1 + val2; + // Hacker's Delight 2-12 Overflow if both arguments have the opposite sign of the result + if ( (((val1 ^ result) & (val2 ^ result)) >= 0)) { + Node* con_result = ConINode::make(phase->C, result); + return no_overflow(phase, con_result); + } + return NULL; + } + + if (type1 == TypeInt::ZERO) { // (Add 0 x) == x + Node* add_result = new (phase->C) AddINode(arg1, arg2); + return no_overflow(phase, add_result); + } + + if (type2 == TypeInt::ZERO) { // (Add x 0) == x + Node* add_result = new (phase->C) AddINode(arg1, arg2); + return no_overflow(phase, add_result); + } + + if (type2->singleton()) { + return NULL; // no change - keep constant on the right + } + + if (type1->singleton()) { + // Make it x + Constant - move constant to the right + swap_edges(1, 2); + return this; + } + + if (arg2->is_Load()) { + return NULL; // no change - keep load on the right + } + + if (arg1->is_Load()) { + // Make it x + Load - move load to the right + swap_edges(1, 2); + return this; + } + + if (arg1->_idx > arg2->_idx) { + // Sort the edges + swap_edges(1, 2); + return this; + } + + return NULL; +} + diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/mathexactnode.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/mathexactnode.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OPTO_MATHEXACTNODE_HPP +#define SHARE_VM_OPTO_MATHEXACTNODE_HPP + +#include "opto/multnode.hpp" +#include "opto/node.hpp" +#include "opto/type.hpp" + +class Node; + +class PhaseGVN; +class PhaseTransform; + +class MathExactNode : public MultiNode { +public: + MathExactNode(Node* ctrl, Node* in1, Node* in2); + enum { + result_proj_node = 0, + flags_proj_node = 1 + }; + virtual int Opcode() const; + virtual Node* Identity(PhaseTransform* phase) { return this; } + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; } + virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); } + virtual uint hash() const { return Node::hash(); } + virtual bool is_CFG() const { return false; } + virtual uint ideal_reg() const { return NotAMachineReg; } + + ProjNode* result_node() { return proj_out(result_proj_node); } + ProjNode* flags_node() { return proj_out(flags_proj_node); } +protected: + Node* no_overflow(PhaseGVN *phase, Node* new_result); +}; + +class AddExactINode : public MathExactNode { +public: + AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {} + virtual int Opcode() const; + virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; } + virtual Node* match(const ProjNode* proj, const Matcher* m); + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); +}; + +class FlagsProjNode : public ProjNode { +public: + FlagsProjNode(Node* src, uint con) : ProjNode(src, con) { + init_class_id(Class_FlagsProj); + } + + virtual int Opcode() const; + virtual bool is_CFG() const { return false; } + virtual const Type* bottom_type() const { return TypeInt::CC; } + virtual uint ideal_reg() const { return Op_RegFlags; } +}; + + +#endif + diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/memnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -962,6 +962,19 @@ return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address); } +static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) { + if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) { + bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile(); + bool is_stable_ary = FoldStableValues && + (tp != NULL) && (tp->isa_aryptr() != NULL) && + tp->isa_aryptr()->is_stable(); + + return (eliminate_boxing && non_volatile) || is_stable_ary; + } + + return false; +} + 
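// --- Editorial illustration; not part of the changeset. The helper added
// above generalizes an existing rule: a load may look through MemBarNodes
// to find the store that produced its value when the location cannot be
// republished with a different value -- non-volatile fields under boxing
// elimination, and now @Stable array slots as well. A standalone model of
// that walk (types hypothetical):
struct MNode {
  bool is_membar;
  MNode* memory_in;  // the barrier's incoming memory state
};

static MNode* skip_membars(MNode* st, bool location_is_immutable) {
  while (location_is_immutable && st != nullptr && st->is_membar) {
    st = st->memory_in;  // step past the barrier toward the store
  }
  return st;
}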
//---------------------------can_see_stored_value------------------------------ // This routine exists to make sure this set of tests is done the same // everywhere. We need to make a coordinated change: first LoadNode::Ideal @@ -976,11 +989,9 @@ const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL; // This is more general than load from boxing objects. - if (phase->C->eliminate_boxing() && (atp != NULL) && - (atp->index() >= Compile::AliasIdxRaw) && - (atp->field() != NULL) && !atp->field()->is_volatile()) { + if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) { uint alias_idx = atp->index(); - bool final = atp->field()->is_final(); + bool final = !atp->is_rewritable(); Node* result = NULL; Node* current = st; // Skip through chains of MemBarNodes checking the MergeMems for @@ -1015,7 +1026,6 @@ } } - // Loop around twice in the case Load -> Initialize -> Store. // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.) for (int trip = 0; trip <= 1; trip++) { @@ -1577,6 +1587,40 @@ return NULL; } +// Try to constant-fold a stable array element. +static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) { + assert(ary->is_stable(), "array should be stable"); + + if (ary->const_oop() != NULL) { + // Decode the results of GraphKit::array_element_address. + ciArray* aobj = ary->const_oop()->as_array(); + ciConstant con = aobj->element_value_by_offset(off); + + if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) { + const Type* con_type = Type::make_from_constant(con); + if (con_type != NULL) { + if (con_type->isa_aryptr()) { + // Join with the array element type, in case it is also stable. + int dim = ary->stable_dimension(); + con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1); + } + if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) { + con_type = con_type->make_narrowoop(); + } +#ifndef PRODUCT + if (TraceIterativeGVN) { + tty->print("FoldStableValues: array element [off=%d]: con_type=", off); + con_type->dump(); tty->cr(); + } +#endif //PRODUCT + return con_type; + } + } + } + + return NULL; +} + //------------------------------Value----------------------------------------- const Type *LoadNode::Value( PhaseTransform *phase ) const { // Either input is TOP ==> the result is TOP @@ -1591,8 +1635,31 @@ Compile* C = phase->C; // Try to guess loaded type from pointer type - if (tp->base() == Type::AryPtr) { - const Type *t = tp->is_aryptr()->elem(); + if (tp->isa_aryptr()) { + const TypeAryPtr* ary = tp->is_aryptr(); + const Type *t = ary->elem(); + + // Determine whether the reference is beyond the header or not, by comparing + // the offset against the offset of the start of the array's data. + // Different array types begin at slightly different offsets (12 vs. 16). + // We choose T_BYTE as an example base type that is least restrictive + // as to alignment, which will therefore produce the smallest + // possible base offset. + const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE); + const bool off_beyond_header = ((uint)off >= (uint)min_base_off); + + // Try to constant-fold a stable array element. 
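// --- Editorial illustration; not part of the changeset. The rule that
// fold_stable_ary_elem() above implements, reduced to scalars: an element
// of a @Stable array may be treated as a compile-time constant only when
// (a) the array reference itself is a constant oop and (b) the element no
// longer holds its default value, since a default (0/null) slot is still
// permitted one future write. All names hypothetical.
#include <cstddef>
#include <optional>
#include <vector>

struct StableIntArray {
  bool is_constant_oop;     // array known exactly at compile time?
  std::vector<int> elems;
};

static std::optional<int> fold_stable_elem(const StableIntArray& a, size_t i) {
  if (!a.is_constant_oop || i >= a.elems.size()) return std::nullopt;
  int v = a.elems[i];
  if (v == 0) return std::nullopt;  // default value: not yet foldable
  return v;                         // safe to constant-fold
}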
+ if (FoldStableValues && ary->is_stable()) { + // Make sure the reference is not into the header + if (off_beyond_header && off != Type::OffsetBot) { + assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant"); + const Type* con_type = fold_stable_ary_elem(ary, off, memory_type()); + if (con_type != NULL) { + return con_type; + } + } + } + // Don't do this for integer types. There is only potential profit if // the element type t is lower than _type; that is, for int types, if _type is // more restrictive than t. This only happens here if one is short and the other @@ -1613,14 +1680,7 @@ && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { // t might actually be lower than _type, if _type is a unique // concrete subclass of abstract class t. - // Make sure the reference is not into the header, by comparing - // the offset against the offset of the start of the array's data. - // Different array types begin at slightly different offsets (12 vs. 16). - // We choose T_BYTE as an example base type that is least restrictive - // as to alignment, which will therefore produce the smallest - // possible base offset. - const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE); - if ((uint)off >= (uint)min_base_off) { // is the offset beyond the header? + if (off_beyond_header) { // is the offset beyond the header? const Type* jt = t->join(_type); // In any case, do not allow the join, per se, to empty out the type. if (jt->empty() && !t->empty()) { @@ -1971,7 +2031,7 @@ assert(adr_type != NULL, "expecting TypeKlassPtr"); #ifdef _LP64 if (adr_type->is_ptr_to_narrowklass()) { - assert(UseCompressedKlassPointers, "no compressed klasses"); + assert(UseCompressedClassPointers, "no compressed klasses"); Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass())); return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr()); } @@ -2309,7 +2369,7 @@ val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop())); return new (C) StoreNNode(ctl, mem, adr, adr_type, val); } else if (adr->bottom_type()->is_ptr_to_narrowklass() || - (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() && + (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() && adr->bottom_type()->isa_rawptr())) { val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass())); return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/multnode.cpp --- a/src/share/vm/opto/multnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/multnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "opto/callnode.hpp" #include "opto/matcher.hpp" +#include "opto/mathexactnode.hpp" #include "opto/multnode.hpp" #include "opto/opcodes.hpp" #include "opto/phaseX.hpp" @@ -46,15 +47,21 @@ assert(Opcode() != Op_If || outcnt() == 2, "bad if #1"); for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) { Node *p = fast_out(i); - if( !p->is_Proj() ) { + if (p->is_Proj()) { + ProjNode *proj = p->as_Proj(); + if (proj->_con == which_proj) { + assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2"); + return proj; + } + } else if (p->is_FlagsProj()) { + FlagsProjNode *proj = p->as_FlagsProj(); + if (proj->_con == which_proj) { + return proj; + } + } else { assert(p == this && this->is_Start(), "else must be proj"); continue; } - ProjNode 
*proj = p->as_Proj(); - if( proj->_con == which_proj ) { - assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2"); - return proj; - } } return NULL; } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/node.cpp --- a/src/share/vm/opto/node.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/node.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -773,6 +773,21 @@ _in[_cnt] = NULL; // NULL out emptied slot } +//------------------------------del_req_ordered-------------------------------- +// Delete the required edge and compact the edge array with preserved order +void Node::del_req_ordered( uint idx ) { + assert( idx < _cnt, "oob"); + assert( !VerifyHashTableKeys || _hash_lock == 0, + "remove node from hash table before modifying it"); + // First remove corresponding def-use edge + Node *n = in(idx); + if (n != NULL) n->del_out((Node *)this); + if (idx < _cnt - 1) { // Not last edge ? + Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*))); + } + _in[--_cnt] = NULL; // NULL out emptied slot +} + //------------------------------ins_req---------------------------------------- // Insert a new required input at the end void Node::ins_req( uint idx, Node *n ) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/node.hpp --- a/src/share/vm/opto/node.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/node.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,7 @@ class EncodePKlassNode; class FastLockNode; class FastUnlockNode; +class FlagsProjNode; class IfNode; class IfFalseNode; class IfTrueNode; @@ -211,7 +212,7 @@ // New Operator that takes a Compile pointer, this will eventually // be the "new" New operator. - inline void* operator new( size_t x, Compile* C) { + inline void* operator new( size_t x, Compile* C) throw() { Node* n = (Node*)C->node_arena()->Amalloc_D(x); #ifdef ASSERT n->_in = (Node**)n; // magic cookie for assertion check @@ -384,6 +385,7 @@ void add_req( Node *n ); // Append a NEW required input void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). 
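// --- Editorial illustration; not part of the changeset. The difference
// between the two deletion flavors declared just below: del_req() fills
// the hole with the last input (O(1) but order-destroying), while the new
// del_req_ordered() shifts the tail down by one so relative input order
// survives -- the same move Copy::conjoint_words_to_lower performs in the
// implementation above. Standalone equivalent on a raw pointer array:
#include <cstring>

static void del_ordered(void** in, unsigned& cnt, unsigned idx) {
  // assumes 0 <= idx < cnt, mirroring the assert in del_req_ordered()
  if (idx < cnt - 1) {
    // slide the (cnt - idx - 1) trailing slots left, preserving order
    std::memmove(&in[idx], &in[idx + 1], (cnt - idx - 1) * sizeof(void*));
  }
  in[--cnt] = nullptr;  // clear the vacated last slot
}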
void del_req( uint idx ); // Delete required edge & compact + void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order void ins_req( uint i, Node *n ); // Insert a NEW required input void set_req( uint i, Node *n ) { assert( is_not_dead(n), "can not use dead node"); @@ -622,6 +624,7 @@ DEFINE_CLASS_ID(Cmp, Sub, 0) DEFINE_CLASS_ID(FastLock, Cmp, 0) DEFINE_CLASS_ID(FastUnlock, Cmp, 1) + DEFINE_CLASS_ID(FlagsProj, Cmp, 2) DEFINE_CLASS_ID(MergeMem, Node, 7) DEFINE_CLASS_ID(Bool, Node, 8) @@ -725,6 +728,7 @@ DEFINE_CLASS_QUERY(EncodePKlass) DEFINE_CLASS_QUERY(FastLock) DEFINE_CLASS_QUERY(FastUnlock) + DEFINE_CLASS_QUERY(FlagsProj) DEFINE_CLASS_QUERY(If) DEFINE_CLASS_QUERY(IfFalse) DEFINE_CLASS_QUERY(IfTrue) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/output.cpp --- a/src/share/vm/opto/output.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/output.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -54,11 +54,10 @@ extern int emit_exception_handler(CodeBuffer &cbuf); extern int emit_deopt_handler(CodeBuffer &cbuf); -//------------------------------Output----------------------------------------- // Convert Nodes to instruction bits and pass off to the VM void Compile::Output() { // RootNode goes - assert( _cfg->_broot->_nodes.size() == 0, "" ); + assert( _cfg->get_root_block()->number_of_nodes() == 0, "" ); // The number of new nodes (mostly MachNop) is proportional to // the number of java calls and inner loops which are aligned. @@ -68,14 +67,14 @@ return; } // Make sure I can find the Start Node - Block *entry = _cfg->_blocks[1]; - Block *broot = _cfg->_broot; - - const StartNode *start = entry->_nodes[0]->as_Start(); + Block *entry = _cfg->get_block(1); + Block *broot = _cfg->get_root_block(); + + const StartNode *start = entry->head()->as_Start(); // Replace StartNode with prolog MachPrologNode *prolog = new (this) MachPrologNode(); - entry->_nodes.map( 0, prolog ); + entry->map_node(prolog, 0); _cfg->map_node_to_block(prolog, entry); _cfg->unmap_node_from_block(start); // start is no longer in any block @@ -109,40 +108,44 @@ } // Insert epilogs before every return - for( uint i=0; i<_cfg->_num_blocks; i++ ) { - Block *b = _cfg->_blocks[i]; - if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point? - Node *m = b->end(); - if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) { - MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); - b->add_inst( epilog ); - _cfg->map_node_to_block(epilog, b); + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { + Block* block = _cfg->get_block(i); + if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point? + Node* m = block->end(); + if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) { + MachEpilogNode* epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); + block->add_inst(epilog); + _cfg->map_node_to_block(epilog, block); } } } # ifdef ENABLE_ZAP_DEAD_LOCALS - if ( ZapDeadCompiledLocals ) Insert_zap_nodes(); + if (ZapDeadCompiledLocals) { + Insert_zap_nodes(); + } # endif - uint* blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1); - blk_starts[0] = 0; + uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1); + blk_starts[0] = 0; // Initialize code buffer and process short branches. 
CodeBuffer* cb = init_buffer(blk_starts); - if (cb == NULL || failing()) return; + if (cb == NULL || failing()) { + return; + } ScheduleAndBundle(); #ifndef PRODUCT if (trace_opto_output()) { tty->print("\n---- After ScheduleAndBundle ----\n"); - for (uint i = 0; i < _cfg->_num_blocks; i++) { + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { tty->print("\nBB#%03d:\n", i); - Block *bb = _cfg->_blocks[i]; - for (uint j = 0; j < bb->_nodes.size(); j++) { - Node *n = bb->_nodes[j]; + Block* block = _cfg->get_block(i); + for (uint j = 0; j < block->number_of_nodes(); j++) { + Node* n = block->get_node(j); OptoReg::Name reg = _regalloc->get_reg_first(n); tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : ""); n->dump(); @@ -151,11 +154,15 @@ } #endif - if (failing()) return; + if (failing()) { + return; + } BuildOopMaps(); - if (failing()) return; + if (failing()) { + return; + } fill_buffer(cb, blk_starts); } @@ -217,10 +224,10 @@ return; // no safepoints/oopmaps emitted for calls in stubs,so we don't care // Insert call to zap runtime stub before every node with an oop map - for( uint i=0; i<_cfg->_num_blocks; i++ ) { - Block *b = _cfg->_blocks[i]; - for ( uint j = 0; j < b->_nodes.size(); ++j ) { - Node *n = b->_nodes[j]; + for( uint i=0; i<_cfg->number_of_blocks(); i++ ) { + Block *b = _cfg->get_block(i); + for ( uint j = 0; j < b->number_of_nodes(); ++j ) { + Node *n = b->get_node(j); // Determining if we should insert a zap-a-lot node in output. // We do that for all nodes that has oopmap info, except for calls @@ -249,7 +256,7 @@ } if (insert) { Node *zap = call_zap_node(n->as_MachSafePoint(), i); - b->_nodes.insert( j, zap ); + b->insert_node(zap, j); _cfg->map_node_to_block(zap, b); ++j; } @@ -275,7 +282,6 @@ return _matcher->match_sfpt(ideal_node); } -//------------------------------is_node_getting_a_safepoint-------------------- bool Compile::is_node_getting_a_safepoint( Node* n) { // This code duplicates the logic prior to the call of add_safepoint // below in this file. @@ -285,7 +291,6 @@ # endif // ENABLE_ZAP_DEAD_LOCALS -//------------------------------compute_loop_first_inst_sizes------------------ // Compute the size of first NumberOfLoopInstrToAlign instructions at the top // of a loop. When aligning a loop we need to provide enough instructions // in cpu's fetch buffer to feed decoders. The loop alignment could be @@ -302,42 +307,39 @@ // or alignment padding is larger then MaxLoopPad. By default, MaxLoopPad // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is // equal to 11 bytes which is the largest address NOP instruction. - if( MaxLoopPad < OptoLoopAlignment-1 ) { - uint last_block = _cfg->_num_blocks-1; - for( uint i=1; i <= last_block; i++ ) { - Block *b = _cfg->_blocks[i]; + if (MaxLoopPad < OptoLoopAlignment - 1) { + uint last_block = _cfg->number_of_blocks() - 1; + for (uint i = 1; i <= last_block; i++) { + Block* block = _cfg->get_block(i); // Check the first loop's block which requires an alignment. - if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) { + if (block->loop_alignment() > (uint)relocInfo::addr_unit()) { uint sum_size = 0; uint inst_cnt = NumberOfLoopInstrToAlign; - inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc); + inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, _regalloc); // Check subsequent fallthrough blocks if the loop's first // block(s) does not have enough instructions. 
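// --- Editorial note and sketch; not part of the changeset. The sizing
// walk above exists because aligning a loop head costs NOP padding: at
// worst alignment-1 bytes (bounded by MaxLoopPad), and it only pays off
// while the loop's first NumberOfLoopInstrToAlign instructions still fit
// the decoder's fetch window. The padding arithmetic itself:
#include <cstdint>

static uint32_t pad_to_align(uint32_t off, uint32_t align) {
  // NOP bytes needed so that 'off' becomes a multiple of 'align';
  // the maximum value is align - 1, the worst case assumed above.
  return (align - (off % align)) % align;
}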
- Block *nb = b; - while( inst_cnt > 0 && - i < last_block && - !_cfg->_blocks[i+1]->has_loop_alignment() && - !nb->has_successor(b) ) { + Block *nb = block; + while(inst_cnt > 0 && + i < last_block && + !_cfg->get_block(i + 1)->has_loop_alignment() && + !nb->has_successor(block)) { i++; - nb = _cfg->_blocks[i]; + nb = _cfg->get_block(i); inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc); } // while( inst_cnt > 0 && i < last_block ) - b->set_first_inst_size(sum_size); + block->set_first_inst_size(sum_size); } // f( b->head()->is_Loop() ) } // for( i <= last_block ) } // if( MaxLoopPad < OptoLoopAlignment-1 ) } -//----------------------shorten_branches--------------------------------------- // The architecture description provides short branch variants for some long // branch instructions. Replace eligible long branches with short branches. void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) { - - // ------------------ // Compute size of each block, method size, and relocation information size - uint nblocks = _cfg->_num_blocks; + uint nblocks = _cfg->number_of_blocks(); uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks); uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks); @@ -364,7 +366,7 @@ uint last_avoid_back_to_back_adr = max_uint; uint nop_size = (new (this) MachNopNode())->size(_regalloc); for (uint i = 0; i < nblocks; i++) { // For all blocks - Block *b = _cfg->_blocks[i]; + Block* block = _cfg->get_block(i); // During short branch replacement, we store the relative (to blk_starts) // offset of jump in jmp_offset, rather than the absolute offset of jump. @@ -377,10 +379,10 @@ DEBUG_ONLY( jmp_rule[i] = 0; ) // Sum all instruction sizes to compute block size - uint last_inst = b->_nodes.size(); + uint last_inst = block->number_of_nodes(); uint blk_size = 0; for (uint j = 0; j < last_inst; j++) { - Node* nj = b->_nodes[j]; + Node* nj = block->get_node(j); // Handle machine instruction nodes if (nj->is_Mach()) { MachNode *mach = nj->as_Mach(); @@ -441,8 +443,8 @@ // When the next block starts a loop, we may insert pad NOP // instructions. Since we cannot know our future alignment, // assume the worst. - if (i< nblocks-1) { - Block *nb = _cfg->_blocks[i+1]; + if (i < nblocks - 1) { + Block* nb = _cfg->get_block(i + 1); int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit(); if (max_loop_pad > 0) { assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), ""); @@ -473,26 +475,26 @@ has_short_branch_candidate = false; int adjust_block_start = 0; for (uint i = 0; i < nblocks; i++) { - Block *b = _cfg->_blocks[i]; + Block* block = _cfg->get_block(i); int idx = jmp_nidx[i]; - MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach(); + MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach(); if (mach != NULL && mach->may_be_short_branch()) { #ifdef ASSERT assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity"); int j; // Find the branch; ignore trailing NOPs. 
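shorten_branches() iterates to a fixed point: it computes each jump's displacement pessimistically and swaps in the short form only when the distance is guaranteed to fit. A simplified sketch of the fit test; the real decision comes from the AD-file-generated short_branch_version() plus per-platform offset rules:

    // Illustrative only: a short branch with a signed 8-bit displacement
    // fits when the (pessimistically computed) distance is in range.
    static bool fits_in_short_branch(int displacement) {
      return displacement >= -128 && displacement <= 127;
    }

Because replacing one branch shrinks every later offset, the outer loop re-runs until progress stays false.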
- for (j = b->_nodes.size()-1; j>=0; j--) { - Node* n = b->_nodes[j]; + for (j = block->number_of_nodes()-1; j>=0; j--) { + Node* n = block->get_node(j); if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) break; } - assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity"); + assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity"); #endif int br_size = jmp_size[i]; int br_offs = blk_starts[i] + jmp_offset[i]; // This requires the TRUE branch target be in succs[0] - uint bnum = b->non_connector_successor(0)->_pre_order; + uint bnum = block->non_connector_successor(0)->_pre_order; int offset = blk_starts[bnum] - br_offs; if (bnum > i) { // adjust following block's offset offset -= adjust_block_start; @@ -520,7 +522,7 @@ diff -= nop_size; } adjust_block_start += diff; - b->_nodes.map(idx, replacement); + block->map_node(replacement, idx); mach->subsume_by(replacement, C); mach = replacement; progress = true; @@ -637,7 +639,7 @@ new ConstantOopWriteValue(cik->java_mirror()->constant_encoding())); Compile::set_sv_for_object_node(objs, sv); - uint first_ind = spobj->first_index(); + uint first_ind = spobj->first_index(sfpt->jvms()); for (uint i = 0; i < spobj->n_fields(); i++) { Node* fld_node = sfpt->in(first_ind+i); (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs); @@ -892,7 +894,7 @@ GrowableArray *monarray = new GrowableArray(num_mon); // Loop over monitors and insert into array - for(idx = 0; idx < num_mon; idx++) { + for (idx = 0; idx < num_mon; idx++) { // Grab the node that defines this monitor Node* box_node = sfn->monitor_box(jvms, idx); Node* obj_node = sfn->monitor_obj(jvms, idx); @@ -900,11 +902,11 @@ // Create ScopeValue for object ScopeValue *scval = NULL; - if( obj_node->is_SafePointScalarObject() ) { + if (obj_node->is_SafePointScalarObject()) { SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject(); scval = Compile::sv_for_node_id(objs, spobj->_idx); if (scval == NULL) { - const Type *t = obj_node->bottom_type(); + const Type *t = spobj->bottom_type(); ciKlass* cik = t->is_oopptr()->klass(); assert(cik->is_instance_klass() || cik->is_array_klass(), "Not supported allocation."); @@ -912,14 +914,14 @@ new ConstantOopWriteValue(cik->java_mirror()->constant_encoding())); Compile::set_sv_for_object_node(objs, sv); - uint first_ind = spobj->first_index(); + uint first_ind = spobj->first_index(youngest_jvms); for (uint i = 0; i < spobj->n_fields(); i++) { Node* fld_node = sfn->in(first_ind+i); (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs); } scval = sv; } - } else if( !obj_node->is_Con() ) { + } else if (!obj_node->is_Con()) { OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node); if( obj_node->bottom_type()->base() == Type::NarrowOop ) { scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop ); @@ -1086,11 +1088,11 @@ if (has_mach_constant_base_node()) { // Fill the constant table. // Note: This must happen before shorten_branches. - for (uint i = 0; i < _cfg->_num_blocks; i++) { - Block* b = _cfg->_blocks[i]; - - for (uint j = 0; j < b->_nodes.size(); j++) { - Node* n = b->_nodes[j]; + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { + Block* b = _cfg->get_block(i); + + for (uint j = 0; j < b->number_of_nodes(); j++) { + Node* n = b->get_node(j); // If the node is a MachConstantNode evaluate the constant // value section. @@ -1173,7 +1175,7 @@ // !!!!! 
This preserves old handling of oopmaps for now debug_info()->set_oopmaps(_oop_map_set); - uint nblocks = _cfg->_num_blocks; + uint nblocks = _cfg->number_of_blocks(); // Count and start of implicit null check instructions uint inct_cnt = 0; uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1); @@ -1221,21 +1223,21 @@ // Now fill in the code buffer Node *delay_slot = NULL; - for (uint i=0; i < nblocks; i++) { - Block *b = _cfg->_blocks[i]; - - Node *head = b->head(); + for (uint i = 0; i < nblocks; i++) { + Block* block = _cfg->get_block(i); + Node* head = block->head(); // If this block needs to start aligned (i.e, can be reached other // than by falling-thru from the previous block), then force the // start of a new bundle. - if (Pipeline::requires_bundling() && starts_bundle(head)) + if (Pipeline::requires_bundling() && starts_bundle(head)) { cb->flush_bundle(true); + } #ifdef ASSERT - if (!b->is_connector()) { + if (!block->is_connector()) { stringStream st; - b->dump_head(_cfg, &st); + block->dump_head(_cfg, &st); MacroAssembler(cb).block_comment(st.as_string()); } jmp_target[i] = 0; @@ -1246,16 +1248,16 @@ int blk_offset = current_offset; // Define the label at the beginning of the basic block - MacroAssembler(cb).bind(blk_labels[b->_pre_order]); - - uint last_inst = b->_nodes.size(); + MacroAssembler(cb).bind(blk_labels[block->_pre_order]); + + uint last_inst = block->number_of_nodes(); // Emit block normally, except for last instruction. // Emit means "dump code bits into code buffer". for (uint j = 0; j<last_inst; j++) { - Node* n = b->_nodes[j]; + Node* n = block->get_node(j); // See if delay slots are supported if (valid_bundle_info(n) && @@ -1309,9 +1311,9 @@ assert((padding % nop_size) == 0, "padding is not a multiple of NOP size"); int nops_cnt = padding / nop_size; MachNode *nop = new (this) MachNopNode(nops_cnt); - b->_nodes.insert(j++, nop); + block->insert_node(nop, j++); last_inst++; - _cfg->map_node_to_block(nop, b); + _cfg->map_node_to_block(nop, block); nop->emit(*cb, _regalloc); cb->flush_bundle(true); current_offset = cb->insts_size(); @@ -1325,7 +1327,7 @@ mcall->method_set((intptr_t)mcall->entry_point()); // Save the return address - call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset(); + call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset(); if (mcall->is_MachCallLeaf()) { is_mcall = false; @@ -1362,7 +1364,7 @@ // If this is a branch, then fill in the label with the target BB's label else if (mach->is_MachBranch()) { // This requires the TRUE branch target be in succs[0] - uint block_num = b->non_connector_successor(0)->_pre_order; + uint block_num = block->non_connector_successor(0)->_pre_order; // Try to replace long branch if delay slot is not used, // it is mostly for back branches since forward branch's @@ -1395,8 +1397,8 @@ // Insert padding between avoid_back_to_back branches.
if (needs_padding && replacement->avoid_back_to_back()) { MachNode *nop = new (this) MachNopNode(); - b->_nodes.insert(j++, nop); - _cfg->map_node_to_block(nop, b); + block->insert_node(nop, j++); + _cfg->map_node_to_block(nop, block); last_inst++; nop->emit(*cb, _regalloc); cb->flush_bundle(true); @@ -1408,7 +1410,7 @@ jmp_size[i] = new_size; jmp_rule[i] = mach->rule(); #endif - b->_nodes.map(j, replacement); + block->map_node(replacement, j); mach->subsume_by(replacement, C); n = replacement; mach = replacement; @@ -1416,8 +1418,8 @@ } mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num ); } else if (mach->ideal_Opcode() == Op_Jump) { - for (uint h = 0; h < b->_num_succs; h++) { - Block* succs_block = b->_succs[h]; + for (uint h = 0; h < block->_num_succs; h++) { + Block* succs_block = block->_succs[h]; for (uint j = 1; j < succs_block->num_preds(); j++) { Node* jpn = succs_block->pred(j); if (jpn->is_JumpProj() && jpn->in(0) == mach) { @@ -1428,7 +1430,6 @@ } } } - #ifdef ASSERT // Check that oop-store precedes the card-mark else if (mach->ideal_Opcode() == Op_StoreCM) { @@ -1439,17 +1440,18 @@ if (oop_store == NULL) continue; count++; uint i4; - for( i4 = 0; i4 < last_inst; ++i4 ) { - if( b->_nodes[i4] == oop_store ) break; + for (i4 = 0; i4 < last_inst; ++i4) { + if (block->get_node(i4) == oop_store) { + break; + } } // Note: This test can provide a false failure if other precedence // edges have been added to the storeCMNode. - assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); + assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); } assert(count > 0, "storeCM expects at least one precedence edge"); } #endif - else if (!n->is_Proj()) { // Remember the beginning of the previous instruction, in case // it's followed by a flag-kill and a null-check. Happens on @@ -1545,12 +1547,12 @@ // If the next block is the top of a loop, pad this block out to align // the loop top a little. Helps prevent pipe stalls at loop back branches. if (i < nblocks-1) { - Block *nb = _cfg->_blocks[i+1]; + Block *nb = _cfg->get_block(i + 1); int padding = nb->alignment_padding(current_offset); if( padding > 0 ) { MachNode *nop = new (this) MachNopNode(padding / nop_size); - b->_nodes.insert( b->_nodes.size(), nop ); - _cfg->map_node_to_block(nop, b); + block->insert_node(nop, block->number_of_nodes()); + _cfg->map_node_to_block(nop, block); nop->emit(*cb, _regalloc); current_offset = cb->insts_size(); } @@ -1590,8 +1592,6 @@ } #endif - // ------------------ - #ifndef PRODUCT // Information on the size of the method, without the extraneous code Scheduling::increment_method_size(cb->insts_size()); @@ -1652,52 +1652,55 @@ _inc_table.set_size(cnt); uint inct_cnt = 0; - for( uint i=0; i<_cfg->_num_blocks; i++ ) { - Block *b = _cfg->_blocks[i]; + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { + Block* block = _cfg->get_block(i); Node *n = NULL; int j; // Find the branch; ignore trailing NOPs. 
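The ASSERT block above checks that a card mark (StoreCM) never executes before the oop store it covers. The reason is the generational write barrier; a hedged sketch with illustrative constants (the real card table is biased by the heap base and sized from the VM's card size):

    #include <cstdint>
    static unsigned char card_table[1u << 20];  // sketch: one byte per card
    static const int kCardShift = 9;            // 512-byte cards, illustrative

    inline void store_oop_with_barrier(void** field, void* new_value) {
      *field = new_value;                                           // the oop store
      card_table[(uintptr_t)field >> kCardShift] = 0 /* dirty */;   // the StoreCM
    }

If the scheduler hoisted the card mark above the store, a collector scanning and cleaning cards between the two could miss the new old-to-young pointer; that is the ordering the precedence-edge test enforces.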
- for( j = b->_nodes.size()-1; j>=0; j-- ) { - n = b->_nodes[j]; - if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con ) + for (j = block->number_of_nodes() - 1; j >= 0; j--) { + n = block->get_node(j); + if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) { break; + } } // If we didn't find anything, continue - if( j < 0 ) continue; + if (j < 0) { + continue; + } // Compute ExceptionHandlerTable subtable entry and add it // (skip empty blocks) - if( n->is_Catch() ) { + if (n->is_Catch()) { // Get the offset of the return from the call - uint call_return = call_returns[b->_pre_order]; + uint call_return = call_returns[block->_pre_order]; #ifdef ASSERT assert( call_return > 0, "no call seen for this basic block" ); - while( b->_nodes[--j]->is_MachProj() ) ; - assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" ); + while (block->get_node(--j)->is_MachProj()) ; + assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call"); #endif // last instruction is a CatchNode, find it's CatchProjNodes - int nof_succs = b->_num_succs; + int nof_succs = block->_num_succs; // allocate space GrowableArray<intptr_t> handler_bcis(nof_succs); GrowableArray<intptr_t> handler_pcos(nof_succs); // iterate through all successors for (int j = 0; j < nof_succs; j++) { - Block* s = b->_succs[j]; + Block* s = block->_succs[j]; bool found_p = false; - for( uint k = 1; k < s->num_preds(); k++ ) { - Node *pk = s->pred(k); - if( pk->is_CatchProj() && pk->in(0) == n ) { + for (uint k = 1; k < s->num_preds(); k++) { + Node* pk = s->pred(k); + if (pk->is_CatchProj() && pk->in(0) == n) { const CatchProjNode* p = pk->as_CatchProj(); found_p = true; // add the corresponding handler bci & pco information - if( p->_con != CatchProjNode::fall_through_index ) { + if (p->_con != CatchProjNode::fall_through_index) { // p leads to an exception handler (and is not fall through) - assert(s == _cfg->_blocks[s->_pre_order],"bad numbering"); + assert(s == _cfg->get_block(s->_pre_order), "bad numbering"); // no duplicates, please - if( !handler_bcis.contains(p->handler_bci()) ) { + if (!handler_bcis.contains(p->handler_bci())) { uint block_num = s->non_connector()->_pre_order; handler_bcis.append(p->handler_bci()); handler_pcos.append(blk_labels[block_num].loc_pos()); @@ -1716,9 +1719,9 @@ } // Handle implicit null exception table updates - if( n->is_MachNullCheck() ) { - uint block_num = b->non_connector_successor(0)->_pre_order; - _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() ); + if (n->is_MachNullCheck()) { + uint block_num = block->non_connector_successor(0)->_pre_order; + _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos()); continue; } } // End of for all blocks fill in exception table entries @@ -1777,14 +1780,12 @@ memset(_current_latency, 0, node_max * sizeof(unsigned short)); // Clear the bundling information - memcpy(_bundle_use_elements, - Pipeline_Use::elaborated_elements, - sizeof(Pipeline_Use::elaborated_elements)); + memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements)); // Get the last node - Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1]; - - _next_node = bb->_nodes[bb->_nodes.size()-1]; + Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1); + + _next_node = block->get_node(block->number_of_nodes() - 1); } #ifndef PRODUCT @@ -1834,7 +1835,6 @@ sizeof(Pipeline_Use::elaborated_elements)); } -//------------------------------ScheduleAndBundle------------------------------ // Perform
instruction scheduling and bundling over the sequence of // instructions in backwards order. void Compile::ScheduleAndBundle() { @@ -1861,7 +1861,6 @@ scheduling.DoScheduling(); } -//------------------------------ComputeLocalLatenciesForward------------------- // Compute the latency of all the instructions. This is fairly simple, // because we already have a legal ordering. Walk over the instructions // from first to last, and compute the latency of the instruction based @@ -1879,7 +1878,7 @@ // Used to allow latency 0 to force an instruction to the beginning // of the bb uint latency = 1; - Node *use = bb->_nodes[j]; + Node *use = bb->get_node(j); uint nlen = use->len(); // Walk over all the inputs @@ -2031,7 +2030,6 @@ return _available[0]; } -//------------------------------AddNodeToAvailableList------------------------- void Scheduling::AddNodeToAvailableList(Node *n) { assert( !n->is_Proj(), "projections never directly made available" ); #ifndef PRODUCT @@ -2077,7 +2075,6 @@ #endif } -//------------------------------DecrementUseCounts----------------------------- void Scheduling::DecrementUseCounts(Node *n, const Block *bb) { for ( uint i=0; i < n->len(); i++ ) { Node *def = n->in(i); @@ -2100,7 +2097,6 @@ } } -//------------------------------AddNodeToBundle-------------------------------- void Scheduling::AddNodeToBundle(Node *n, const Block *bb) { #ifndef PRODUCT if (_cfg->C->trace_opto_output()) { @@ -2293,7 +2289,7 @@ (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) { // Push any trailing projections - if( bb->_nodes[bb->_nodes.size()-1] != n ) { + if( bb->get_node(bb->number_of_nodes()-1) != n ) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node *foi = n->fast_out(i); if( foi->is_Proj() ) @@ -2315,7 +2311,6 @@ DecrementUseCounts(n,bb); } -//------------------------------ComputeUseCount-------------------------------- // This method sets the use count within a basic block. We will ignore all // uses outside the current basic block. As we are doing a backwards walk, // any node we reach that has a use count of 0 may be scheduled. This also @@ -2337,21 +2332,21 @@ _unconditional_delay_slot = NULL; #ifdef ASSERT - for( uint i=0; i < bb->_nodes.size(); i++ ) - assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" ); + for( uint i=0; i < bb->number_of_nodes(); i++ ) + assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" ); #endif // Force the _uses count to never go to zero for unscheduable pieces // of the block for( uint k = 0; k < _bb_start; k++ ) - _uses[bb->_nodes[k]->_idx] = 1; - for( uint l = _bb_end; l < bb->_nodes.size(); l++ ) - _uses[bb->_nodes[l]->_idx] = 1; + _uses[bb->get_node(k)->_idx] = 1; + for( uint l = _bb_end; l < bb->number_of_nodes(); l++ ) + _uses[bb->get_node(l)->_idx] = 1; // Iterate backwards over the instructions in the block. Don't count the // branch projections at end or the block header instructions. 
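ComputeUseCount, sketched above, seeds the ready list for backwards list scheduling: an instruction becomes schedulable once every in-block consumer has been placed. A simplified sketch of the rule, mirroring the VM loop but omitting its delay-slot bookkeeping:

    // Illustrative per-block use counting for backwards list scheduling.
    // uses[d] = in-block consumers of d not yet scheduled; when it reaches
    // zero, d becomes 'available' and may be emitted.
    void count_block_uses(Block* bb, uint bb_start, uint bb_end, uint* uses) {
      // assumes bb_start >= 1, as in the VM: the block head is never counted
      for (uint j = bb_end - 1; j >= bb_start; j--) {
        Node* n = bb->get_node(j);
        if (n->is_Proj()) continue;             // projections handled separately
        for (uint k = 1; k < n->req(); k++) {   // input 0 is the control edge
          Node* def = n->in(k);
          if (def != NULL) uses[def->_idx]++;   // real code skips defs outside bb
        }
      }
    }

The walk that resumes below is exactly this loop, with the extra handling for projections and unconditional-delay-slot candidates.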
for( uint j = _bb_end-1; j >= _bb_start; j-- ) { - Node *n = bb->_nodes[j]; + Node *n = bb->get_node(j); if( n->is_Proj() ) continue; // Projections handled another way // Account for all uses @@ -2400,20 +2395,22 @@ Block *bb; // Walk over all the basic blocks in reverse order - for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) { - bb = _cfg->_blocks[i]; + for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) { + bb = _cfg->get_block(i); #ifndef PRODUCT if (_cfg->C->trace_opto_output()) { tty->print("# Schedule BB#%03d (initial)\n", i); - for (uint j = 0; j < bb->_nodes.size(); j++) - bb->_nodes[j]->dump(); + for (uint j = 0; j < bb->number_of_nodes(); j++) { + bb->get_node(j)->dump(); + } } #endif // On the head node, skip processing - if( bb == _cfg->_broot ) + if (bb == _cfg->get_root_block()) { continue; + } // Skip empty, connector blocks if (bb->is_connector()) @@ -2432,10 +2429,10 @@ } // Leave untouched the starting instruction, any Phis, a CreateEx node - // or Top. bb->_nodes[_bb_start] is the first schedulable instruction. - _bb_end = bb->_nodes.size()-1; + // or Top. bb->get_node(_bb_start) is the first schedulable instruction. + _bb_end = bb->number_of_nodes()-1; for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) { - Node *n = bb->_nodes[_bb_start]; + Node *n = bb->get_node(_bb_start); // Things not matched, like Phinodes and ProjNodes don't get scheduled. // Also, MachIdealNodes do not get scheduled if( !n->is_Mach() ) continue; // Skip non-machine nodes @@ -2455,19 +2452,19 @@ // in the block), because they have delay slots we can fill. Calls all // have their delay slots filled in the template expansions, so we don't // bother scheduling them. - Node *last = bb->_nodes[_bb_end]; + Node *last = bb->get_node(_bb_end); // Ignore trailing NOPs. while (_bb_end > 0 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Con) { - last = bb->_nodes[--_bb_end]; + last = bb->get_node(--_bb_end); } assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, ""); if( last->is_Catch() || // Exclude unreachable path case when Halt node is in a separate block. (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) { // There must be a prior call. Skip it. - while( !bb->_nodes[--_bb_end]->is_MachCall() ) { - assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" ); + while( !bb->get_node(--_bb_end)->is_MachCall() ) { + assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" ); } } else if( last->is_MachNullCheck() ) { // Backup so the last null-checked memory instruction is @@ -2476,7 +2473,7 @@ Node *mem = last->in(1); do { _bb_end--; - } while (mem != bb->_nodes[_bb_end]); + } while (mem != bb->get_node(_bb_end)); } else { // Set _bb_end to point after last schedulable inst. 
_bb_end++; @@ -2505,7 +2502,7 @@ assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" ); #ifdef ASSERT for( uint l = _bb_start; l < _bb_end; l++ ) { - Node *n = bb->_nodes[l]; + Node *n = bb->get_node(l); uint m; for( m = 0; m < _bb_end-_bb_start; m++ ) if( _scheduled[m] == n ) @@ -2516,14 +2513,14 @@ // Now copy the instructions (in reverse order) back to the block for ( uint k = _bb_start; k < _bb_end; k++ ) - bb->_nodes.map(k, _scheduled[_bb_end-k-1]); + bb->map_node(_scheduled[_bb_end-k-1], k); #ifndef PRODUCT if (_cfg->C->trace_opto_output()) { tty->print("# Schedule BB#%03d (final)\n", i); uint current = 0; - for (uint j = 0; j < bb->_nodes.size(); j++) { - Node *n = bb->_nodes[j]; + for (uint j = 0; j < bb->number_of_nodes(); j++) { + Node *n = bb->get_node(j); if( valid_bundle_info(n) ) { Bundle *bundle = node_bundling(n); if (bundle->instr_count() > 0 || bundle->flags() > 0) { @@ -2550,7 +2547,6 @@ } // end DoScheduling -//------------------------------verify_good_schedule--------------------------- // Verify that no live-range used in the block is killed in the block by a // wrong DEF. This doesn't verify live-ranges that span blocks. @@ -2563,7 +2559,6 @@ } #ifdef ASSERT -//------------------------------verify_do_def---------------------------------- void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) { // Check for bad kills if( OptoReg::is_valid(def) ) { // Ignore stores & control flow @@ -2579,7 +2574,6 @@ } } -//------------------------------verify_good_schedule--------------------------- void Scheduling::verify_good_schedule( Block *b, const char *msg ) { // Zap to something reasonable for the verify code @@ -2588,8 +2582,8 @@ // Walk over the block backwards. Check to make sure each DEF doesn't // kill a live value (other than the one it's supposed to). Add each // USE to the live set. - for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) { - Node *n = b->_nodes[i]; + for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) { + Node *n = b->get_node(i); int n_op = n->Opcode(); if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) { // Fat-proj kills a slew of registers @@ -2639,7 +2633,6 @@ from->add_prec(to); } -//------------------------------anti_do_def------------------------------------ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) { if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow return; @@ -2709,7 +2702,6 @@ add_prec_edge_from_to(kill,pinch); } -//------------------------------anti_do_use------------------------------------ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) { if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow return; @@ -2722,7 +2714,7 @@ pinch->req() == 1 ) { // pinch not yet in block? pinch->del_req(0); // yank pointer to later-def, also set flag // Insert the pinch-point in the block just after the last use - b->_nodes.insert(b->find_node(use)+1,pinch); + b->insert_node(pinch, b->find_node(use) + 1); _bb_end++; // Increase size scheduled region in block } @@ -2730,7 +2722,6 @@ } } -//------------------------------ComputeRegisterAntidependences----------------- // We insert antidependences between the reads and following write of // allocated registers to prevent illegal code motion. Hopefully, the // number of added references should be fairly small, especially as we @@ -2775,10 +2766,10 @@ // it being in the current block. 
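The anti-dependence machinery referenced above keeps a later register kill from floating over earlier reads, and it bounds edge growth by funnelling edges through a pinch node. A toy, self-contained illustration (none of these types are the VM's):

    #include <vector>
    struct ToyNode { std::vector<ToyNode*> prec; };  // prec = must-run-after edges

    // Route anti-dependences through one pinch node, so k kills over u reads
    // cost about u + k edges rather than u * k direct edges.
    void add_anti_deps(ToyNode* kill, ToyNode* pinch,
                       const std::vector<ToyNode*>& reads) {
      for (ToyNode* r : reads) pinch->prec.push_back(r);  // pinch after every read
      kill->prec.push_back(pinch);                        // kill after the pinch
    }

In the VM this is what anti_do_def() and anti_do_use() build, with the pinch nodes recycled by the pinch-node garbage collection shown below.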
bool fat_proj_seen = false; uint last_safept = _bb_end-1; - Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL; + Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL; Node* last_safept_node = end_node; for( uint i = _bb_end-1; i >= _bb_start; i-- ) { - Node *n = b->_nodes[i]; + Node *n = b->get_node(i); int is_def = n->outcnt(); // def if some uses prior to adding precedence edges if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) { // Fat-proj kills a slew of registers @@ -2827,7 +2818,7 @@ // Do not allow defs of new derived values to float above GC // points unless the base is definitely available at the GC point. - Node *m = b->_nodes[i]; + Node *m = b->get_node(i); // Add precedence edge from following safepoint to use of derived pointer if( last_safept_node != end_node && @@ -2844,11 +2835,11 @@ if( n->jvms() ) { // Precedence edge from derived to safept // Check if last_safept_node was moved by pinch-point insertion in anti_do_use() - if( b->_nodes[last_safept] != last_safept_node ) { + if( b->get_node(last_safept) != last_safept_node ) { last_safept = b->find_node(last_safept_node); } for( uint j=last_safept; j > i; j-- ) { - Node *mach = b->_nodes[j]; + Node *mach = b->get_node(j); if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP ) mach->add_prec( n ); } @@ -2864,8 +2855,6 @@ } } -//------------------------------garbage_collect_pinch_nodes------------------------------- - // Garbage collect pinch nodes for reuse by other blocks. // // The block scheduler's insertion of anti-dependence @@ -2940,7 +2929,6 @@ pinch->set_req(0, NULL); } -//------------------------------print_statistics------------------------------- #ifndef PRODUCT void Scheduling::dump_available() const { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/parse.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -518,7 +518,7 @@ // loading from a constant field or the constant pool // returns false if push failed (non-perm field constants only, not ldcs) - bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false); + bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL); // implementation of object creation bytecodes void emit_guard_for_new(ciInstanceKlass* klass); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/parse3.cpp --- a/src/share/vm/opto/parse3.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/parse3.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -147,7 +147,15 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { // Does this field have a constant value? If so, just push the value. 
if (field->is_constant()) { - // final field + // final or stable field + const Type* stable_type = NULL; + if (FoldStableValues && field->is_stable()) { + stable_type = Type::get_const_type(field->type()); + if (field->type()->is_array_klass()) { + int stable_dimension = field->type()->as_array_klass()->dimension(); + stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension); + } + } if (field->is_static()) { // final static field if (C->eliminate_boxing()) { @@ -167,11 +175,10 @@ } } } - if (push_constant(field->constant_value())) + if (push_constant(field->constant_value(), false, false, stable_type)) return; - } - else { - // final non-static field + } else { + // final or stable non-static field // Treat final non-static fields of trusted classes (classes in // java.lang.invoke and sun.invoke packages and subpackages) as // compile time constants. @@ -179,8 +186,12 @@ const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr(); ciObject* constant_oop = oop_ptr->const_oop(); ciConstant constant = field->constant_value_of(constant_oop); - if (push_constant(constant, true)) - return; + if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) { + // fall through to field load; the field is not yet initialized + } else { + if (push_constant(constant, true, false, stable_type)) + return; + } } } } @@ -301,7 +312,8 @@ // Note the presence of writes to final non-static fields, so that we // can insert a memory barrier later on to keep the writes from floating // out of the constructor. - if (is_field && field->is_final()) { + // Any method can write a @Stable field; insert memory barriers after those also. + if (is_field && (field->is_final() || field->is_stable())) { set_wrote_final(true); // Preserve allocation ptr to create precedent edge to it in membar // generated on exit from constructor. @@ -314,35 +326,21 @@ } -bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache) { + +bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) { + const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache); switch (constant.basic_type()) { - case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break; - case T_INT: push( intcon(constant.as_int()) ); break; - case T_CHAR: push( intcon(constant.as_char()) ); break; - case T_BYTE: push( intcon(constant.as_byte()) ); break; - case T_SHORT: push( intcon(constant.as_short()) ); break; - case T_FLOAT: push( makecon(TypeF::make(constant.as_float())) ); break; - case T_DOUBLE: push_pair( makecon(TypeD::make(constant.as_double())) ); break; - case T_LONG: push_pair( longcon(constant.as_long()) ); break; case T_ARRAY: - case T_OBJECT: { + case T_OBJECT: // cases: // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0) // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2) // An oop is not scavengable if it is in the perm gen. 
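The @Stable handling above only constant-folds a field once it holds a non-default value: null or zero means "not initialized yet", so the parser falls through to a normal load that can still be folded in a later compile. A sketch of the guard, reusing names visible in this hunk:

    // Illustrative predicate; the real logic is inlined in Parse::do_get_xxx().
    static bool can_fold_stable_field(ciField* field, ciConstant observed) {
      if (!FoldStableValues || !field->is_stable()) return false;
      // A @Stable field's default value may still be overwritten once,
      // so only a non-default observed value is safe to treat as constant.
      return !observed.is_null_or_zero();
    }

For stable arrays the folded type also records how many dimensions are stable, which is what cast_to_stable(true, stable_dimension) encodes.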
- ciObject* oop_constant = constant.as_object(); - if (oop_constant->is_null_object()) { - push( zerocon(T_OBJECT) ); - break; - } else if (require_constant || oop_constant->should_be_constant()) { - push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache)) ); - break; - } else { - // we cannot inline the oop, but we can use it later to narrow a type - return false; - } - } - case T_ILLEGAL: { + if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr()) + con_type = con_type->join(stable_type); + break; + + case T_ILLEGAL: // Invalid ciConstant returned due to OutOfMemoryError in the CI assert(C->env()->failing(), "otherwise should not see this"); // These always occur because of object types; we are going to @@ -350,17 +348,16 @@ push( zerocon(T_OBJECT) ); return false; } - default: - ShouldNotReachHere(); + + if (con_type == NULL) + // we cannot inline the oop, but we can use it later to narrow a type return false; - } - // success + push_node(constant.basic_type(), makecon(con_type)); return true; } - //============================================================================= void Parse::do_anewarray() { bool will_link; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/phaseX.cpp --- a/src/share/vm/opto/phaseX.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/phaseX.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1643,15 +1643,15 @@ bool method_name_not_printed = true; // Examine each basic block - for( uint block_number = 1; block_number < _cfg._num_blocks; ++block_number ) { - Block *block = _cfg._blocks[block_number]; + for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) { + Block* block = _cfg.get_block(block_number); bool block_not_printed = true; // and each instruction within a block - uint end_index = block->_nodes.size(); + uint end_index = block->number_of_nodes(); // block->end_idx() not valid after PhaseRegAlloc for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) { - Node *n = block->_nodes.at(instruction_index); + Node *n = block->get_node(instruction_index); if( n->is_Mach() ) { MachNode *m = n->as_Mach(); int deleted_count = 0; @@ -1673,7 +1673,7 @@ } // Print instructions being deleted for( int i = (deleted_count - 1); i >= 0; --i ) { - block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr(); + block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr(); } tty->print_cr("replaced with"); // Print new instruction @@ -1687,11 +1687,11 @@ // the node index to live range mappings.) uint safe_instruction_index = (instruction_index - deleted_count); for( ; (instruction_index > safe_instruction_index); --instruction_index ) { - block->_nodes.remove( instruction_index ); + block->remove_node( instruction_index ); } // install new node after safe_instruction_index - block->_nodes.insert( safe_instruction_index + 1, m2 ); - end_index = block->_nodes.size() - 1; // Recompute new block size + block->insert_node(m2, safe_instruction_index + 1); + end_index = block->number_of_nodes() - 1; // Recompute new block size NOT_PRODUCT( inc_peepholes(); ) } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/postaloc.cpp --- a/src/share/vm/opto/postaloc.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/postaloc.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -405,28 +405,29 @@ // Need a mapping from basic block Node_Lists. We need a Node_List to // map from register number to value-producing Node. 
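The postaloc.cpp hunks below belong to post-allocation copy removal, which keeps two per-block maps: value[r], the node whose value register r currently holds, and regnd[r], the node that last defined r. A copy becomes dead when source and destination registers already agree on the value. A simplified sketch of that core test:

    // Illustrative; the VM's elide_copy() also handles phi edges,
    // callee-saved registers and register pairs.
    static bool copy_is_redundant(Node_List& value,
                                  OptoReg::Name dst, OptoReg::Name src) {
      return value[dst] != NULL && value[dst] == value[src];
    }

The blk2value/blk2regnd tables built next let a block seed these maps from an already-visited predecessor instead of recomputing them from scratch.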
- Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1); - memset( blk2value, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) ); + Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1); + memset(blk2value, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1)); // Need a mapping from basic block Node_Lists. We need a Node_List to // map from register number to register-defining Node. - Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1); - memset( blk2regnd, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) ); + Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1); + memset(blk2regnd, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1)); // We keep unused Node_Lists on a free_list to avoid wasting // memory. GrowableArray<Node_List*> free_list = GrowableArray<Node_List*>(16); // For all blocks - for( uint i = 0; i < _cfg._num_blocks; i++ ) { + for (uint i = 0; i < _cfg.number_of_blocks(); i++) { uint j; - Block *b = _cfg._blocks[i]; + Block* block = _cfg.get_block(i); // Count of Phis in block uint phi_dex; - for( phi_dex = 1; phi_dex < b->_nodes.size(); phi_dex++ ) { - Node *phi = b->_nodes[phi_dex]; - if( !phi->is_Phi() ) + for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) { + Node* phi = block->get_node(phi_dex); + if (!phi->is_Phi()) { break; + } } // If any predecessor has not been visited, we do not know the state @@ -434,21 +435,23 @@ // along Phi input edges bool missing_some_inputs = false; Block *freed = NULL; - for( j = 1; j < b->num_preds(); j++ ) { - Block *pb = _cfg.get_block_for_node(b->pred(j)); + for (j = 1; j < block->num_preds(); j++) { + Block* pb = _cfg.get_block_for_node(block->pred(j)); // Remove copies along phi edges - for( uint k=1; k<phi_dex; k++ ) - elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false ); - if( blk2value[pb->_pre_order] ) { // Have a mapping on this edge? + for (uint k = 1; k < phi_dex; k++) { + elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false); + } + if (blk2value[pb->_pre_order]) { // Have a mapping on this edge? // See if this predecessor's mappings have been used by everybody // who wants them. If so, free 'em. uint k; - for( k=0; k<pb->_num_succs; k++ ) { - Block *pbsucc = pb->_succs[k]; - if( !blk2value[pbsucc->_pre_order] && pbsucc != b ) + for (k = 0; k < pb->_num_succs; k++) { + Block* pbsucc = pb->_succs[k]; + if (!blk2value[pbsucc->_pre_order] && pbsucc != block) { break; // Found a future user + } } - if( k >= pb->_num_succs ) { // No more uses, free! + if (k >= pb->_num_succs) { // No more uses, free!
freed = pb; // Record last block freed free_list.push(blk2value[pb->_pre_order]); free_list.push(blk2regnd[pb->_pre_order]); @@ -467,20 +470,20 @@ value.map(_max_reg,NULL); regnd.map(_max_reg,NULL); // Set mappings as OUR mappings - blk2value[b->_pre_order] = &value; - blk2regnd[b->_pre_order] = ®nd; + blk2value[block->_pre_order] = &value; + blk2regnd[block->_pre_order] = ®nd; // Initialize value & regnd for this block - if( missing_some_inputs ) { + if (missing_some_inputs) { // Some predecessor has not yet been visited; zap map to empty - for( uint k = 0; k < (uint)_max_reg; k++ ) { + for (uint k = 0; k < (uint)_max_reg; k++) { value.map(k,NULL); regnd.map(k,NULL); } } else { if( !freed ) { // Didn't get a freebie prior block // Must clone some data - freed = _cfg.get_block_for_node(b->pred(1)); + freed = _cfg.get_block_for_node(block->pred(1)); Node_List &f_value = *blk2value[freed->_pre_order]; Node_List &f_regnd = *blk2regnd[freed->_pre_order]; for( uint k = 0; k < (uint)_max_reg; k++ ) { @@ -489,9 +492,11 @@ } } // Merge all inputs together, setting to NULL any conflicts. - for( j = 1; j < b->num_preds(); j++ ) { - Block *pb = _cfg.get_block_for_node(b->pred(j)); - if( pb == freed ) continue; // Did self already via freelist + for (j = 1; j < block->num_preds(); j++) { + Block* pb = _cfg.get_block_for_node(block->pred(j)); + if (pb == freed) { + continue; // Did self already via freelist + } Node_List &p_regnd = *blk2regnd[pb->_pre_order]; for( uint k = 0; k < (uint)_max_reg; k++ ) { if( regnd[k] != p_regnd[k] ) { // Conflict on reaching defs? @@ -503,9 +508,9 @@ } // For all Phi's - for( j = 1; j < phi_dex; j++ ) { + for (j = 1; j < phi_dex; j++) { uint k; - Node *phi = b->_nodes[j]; + Node *phi = block->get_node(j); uint pidx = _lrg_map.live_range_id(phi); OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg(); @@ -516,8 +521,8 @@ if( phi != x && u != x ) // Found a different input u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input } - if( u != NodeSentinel ) { // Junk Phi. Remove - b->_nodes.remove(j--); + if (u != NodeSentinel) { // Junk Phi. Remove + block->remove_node(j--); phi_dex--; _cfg.unmap_node_from_block(phi); phi->replace_by(u); @@ -547,13 +552,13 @@ } // For all remaining instructions - for( j = phi_dex; j < b->_nodes.size(); j++ ) { - Node *n = b->_nodes[j]; + for (j = phi_dex; j < block->number_of_nodes(); j++) { + Node* n = block->get_node(j); - if( n->outcnt() == 0 && // Dead? - n != C->top() && // (ignore TOP, it has no du info) - !n->is_Proj() ) { // fat-proj kills - j -= yank_if_dead(n,b,&value,®nd); + if(n->outcnt() == 0 && // Dead? + n != C->top() && // (ignore TOP, it has no du info) + !n->is_Proj() ) { // fat-proj kills + j -= yank_if_dead(n, block, &value, ®nd); continue; } @@ -598,8 +603,9 @@ const uint two_adr = n->is_Mach() ? n->as_Mach()->two_adr() : 0; // Remove copies along input edges - for( k = 1; k < n->req(); k++ ) - j -= elide_copy( n, k, b, value, regnd, two_adr!=k ); + for (k = 1; k < n->req(); k++) { + j -= elide_copy(n, k, block, value, regnd, two_adr != k); + } // Unallocated Nodes define no registers uint lidx = _lrg_map.live_range_id(n); @@ -630,8 +636,8 @@ // then 'n' is a useless copy. Do not update the register->node // mapping so 'n' will go dead. 
if( value[nreg] != val ) { - if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) { - j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); + if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, OptoReg::Bad)) { + j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); } else { // Update the mapping: record new Node defined by the register regnd.map(nreg,n); @@ -640,8 +646,8 @@ value.map(nreg,val); } } else if( !may_be_copy_of_callee(n) ) { - assert( n->is_Copy(), "" ); - j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); + assert(n->is_Copy(), ""); + j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); } } else if (RegMask::is_vector(n_ideal_reg)) { // If Node 'n' does not change the value mapped by the register, @@ -660,7 +666,7 @@ } } else if (n->is_Copy()) { // Note: vector can't be constant and can't be copy of calee. - j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); + j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); } } else { // If the value occupies a register pair, record same info @@ -674,18 +680,18 @@ tmp.Remove(nreg); nreg_lo = tmp.find_first_elem(); } - if( value[nreg] != val || value[nreg_lo] != val ) { - if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) { - j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); + if (value[nreg] != val || value[nreg_lo] != val) { + if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, nreg_lo)) { + j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); } else { regnd.map(nreg , n ); regnd.map(nreg_lo, n ); value.map(nreg ,val); value.map(nreg_lo,val); } - } else if( !may_be_copy_of_callee(n) ) { - assert( n->is_Copy(), "" ); - j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); + } else if (!may_be_copy_of_callee(n)) { + assert(n->is_Copy(), ""); + j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); } } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/reg_split.cpp --- a/src/share/vm/opto/reg_split.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/reg_split.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -112,17 +112,17 @@ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) { // Skip intervening ProjNodes. Do not insert between a ProjNode and // its definer. - while( i < b->_nodes.size() && - (b->_nodes[i]->is_Proj() || - b->_nodes[i]->is_Phi() ) ) + while( i < b->number_of_nodes() && + (b->get_node(i)->is_Proj() || + b->get_node(i)->is_Phi() ) ) i++; // Do not insert between a call and his Catch - if( b->_nodes[i]->is_Catch() ) { + if( b->get_node(i)->is_Catch() ) { // Put the instruction at the top of the fall-thru block. // Find the fall-thru projection while( 1 ) { - const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj(); + const CatchProjNode *cp = b->get_node(++i)->as_CatchProj(); if( cp->_con == CatchProjNode::fall_through_index ) break; } @@ -131,7 +131,7 @@ i = 1; // Right at start of block } - b->_nodes.insert(i,spill); // Insert node in block + b->insert_node(spill, i); // Insert node in block _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect // Adjust the point where we go hi-pressure if( i <= b->_ihrp_index ) b->_ihrp_index++; @@ -160,9 +160,9 @@ // (The implicit_null_check function ensures the use is also dominated // by the branch-not-taken block.) 
Node *be = b->end(); - if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) { + if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) { // Spill goes in the branch-not-taken block - b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue]; + b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue]; loc = 0; // Just past the Region } assert( loc >= 0, "must insert past block head" ); @@ -397,10 +397,15 @@ #endif // See if the cloned def kills any flags, and copy those kills as well uint i = insidx+1; - if( clone_projs( b, i, def, spill, maxlrg) ) { + int found_projs = clone_projs( b, i, def, spill, maxlrg); + if (found_projs > 0) { // Adjust the point where we go hi-pressure - if( i <= b->_ihrp_index ) b->_ihrp_index++; - if( i <= b->_fhrp_index ) b->_fhrp_index++; + if (i <= b->_ihrp_index) { + b->_ihrp_index += found_projs; + } + if (i <= b->_fhrp_index) { + b->_fhrp_index += found_projs; + } } return spill; @@ -445,7 +450,7 @@ // Scan block for 1st use. for( uint i = 1; i <= b->end_idx(); i++ ) { - Node *n = b->_nodes[i]; + Node *n = b->get_node(i); // Ignore PHI use, these can be up or down if (n->is_Phi()) { continue; @@ -529,13 +534,13 @@ // a Def is UP or DOWN. UP means that it should get a register (ie - // it is always in LRP regions), and DOWN means that it is probably // on the stack (ie - it crosses HRP regions). - Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 ); - bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 ); + Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1); + bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1); Node **debug_defs = NEW_SPLIT_ARRAY( Node*, spill_cnt ); VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt ); // Initialize Reaches & UP - for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) { + for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) { Reaches[bidx] = NEW_SPLIT_ARRAY( Node*, spill_cnt ); UP[bidx] = NEW_SPLIT_ARRAY( bool, spill_cnt ); Node **Reachblock = Reaches[bidx]; @@ -555,13 +560,13 @@ //----------PASS 1---------- //----------Propagation & Node Insertion Code---------- // Walk the Blocks in RPO for DEF & USE info - for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) { + for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) { if (C->check_node_count(spill_cnt, out_of_nodes)) { return 0; } - b = _cfg._blocks[bidx]; + b = _cfg.get_block(bidx); // Reaches & UP arrays for this block Reachblock = Reaches[b->_pre_order]; UPblock = UP[b->_pre_order]; @@ -642,7 +647,7 @@ // check block for appropriate phinode & update edges for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { - n1 = b->_nodes[insidx]; + n1 = b->get_node(insidx); // bail if this is not a phi phi = n1->is_Phi() ? 
n1->as_Phi() : NULL; if( phi == NULL ) { @@ -742,7 +747,7 @@ //----------Walk Instructions in the Block and Split---------- // For all non-phi instructions in the block for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { - Node *n = b->_nodes[insidx]; + Node *n = b->get_node(insidx); // Find the defining Node's live range index uint defidx = _lrg_map.find_id(n); uint cnt = n->req(); @@ -771,7 +776,7 @@ assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg"); n->replace_by(u); // Then replace with unique input n->disconnect_inputs(NULL, C); - b->_nodes.remove(insidx); + b->remove_node(insidx); insidx--; b->_ihrp_index--; b->_fhrp_index--; @@ -784,12 +789,12 @@ (b->_reg_pressure < (uint)INTPRESSURE) || b->_ihrp_index > 4000000 || b->_ihrp_index >= b->end_idx() || - !b->_nodes[b->_ihrp_index]->is_Proj(), "" ); + !b->get_node(b->_ihrp_index)->is_Proj(), "" ); assert( insidx > b->_fhrp_index || (b->_freg_pressure < (uint)FLOATPRESSURE) || b->_fhrp_index > 4000000 || b->_fhrp_index >= b->end_idx() || - !b->_nodes[b->_fhrp_index]->is_Proj(), "" ); + !b->get_node(b->_fhrp_index)->is_Proj(), "" ); // ********** Handle Crossing HRP Boundry ********** if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) { @@ -814,7 +819,7 @@ // Insert point is just past last use or def in the block int insert_point = insidx-1; while( insert_point > 0 ) { - Node *n = b->_nodes[insert_point]; + Node *n = b->get_node(insert_point); // Hit top of block? Quit going backwards if (n->is_Phi()) { break; @@ -860,7 +865,7 @@ } } // end if LRG is UP } // end for all spilling live ranges - assert( b->_nodes[insidx] == n, "got insidx set incorrectly" ); + assert( b->get_node(insidx) == n, "got insidx set incorrectly" ); } // end if crossing HRP Boundry // If the LRG index is oob, then this is a new spillcopy, skip it. @@ -873,7 +878,7 @@ if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { n->replace_by( n->in(copyidx) ); n->set_req( copyidx, NULL ); - b->_nodes.remove(insidx--); + b->remove_node(insidx--); b->_ihrp_index--; // Adjust the point where we go hi-pressure b->_fhrp_index--; continue; @@ -927,10 +932,10 @@ // Rematerializable? Then clone def at use site instead // of store/load if( def->rematerialize() ) { - int old_size = b->_nodes.size(); + int old_size = b->number_of_nodes(); def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true ); if( !def ) return 0; // Bail out - insidx += b->_nodes.size()-old_size; + insidx += b->number_of_nodes()-old_size; } MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL; @@ -1327,8 +1332,8 @@ // so look at the node before it. 
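When a reaching def is marked rematerialize(), the splitter clones it at the use instead of inserting a store/load pair, and then repairs its own cursor from the block growth, as the hunk above shows. The general shape of that bookkeeping (the helper name is hypothetical):

    uint old_size = b->number_of_nodes();
    insert_nodes_at_or_before(b, insidx);        // hypothetical helper that may grow b
    insidx += b->number_of_nodes() - old_size;   // step the cursor past the insertions

Recomputing the delta from number_of_nodes() stays correct no matter how many projections the clone dragged along, which is also why clone_projs() above now returns a count that the hi-pressure indexes are bumped by.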
int insert = pred->end_idx(); while (insert >= 1 && - pred->_nodes[insert - 1]->is_SpillCopy() && - _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { + pred->get_node(insert - 1)->is_SpillCopy() && + _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) { insert--; } def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false); @@ -1394,10 +1399,10 @@ // DEBUG #ifdef ASSERT // Validate all live range index assignments - for (bidx = 0; bidx < _cfg._num_blocks; bidx++) { - b = _cfg._blocks[bidx]; + for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) { + b = _cfg.get_block(bidx); for (insidx = 0; insidx <= b->end_idx(); insidx++) { - Node *n = b->_nodes[insidx]; + Node *n = b->get_node(insidx); uint defidx = _lrg_map.find(n); assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); assert(defidx < maxlrg,"Bad live range index in Split"); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/subnode.cpp --- a/src/share/vm/opto/subnode.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/subnode.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1064,7 +1064,7 @@ // Print special per-node info #ifndef PRODUCT void BoolTest::dump_on(outputStream *st) const { - const char *msg[] = {"eq","gt","??","lt","ne","le","??","ge"}; + const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"}; st->print(msg[_test]); } #endif @@ -1126,7 +1126,7 @@ Node *cmp = in(1); if( !cmp->is_Sub() ) return NULL; int cop = cmp->Opcode(); - if( cop == Op_FastLock || cop == Op_FastUnlock ) return NULL; + if( cop == Op_FastLock || cop == Op_FastUnlock || cop == Op_FlagsProj) return NULL; Node *cmp1 = cmp->in(1); Node *cmp2 = cmp->in(2); if( !cmp1 ) return NULL; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/subnode.hpp --- a/src/share/vm/opto/subnode.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/subnode.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -263,16 +263,16 @@ // We pick the values as 3 bits; the low order 2 bits we compare against the // condition codes, the high bit flips the sense of the result. struct BoolTest VALUE_OBJ_CLASS_SPEC { - enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, illegal = 8 }; + enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, overflow = 2, no_overflow = 6, illegal = 8 }; mask _test; BoolTest( mask btm ) : _test(btm) {} const Type *cc2logical( const Type *CC ) const; // Commute the test. I use a small table lookup. The table is created as // a simple char array where each element is the ASCII version of a 'mask' // enum from above. 
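The BoolTest change below widens the 3-bit encoding with overflow (of = 2) and no_overflow (nof = 6): the low two bits select the condition, the high bit flips the sense, so negate() stays _test ^ 4, and the commute table gains self-symmetric entries for of/nof. The new table can be checked by enumeration (C++11 static_assert used purely for illustration):

    // enum order: eq=0 gt=1 of=2 lt=3 ne=4 le=5 nof=6 ge=7
    static_assert("032147658"[1] - '0' == 3, "commute(gt) == lt");
    static_assert("032147658"[3] - '0' == 1, "commute(lt) == gt");
    static_assert("032147658"[2] - '0' == 2, "overflow is its own commute");
    static_assert((2 ^ 4) == 6, "negate(overflow) == no_overflow");

Overflow has no ordered counterpart under operand swap, which is why it maps to itself where lt/gt and le/ge trade places.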
- mask commute( ) const { return mask("038147858"[_test]-'0'); } + mask commute( ) const { return mask("032147658"[_test]-'0'); } mask negate( ) const { return mask(_test^4); } - bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le); } + bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); } #ifndef PRODUCT void dump_on(outputStream *st) const; #endif diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/type.cpp --- a/src/share/vm/opto/type.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/type.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -189,6 +189,38 @@ } +//-----------------------make_from_constant------------------------------------ +const Type* Type::make_from_constant(ciConstant constant, + bool require_constant, bool is_autobox_cache) { + switch (constant.basic_type()) { + case T_BOOLEAN: return TypeInt::make(constant.as_boolean()); + case T_CHAR: return TypeInt::make(constant.as_char()); + case T_BYTE: return TypeInt::make(constant.as_byte()); + case T_SHORT: return TypeInt::make(constant.as_short()); + case T_INT: return TypeInt::make(constant.as_int()); + case T_LONG: return TypeLong::make(constant.as_long()); + case T_FLOAT: return TypeF::make(constant.as_float()); + case T_DOUBLE: return TypeD::make(constant.as_double()); + case T_ARRAY: + case T_OBJECT: + { + // cases: + // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0) + // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2) + // An oop is not scavengable if it is in the perm gen. + ciObject* oop_constant = constant.as_object(); + if (oop_constant->is_null_object()) { + return Type::get_zero_type(T_OBJECT); + } else if (require_constant || oop_constant->should_be_constant()) { + return TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache); + } + } + } + // Fall through to failure + return NULL; +} + + //------------------------------make------------------------------------------- // Create a simple Type, with default empty symbol sets. Then hashcons it // and look for an existing copy in the type dictionary. @@ -398,6 +430,11 @@ longpair[1] = TypeLong::LONG; TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair); + const Type **intccpair = TypeTuple::fields(2); + intccpair[0] = TypeInt::INT; + intccpair[1] = TypeInt::CC; + TypeTuple::INT_CC_PAIR = TypeTuple::make(2, intccpair); + _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM; _const_basic_type[T_NARROWKLASS] = Type::BOTTOM; _const_basic_type[T_BOOLEAN] = TypeInt::BOOL; @@ -1614,6 +1651,7 @@ const TypeTuple *TypeTuple::START_I2C; const TypeTuple *TypeTuple::INT_PAIR; const TypeTuple *TypeTuple::LONG_PAIR; +const TypeTuple *TypeTuple::INT_CC_PAIR; //------------------------------make------------------------------------------- @@ -1824,12 +1862,12 @@ } //------------------------------make------------------------------------------- -const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) { +const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) { if (UseCompressedOops && elem->isa_oopptr()) { elem = elem->make_narrowoop(); } size = normalize_array_size(size); - return (TypeAry*)(new TypeAry(elem,size))->hashcons(); + return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons(); } //------------------------------meet------------------------------------------- @@ -1850,7 +1888,8 @@ case Array: { // Meeting 2 arrays? 
const TypeAry *a = t->is_ary(); return TypeAry::make(_elem->meet(a->_elem), - _size->xmeet(a->_size)->is_int()); + _size->xmeet(a->_size)->is_int(), + _stable & a->_stable); } case Top: break; @@ -1863,7 +1902,7 @@ const Type *TypeAry::xdual() const { const TypeInt* size_dual = _size->dual()->is_int(); size_dual = normalize_array_size(size_dual); - return new TypeAry( _elem->dual(), size_dual); + return new TypeAry(_elem->dual(), size_dual, !_stable); } //------------------------------eq--------------------------------------------- @@ -1871,13 +1910,14 @@ bool TypeAry::eq( const Type *t ) const { const TypeAry *a = (const TypeAry*)t; return _elem == a->_elem && + _stable == a->_stable && _size == a->_size; } //------------------------------hash------------------------------------------- // Type-specific hashing function. int TypeAry::hash(void) const { - return (intptr_t)_elem + (intptr_t)_size; + return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0); } //----------------------interface_vs_oop--------------------------------------- @@ -1894,6 +1934,7 @@ //------------------------------dump2------------------------------------------ #ifndef PRODUCT void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const { + if (_stable) st->print("stable:"); _elem->dump2(d, depth, st); st->print("["); _size->dump2(d, depth, st); @@ -2381,7 +2422,7 @@ #ifdef _LP64 if (_offset != 0) { if (_offset == oopDesc::klass_offset_in_bytes()) { - _is_ptr_to_narrowklass = UseCompressedKlassPointers; + _is_ptr_to_narrowklass = UseCompressedClassPointers; } else if (klass() == NULL) { // Array with unknown body type assert(this->isa_aryptr(), "only arrays without klass"); @@ -3457,11 +3498,39 @@ assert(new_size != NULL, ""); new_size = narrow_size_type(new_size); if (new_size == size()) return this; - const TypeAry* new_ary = TypeAry::make(elem(), new_size); + const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable()); return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id); } +//------------------------------cast_to_stable--------------------------------- +const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const { + if (stable_dimension <= 0 || (stable_dimension == 1 && stable == this->is_stable())) + return this; + + const Type* elem = this->elem(); + const TypePtr* elem_ptr = elem->make_ptr(); + + if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->isa_aryptr()) { + // If this is widened from a narrow oop, TypeAry::make will re-narrow it. + elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1); + } + + const TypeAry* new_ary = TypeAry::make(elem, size(), stable); + + return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id); +} + +//-----------------------------stable_dimension-------------------------------- +int TypeAryPtr::stable_dimension() const { + if (!is_stable()) return 0; + int dim = 1; + const TypePtr* elem_ptr = elem()->make_ptr(); + if (elem_ptr != NULL && elem_ptr->isa_aryptr()) + dim += elem_ptr->is_aryptr()->stable_dimension(); + return dim; +} + //------------------------------eq--------------------------------------------- // Structural equality check for Type representations bool TypeAryPtr::eq( const Type *t ) const { @@ -3570,7 +3639,7 @@ // Something like byte[int+] meets char[int+]. // This must fall to bottom, not (int[-128..65535])[int+]. 
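The TypeAry hunks above treat @Stable as part of the type lattice: meeting two array types ANDs their stable bits (a meet may only keep what both sides guarantee), and xdual() flips the bit so that dual(dual(t)) == t continues to hold. A toy model of the rule:

    // Toy model of the lattice rule (illustrative, not VM types):
    struct AryBits { bool stable; };
    AryBits meet(AryBits a, AryBits b) { return AryBits{ a.stable && b.stable }; }
    AryBits dual(AryBits a)            { return AryBits{ !a.stable }; }
    // dual(dual(x)).stable == x.stable for both values of x.

The extra (_stable ? 43 : 0) term in hash() keeps stable and non-stable variants from colliding in the type dictionary.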
instance_id = InstanceBot; - tary = TypeAry::make(Type::BOTTOM, tary->_size); + tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable); } } else // Non integral arrays. // Must fall to bottom if exact klasses in upper lattice @@ -3584,7 +3653,7 @@ (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) || // 'this' is exact and super or unrelated: (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) { - tary = TypeAry::make(Type::BOTTOM, tary->_size); + tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable); return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot ); } diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/opto/type.hpp --- a/src/share/vm/opto/type.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/opto/type.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -169,7 +169,7 @@ public: - inline void* operator new( size_t x ) { + inline void* operator new( size_t x ) throw() { Compile* compile = Compile::current(); compile->set_type_last_size(x); void *temp = compile->type_arena()->Amalloc_D(x); @@ -372,6 +372,10 @@ // Mapping from CI type system to compiler type: static const Type* get_typeflow_type(ciType* type); + static const Type* make_from_constant(ciConstant constant, + bool require_constant = false, + bool is_autobox_cache = false); + private: // support arrays static const BasicType _basic_type[]; @@ -580,6 +584,7 @@ static const TypeTuple *START_I2C; static const TypeTuple *INT_PAIR; static const TypeTuple *LONG_PAIR; + static const TypeTuple *INT_CC_PAIR; #ifndef PRODUCT virtual void dump2( Dict &d, uint, outputStream *st ) const; // Specialized per-Type dumping #endif @@ -588,8 +593,8 @@ //------------------------------TypeAry---------------------------------------- // Class of Array Types class TypeAry : public Type { - TypeAry( const Type *elem, const TypeInt *size) : Type(Array), - _elem(elem), _size(size) {} + TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array), + _elem(elem), _size(size), _stable(stable) {} public: virtual bool eq( const Type *t ) const; virtual int hash() const; // Type specific hashing @@ -599,10 +604,11 @@ private: const Type *_elem; // Element type of array const TypeInt *_size; // Elements in array + const bool _stable; // Are elements @Stable? friend class TypeAryPtr; public: - static const TypeAry *make( const Type *elem, const TypeInt *size); + static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false); virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -988,6 +994,7 @@ const TypeAry* ary() const { return _ary; } const Type* elem() const { return _ary->_elem; } const TypeInt* size() const { return _ary->_size; } + bool is_stable() const { return _ary->_stable; } bool is_autobox_cache() const { return _is_autobox_cache; } @@ -1011,6 +1018,9 @@ virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. + const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const; + int stable_dimension() const; + // Convenience common pre-built types. 
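cast_to_stable() and stable_dimension(), declared just above, recurse through nested array types: asking for stable_dimension == 2 marks both the outer array and its element array type, and stable_dimension() counts the chain back down. A simplified stand-alone model of that recursion (AryType is a hypothetical stand-in, not the TypeAryPtr API, which goes through make_ptr()/is_aryptr()):

    #include <cassert>
    #include <memory>

    // Simplified model of a (possibly nested) array type with a stable bit.
    struct AryType {
      bool stable = false;
      std::shared_ptr<AryType> elem;  // non-null if the element type is itself an array
    };

    static std::shared_ptr<AryType> cast_to_stable(std::shared_ptr<AryType> t,
                                                   bool stable, int dim) {
      if (t == nullptr || dim <= 0) return t;
      auto copy = std::make_shared<AryType>(*t);   // types are immutable; build a new one
      copy->stable = stable;
      copy->elem = cast_to_stable(t->elem, stable, dim - 1);
      return copy;
    }

    static int stable_dimension(const std::shared_ptr<AryType>& t) {
      if (t == nullptr || !t->stable) return 0;
      return 1 + stable_dimension(t->elem);
    }

    int main() {
      auto inner = std::make_shared<AryType>();
      auto outer = std::make_shared<AryType>();
      outer->elem = inner;                         // models T[][]
      assert(stable_dimension(outer) == 0);
      auto s1 = cast_to_stable(outer, true, 1);
      assert(stable_dimension(s1) == 1);           // only the outer dimension is stable
      auto s2 = cast_to_stable(outer, true, 2);
      assert(stable_dimension(s2) == 2);           // both dimensions are stable
      return 0;
    }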
static const TypeAryPtr *RANGE; static const TypeAryPtr *OOPS; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/prims/jni.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1339,6 +1339,7 @@ if (call_type == JNI_VIRTUAL) { // jni_GetMethodID makes sure class is linked and initialized // so m should have a valid vtable index. + assert(!m->has_itable_index(), ""); int vtbl_index = m->vtable_index(); if (vtbl_index != Method::nonvirtual_vtable_index) { Klass* k = h_recv->klass(); @@ -1358,12 +1359,7 @@ // interface call KlassHandle h_holder(THREAD, holder); - int itbl_index = m->cached_itable_index(); - if (itbl_index == -1) { - itbl_index = klassItable::compute_itable_index(m); - m->set_cached_itable_index(itbl_index); - // the above may have grabbed a lock, 'm' and anything non-handlized can't be used again - } + int itbl_index = m->itable_index(); Klass* k = h_recv->klass(); selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK); } @@ -3237,19 +3233,22 @@ HOTSPOT_JNI_GETSTRINGCHARS_ENTRY( env, string, (uintptr_t *) isCopy); #endif /* USDT2 */ - //%note jni_5 - if (isCopy != NULL) { - *isCopy = JNI_TRUE; - } oop s = JNIHandles::resolve_non_null(string); int s_len = java_lang_String::length(s); typeArrayOop s_value = java_lang_String::value(s); int s_offset = java_lang_String::offset(s); - jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal); // add one for zero termination - if (s_len > 0) { - memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len); + jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination + /* JNI Specification states return NULL on OOM */ + if (buf != NULL) { + if (s_len > 0) { + memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len); + } + buf[s_len] = 0; + //%note jni_5 + if (isCopy != NULL) { + *isCopy = JNI_TRUE; + } } - buf[s_len] = 0; #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf); #else /* USDT2 */ @@ -3338,9 +3337,14 @@ #endif /* USDT2 */ oop java_string = JNIHandles::resolve_non_null(string); size_t length = java_lang_String::utf8_length(java_string); - char* result = AllocateHeap(length + 1, mtInternal); - java_lang_String::as_utf8_string(java_string, result, (int) length + 1); - if (isCopy != NULL) *isCopy = JNI_TRUE; + /* JNI Specification states return NULL on OOM */ + char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL); + if (result != NULL) { + java_lang_String::as_utf8_string(java_string, result, (int) length + 1); + if (isCopy != NULL) { + *isCopy = JNI_TRUE; + } + } #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result); #else /* USDT2 */ @@ -3594,11 +3598,16 @@ * Avoid asserts in typeArrayOop. 
*/ \ result = (ElementType*)get_bad_address(); \ } else { \ - result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \ - /* copy the array to the c chunk */ \ - memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ + /* JNI Specification states return NULL on OOM */ \ + result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \ + if (result != NULL) { \ + /* copy the array to the c chunk */ \ + memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ + if (isCopy) { \ + *isCopy = JNI_TRUE; \ + } \ + } \ } \ - if (isCopy) *isCopy = JNI_TRUE; \ DTRACE_PROBE1(hotspot_jni, Get##Result##ArrayElements__return, result);\ return result; \ JNI_END @@ -3631,11 +3640,16 @@ * Avoid asserts in typeArrayOop. */ \ result = (ElementType*)get_bad_address(); \ } else { \ - result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \ - /* copy the array to the c chunk */ \ - memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ + /* JNI Specification states return NULL on OOM */ \ + result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \ + if (result != NULL) { \ + /* copy the array to the c chunk */ \ + memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ + if (isCopy) { \ + *isCopy = JNI_TRUE; \ + } \ + } \ } \ - if (isCopy) *isCopy = JNI_TRUE; \ ReturnProbe; \ return result; \ JNI_END @@ -5022,6 +5036,7 @@ #include "gc_implementation/g1/heapRegionRemSet.hpp" #endif #include "utilities/quickSort.hpp" +#include "utilities/ostream.hpp" #if INCLUDE_VM_STRUCTS #include "runtime/vmStructs.hpp" #endif @@ -5030,19 +5045,34 @@ tty->print_cr("Running test: " #unit_test_function_call); \ unit_test_function_call +// Forward declaration +void TestReservedSpace_test(); +void TestReserveMemorySpecial_test(); +void TestVirtualSpace_test(); +void TestMetaspaceAux_test(); +#if INCLUDE_ALL_GCS +void TestG1BiasedArray_test(); +#endif + void execute_internal_vm_tests() { if (ExecuteInternalVMTests) { tty->print_cr("Running internal VM tests"); + run_unit_test(TestReservedSpace_test()); + run_unit_test(TestReserveMemorySpecial_test()); + run_unit_test(TestVirtualSpace_test()); + run_unit_test(TestMetaspaceAux_test()); run_unit_test(GlobalDefinitions::test_globals()); run_unit_test(GCTimerAllTest::all()); run_unit_test(arrayOopDesc::test_max_array_length()); run_unit_test(CollectedHeap::test_is_in()); run_unit_test(QuickSort::test_quick_sort()); run_unit_test(AltHashing::test_alt_hash()); + run_unit_test(test_loggc_filename()); #if INCLUDE_VM_STRUCTS run_unit_test(VMStructs::test()); #endif #if INCLUDE_ALL_GCS + run_unit_test(TestG1BiasedArray_test()); run_unit_test(HeapRegionRemSet::test_prt()); #endif tty->print_cr("All internal VM tests passed"); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/prims/jvm.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -1837,7 +1837,7 @@ } if (!publicOnly || fs.access_flags().is_public()) { - fd.initialize(k(), fs.index()); + fd.reinitialize(k(), fs.index()); oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL); result->obj_at_put(out_idx, field); ++out_idx; @@ -1848,16 +1848,27 @@ } JVM_END -JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly)) -{ - JVMWrapper("JVM_GetClassDeclaredMethods"); +static bool select_method(methodHandle method, bool want_constructor) { + if (want_constructor) { + return (method->is_initializer() && !method->is_static()); + } else { + return 
(!method->is_initializer() && !method->is_overpass()); + } +} + +static jobjectArray get_class_declared_methods_helper( + JNIEnv *env, + jclass ofClass, jboolean publicOnly, + bool want_constructor, + Klass* klass, TRAPS) { + JvmtiVMObjectAllocEventCollector oam; // Exclude primitive types and array types if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) { // Return empty array - oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL); + oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL); return (jobjectArray) JNIHandles::make_local(env, res); } @@ -1868,87 +1879,67 @@ Array* methods = k->methods(); int methods_length = methods->length(); + + // Save original method_idnum in case of redefinition, which can change + // the idnum of obsolete methods. The new method will have the same idnum + // but if we refresh the methods array, the counts will be wrong. + ResourceMark rm(THREAD); + GrowableArray* idnums = new GrowableArray(methods_length); int num_methods = 0; - int i; - for (i = 0; i < methods_length; i++) { + for (int i = 0; i < methods_length; i++) { methodHandle method(THREAD, methods->at(i)); - if (!method->is_initializer() && !method->is_overpass()) { + if (select_method(method, want_constructor)) { if (!publicOnly || method->is_public()) { + idnums->push(method->method_idnum()); ++num_methods; } } } // Allocate result - objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL); + objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL); objArrayHandle result (THREAD, r); - int out_idx = 0; - for (i = 0; i < methods_length; i++) { - methodHandle method(THREAD, methods->at(i)); - if (!method->is_initializer() && !method->is_overpass()) { - if (!publicOnly || method->is_public()) { - oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL); - result->obj_at_put(out_idx, m); - ++out_idx; + // Now just put the methods that we selected above, but go by their idnum + // in case of redefinition. The methods can be redefined at any safepoint, + // so above when allocating the oop array and below when creating reflect + // objects. 
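The helper above deliberately works in two passes keyed by method_idnum: the first pass only records idnums, the second (below) re-resolves each idnum after the allocations, so a redefinition at a safepoint in between at worst yields a NULL slot instead of a stale Method*. The same select-then-re-resolve shape in miniature (toy containers, nothing from the VM):

    #include <cassert>
    #include <map>
    #include <vector>

    // Toy model: select by stable id first, re-resolve later, tolerate deletion.
    typedef int idnum_t;

    int main() {
      std::map<idnum_t, const char*> methods = { {7, "foo"}, {9, "bar"}, {12, "baz"} };

      // Pass 1: record only the idnums of the entries we want.
      std::vector<idnum_t> idnums;
      for (std::map<idnum_t, const char*>::iterator it = methods.begin();
           it != methods.end(); ++it) {
        idnums.push_back(it->first);
      }

      methods.erase(9);  // a "redefinition" deletes one method between the passes

      // Pass 2: re-resolve by idnum; a deleted method becomes a NULL slot.
      std::vector<const char*> result(idnums.size(), NULL);
      for (size_t i = 0; i < idnums.size(); i++) {
        std::map<idnum_t, const char*>::iterator it = methods.find(idnums[i]);
        result[i] = (it == methods.end()) ? NULL : it->second;
      }
      assert(result[0] != NULL && result[1] == NULL && result[2] != NULL);
      return 0;
    }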
+ for (int i = 0; i < num_methods; i++) { + methodHandle method(THREAD, k->method_with_idnum(idnums->at(i))); + if (method.is_null()) { + // Method may have been deleted and seems this API can handle null + // Otherwise should probably put a method that throws NSME + result->obj_at_put(i, NULL); + } else { + oop m; + if (want_constructor) { + m = Reflection::new_constructor(method, CHECK_NULL); + } else { + m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL); } + result->obj_at_put(i, m); } } - assert(out_idx == num_methods, "just checking"); + return (jobjectArray) JNIHandles::make_local(env, result()); } + +JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly)) +{ + JVMWrapper("JVM_GetClassDeclaredMethods"); + return get_class_declared_methods_helper(env, ofClass, publicOnly, + /*want_constructor*/ false, + SystemDictionary::reflect_Method_klass(), THREAD); +} JVM_END JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly)) { JVMWrapper("JVM_GetClassDeclaredConstructors"); - JvmtiVMObjectAllocEventCollector oam; - - // Exclude primitive types and array types - if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) - || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) { - // Return empty array - oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL); - return (jobjectArray) JNIHandles::make_local(env, res); - } - - instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))); - - // Ensure class is linked - k->link_class(CHECK_NULL); - - Array* methods = k->methods(); - int methods_length = methods->length(); - int num_constructors = 0; - - int i; - for (i = 0; i < methods_length; i++) { - methodHandle method(THREAD, methods->at(i)); - if (method->is_initializer() && !method->is_static()) { - if (!publicOnly || method->is_public()) { - ++num_constructors; - } - } - } - - // Allocate result - objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL); - objArrayHandle result(THREAD, r); - - int out_idx = 0; - for (i = 0; i < methods_length; i++) { - methodHandle method(THREAD, methods->at(i)); - if (method->is_initializer() && !method->is_static()) { - if (!publicOnly || method->is_public()) { - oop m = Reflection::new_constructor(method, CHECK_NULL); - result->obj_at_put(out_idx, m); - ++out_idx; - } - } - } - assert(out_idx == num_constructors, "just checking"); - return (jobjectArray) JNIHandles::make_local(env, result()); + return get_class_declared_methods_helper(env, ofClass, publicOnly, + /*want_constructor*/ true, + SystemDictionary::reflect_Constructor_klass(), THREAD); } JVM_END @@ -4248,13 +4239,13 @@ JVM_LEAF(jboolean, JVM_AccessVMBooleanFlag(const char* name, jboolean* value, jboolean is_get)) JVMWrapper("JVM_AccessBoolVMFlag"); - return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, INTERNAL); + return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, Flag::INTERNAL); JVM_END JVM_LEAF(jboolean, JVM_AccessVMIntFlag(const char* name, jint* value, jboolean is_get)) JVMWrapper("JVM_AccessVMIntFlag"); intx v; - jboolean result = is_get ? 
CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, INTERNAL);
+ jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, Flag::INTERNAL);
*value = (jint)v;
return result;
JVM_END
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/prims/jvmti.xml
--- a/src/share/vm/prims/jvmti.xml Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/prims/jvmti.xml Fri Oct 11 10:38:03 2013 +0200
@@ -1,7 +1,7 @@
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/array.hpp
--- a/src/share/vm/utilities/array.hpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/array.hpp Fri Oct 11 10:38:03 2013 +0200
Array(const Array&);
void operator=(const Array&);
- void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
+ void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
size_t word_size = Array::size(length);
return (void*) Metaspace::allocate(loader_data, word_size, read_only, MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
@@ -353,9 +353,9 @@
// sort the array.
bool contains(const T& x) const { return index_of(x) >= 0; }
- T at(int i) const { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); return _data[i]; }
- void at_put(const int i, const T& x) { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); _data[i] = x; }
- T* adr_at(const int i) { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); return &_data[i]; }
+ T at(int i) const { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); return _data[i]; }
+ void at_put(const int i, const T& x) { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); _data[i] = x; }
+ T* adr_at(const int i) { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); return &_data[i]; }
int find(const T& x) { return index_of(x); }
T at_acquire(const int which) { return OrderAccess::load_acquire(adr_at(which)); }
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/bitMap.inline.hpp
--- a/src/share/vm/utilities/bitMap.inline.hpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/bitMap.inline.hpp Fri Oct 11 10:38:03 2013 +0200
@@ -52,16 +52,16 @@
inline bool BitMap::par_set_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val | mask;
+ const bm_word_t new_val = old_val | mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+ const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
(volatile void*) addr,
(void*) old_val);
if (cur_val == old_val) {
@@ -73,16 +73,16 @@
inline bool BitMap::par_clear_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = ~bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = ~bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val & mask;
+ const bm_word_t new_val = old_val & mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
} - const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val, + const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val, (volatile void*) addr, (void*) old_val); if (cur_val == old_val) { diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/decoder.cpp --- a/src/share/vm/utilities/decoder.cpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/decoder.cpp Fri Oct 11 10:38:03 2013 +0200 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "prims/jvm.h" -#include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "utilities/decoder.hpp" #include "utilities/vmError.hpp" @@ -80,6 +79,23 @@ return decoder; } +inline bool DecoderLocker::is_first_error_thread() { + return (os::current_thread_id() == VMError::get_first_error_tid()); +} + +DecoderLocker::DecoderLocker() : + MutexLockerEx(DecoderLocker::is_first_error_thread() ? + NULL : Decoder::shared_decoder_lock(), true) { + _decoder = is_first_error_thread() ? + Decoder::get_error_handler_instance() : Decoder::get_shared_instance(); + assert(_decoder != NULL, "null decoder"); +} + +Mutex* Decoder::shared_decoder_lock() { + assert(_shared_decoder_lock != NULL, "Just check"); + return _shared_decoder_lock; +} + bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) { assert(_shared_decoder_lock != NULL, "Just check"); bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/decoder.hpp --- a/src/share/vm/utilities/decoder.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/decoder.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "runtime/mutex.hpp" +#include "runtime/mutexLocker.hpp" class AbstractDecoder : public CHeapObj { public: @@ -124,6 +125,19 @@ protected: static Mutex* _shared_decoder_lock; + static Mutex* shared_decoder_lock(); + + friend class DecoderLocker; +}; + +class DecoderLocker : public MutexLockerEx { + AbstractDecoder* _decoder; + inline bool is_first_error_thread(); +public: + DecoderLocker(); + AbstractDecoder* decoder() { + return _decoder; + } }; #endif // SHARE_VM_UTILITIES_DECODER_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/globalDefinitions.hpp --- a/src/share/vm/utilities/globalDefinitions.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -402,6 +402,14 @@ #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1)) +inline bool is_size_aligned(size_t size, size_t alignment) { + return align_size_up_(size, alignment) == size; +} + +inline bool is_ptr_aligned(void* ptr, size_t alignment) { + return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr; +} + inline intptr_t align_size_up(intptr_t size, intptr_t alignment) { return align_size_up_(size, alignment); } @@ -414,6 +422,14 @@ #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment))) +inline void* align_ptr_up(void* ptr, size_t alignment) { + return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment); +} + +inline void* align_ptr_down(void* ptr, size_t alignment) { + return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment); +} + // Align objects by rounding up their size, in HeapWord units. 
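The new is_size_aligned / is_ptr_aligned / align_ptr_up helpers above all reduce to the align_size_up_ mask trick, which only works for power-of-two alignments. A worked check of the rounding (the macro is copied from the hunk above; the align_object_size_ macro that follows applies the same rounding in HeapWord units):

    #include <cassert>
    #include <cstdint>
    #include <cstddef>

    // Round size up to a power-of-two alignment, as in globalDefinitions.hpp above.
    #define align_size_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((alignment) - 1))

    static bool is_ptr_aligned(void* ptr, size_t alignment) {
      return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
    }

    int main() {
      assert(align_size_up_(17, 8) == 24);   // rounds up to the next multiple of 8
      assert(align_size_up_(24, 8) == 24);   // already aligned: unchanged
      char buf[32];
      void* p = (void*)align_size_up_((intptr_t)&buf[1], (intptr_t)8);
      assert(is_ptr_aligned(p, 8));          // the align_ptr_up result is aligned
      return 0;
    }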
#define align_object_size_(size) align_size_up_(size, MinObjAlignment)
@@ -951,9 +967,9 @@
// (These must be implemented as #defines because C++ compilers are
// not obligated to inline non-integral constants!)
#define badAddress ((address)::badAddressVal)
-#define badOop ((oop)::badOopVal)
+#define badOop (cast_to_oop(::badOopVal))
#define badHeapWord (::badHeapWordVal)
-#define badJNIHandle ((oop)::badJNIHandleVal)
+#define badJNIHandle (cast_to_oop(::badJNIHandleVal))
// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/globalDefinitions_visCPP.hpp
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp Fri Oct 11 10:38:03 2013 +0200
@@ -189,6 +189,10 @@
#pragma warning( disable : 4201 ) // nonstandard extension used : nameless struct/union (needed in windows.h)
#pragma warning( disable : 4511 ) // copy constructor could not be generated
#pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization throws an exception
+#ifdef CHECK_UNHANDLED_OOPS
+#pragma warning( disable : 4521 ) // class has multiple copy ctors of a single type
+#pragma warning( disable : 4522 ) // class has multiple assignment operators of a single type
+#endif // CHECK_UNHANDLED_OOPS
#if _MSC_VER >= 1400
#pragma warning( disable : 4996 ) // unsafe string functions. Same as define _CRT_SECURE_NO_WARNINGS/_CRT_SECURE_NO_DEPRECATE
#endif
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/growableArray.hpp
--- a/src/share/vm/utilities/growableArray.hpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/growableArray.hpp Fri Oct 11 10:38:03 2013 +0200
@@ -194,6 +194,7 @@
void clear() { _len = 0; }
int length() const { return _len; }
+ int max_length() const { return _max; }
void trunc_to(int l) { assert(l <= _len,"cannot increase length"); _len = l; }
bool is_empty() const { return _len == 0; }
bool is_nonempty() const { return _len != 0; }
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/hashtable.cpp
--- a/src/share/vm/utilities/hashtable.cpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/hashtable.cpp Fri Oct 11 10:38:03 2013 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,9 +356,9 @@
template class Hashtable;
template class Hashtable;
template class Hashtable;
-#ifdef SOLARIS
+#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable;
-#endif
+#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable;
template class Hashtable;
template class HashtableEntry;
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/ostream.cpp
--- a/src/share/vm/utilities/ostream.cpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/ostream.cpp Fri Oct 11 10:38:03 2013 +0200
@@ -342,7 +342,7 @@
}
char* stringStream::as_string() {
- char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
+ char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos + 1);
strncpy(copy, buffer, buffer_pos);
copy[buffer_pos] = 0; // terminating null
return copy;
@@ -355,14 +355,190 @@
outputStream* gclog_or_tty;
extern Mutex* tty_lock;
+#define EXTRACHARLEN 32
+#define CURRENTAPPX ".current"
+#define FILENAMEBUFLEN 1024
+// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
+char* get_datetime_string(char *buf, size_t len) {
+ os::local_time_string(buf, len);
+ int i = (int)strlen(buf);
+ while (i-- > 0) {
+ if (buf[i] == ' ') buf[i] = '_';
+ else if (buf[i] == ':') buf[i] = '-';
+ }
+ return buf;
+}
+
+static const char* make_log_name_internal(const char* log_name, const char* force_directory,
+ int pid, const char* tms) {
+ const char* basename = log_name;
+ char file_sep = os::file_separator()[0];
+ const char* cp;
+ char pid_text[32];
+
+ for (cp = log_name; *cp != '\0'; cp++) {
+ if (*cp == '/' || *cp == file_sep) {
+ basename = cp + 1;
+ }
+ }
+ const char* nametail = log_name;
+ // Compute buffer length
+ size_t buffer_length;
+ if (force_directory != NULL) {
+ buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+ strlen(basename) + 1;
+ } else {
+ buffer_length = strlen(log_name) + 1;
+ }
+
+ // const char* star = strchr(basename, '*');
+ const char* pts = strstr(basename, "%p");
+ int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
+
+ if (pid_pos >= 0) {
+ jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid);
+ buffer_length += strlen(pid_text);
+ }
+
+ pts = strstr(basename, "%t");
+ int tms_pos = (pts == NULL) ? -1 : (pts - nametail);
+ if (tms_pos >= 0) {
+ buffer_length += strlen(tms);
+ }
+
+ // Create big enough buffer.
+ char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
+
+ strcpy(buf, "");
+ if (force_directory != NULL) {
+ strcat(buf, force_directory);
+ strcat(buf, os::file_separator());
+ nametail = basename; // completely skip directory prefix
+ }
+
+ // who is first, %p or %t?
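get_datetime_string() above rewrites the os::local_time_string() output in place so the timestamp can be embedded in a file name. The same transformation on a std::string, with a worked example (a sketch, not the HotSpot helper); the placeholder-splicing logic of make_log_name_internal continues below:

    #include <cassert>
    #include <string>

    // Same character mangling as get_datetime_string above, on a std::string.
    static std::string mangle_datetime(std::string s) {
      for (std::string::size_type i = 0; i < s.size(); i++) {
        if (s[i] == ' ') s[i] = '_';
        else if (s[i] == ':') s[i] = '-';
      }
      return s;
    }

    int main() {
      assert(mangle_datetime("2013-10-11 10:38:03") == "2013-10-11_10-38-03");
      return 0;
    }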
+ int first = -1, second = -1;
+ const char *p1st = NULL;
+ const char *p2nd = NULL;
+
+ if (pid_pos >= 0 && tms_pos >= 0) {
+ // contains both %p and %t
+ if (pid_pos < tms_pos) {
+ // case foo%pbar%tmonkey.log
+ first = pid_pos;
+ p1st = pid_text;
+ second = tms_pos;
+ p2nd = tms;
+ } else {
+ // case foo%tbar%pmonkey.log
+ first = tms_pos;
+ p1st = tms;
+ second = pid_pos;
+ p2nd = pid_text;
+ }
+ } else if (pid_pos >= 0) {
+ // contains %p only
+ first = pid_pos;
+ p1st = pid_text;
+ } else if (tms_pos >= 0) {
+ // contains %t only
+ first = tms_pos;
+ p1st = tms;
+ }
+
+ int buf_pos = (int)strlen(buf);
+ const char* tail = nametail;
+
+ if (first >= 0) {
+ tail = nametail + first + 2;
+ strncpy(&buf[buf_pos], nametail, first);
+ strcpy(&buf[buf_pos + first], p1st);
+ buf_pos = (int)strlen(buf);
+ if (second >= 0) {
+ strncpy(&buf[buf_pos], tail, second - first - 2);
+ strcpy(&buf[buf_pos + second - first - 2], p2nd);
+ tail = nametail + second + 2;
+ }
+ }
+ strcat(buf, tail); // append rest of name, or all of name
+ return buf;
+}
+
+// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// in log_name, %p => pid1234 and
+// %t => YYYY-MM-DD_HH-MM-SS
+static const char* make_log_name(const char* log_name, const char* force_directory) {
+ char timestr[32];
+ get_datetime_string(timestr, sizeof(timestr));
+ return make_log_name_internal(log_name, force_directory, os::current_process_id(),
+ timestr);
+}
+
+#ifndef PRODUCT
+void test_loggc_filename() {
+ int pid;
+ char tms[32];
+ char i_result[FILENAMEBUFLEN];
+ const char* o_result;
+ get_datetime_string(tms, sizeof(tms));
+ pid = os::current_process_id();
+
+ // test.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
+ o_result = make_log_name_internal("test.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // test-%t-%p.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+ o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // test-%t%p.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+ o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %p%t.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+ o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %p-test.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+ o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %t.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+ o_result = make_log_name_internal("%t.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+}
+#endif // PRODUCT
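make_log_name_internal() above splices the pid and timestamp into the name by hand-tracking the %p/%t positions in a C buffer, which is what test_loggc_filename() pins down case by case. For comparison, one placeholder expansion in a std::string sketch (expand_placeholder is a hypothetical helper, not HotSpot code):

    #include <cassert>
    #include <string>

    // Replace the first occurrence of a two-character placeholder such as "%p" or "%t".
    static std::string expand_placeholder(std::string name,
                                          const std::string& pat,
                                          const std::string& value) {
      std::string::size_type pos = name.find(pat);
      if (pos != std::string::npos) {
        name.replace(pos, pat.size(), value);
      }
      return name;
    }

    int main() {
      std::string name = "gc-%t-%p.log";
      name = expand_placeholder(name, "%p", "pid1234");
      name = expand_placeholder(name, "%t", "2013-10-11_10-38-03");
      assert(name == "gc-2013-10-11_10-38-03-pid1234.log");
      return 0;
    }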
+ fileStream::fileStream(const char* file_name) { _file = fopen(file_name, "w"); - _need_close = true; + if (_file != NULL) { + _need_close = true; + } else { + warning("Cannot open file %s due to %s\n", file_name, strerror(errno)); + _need_close = false; + } } fileStream::fileStream(const char* file_name, const char* opentype) { _file = fopen(file_name, opentype); - _need_close = true; + if (_file != NULL) { + _need_close = true; + } else { + warning("Cannot open file %s due to %s\n", file_name, strerror(errno)); + _need_close = false; + } } void fileStream::write(const char* s, size_t len) { @@ -423,34 +599,51 @@ update_position(s, len); } -rotatingFileStream::~rotatingFileStream() { +// dump vm version, os version, platform info, build id, +// memory usage and command line flags into header +void gcLogFileStream::dump_loggc_header() { + if (is_open()) { + print_cr(Abstract_VM_Version::internal_vm_info_string()); + os::print_memory_info(this); + print("CommandLine flags: "); + CommandLineFlags::printSetFlags(this); + } +} + +gcLogFileStream::~gcLogFileStream() { if (_file != NULL) { if (_need_close) fclose(_file); - _file = NULL; + _file = NULL; + } + if (_file_name != NULL) { FREE_C_HEAP_ARRAY(char, _file_name, mtInternal); _file_name = NULL; } } -rotatingFileStream::rotatingFileStream(const char* file_name) { +gcLogFileStream::gcLogFileStream(const char* file_name) { _cur_file_num = 0; _bytes_written = 0L; - _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal); - jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num); - _file = fopen(_file_name, "w"); - _need_close = true; + _file_name = make_log_name(file_name, NULL); + + // gc log file rotation + if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) { + char tempbuf[FILENAMEBUFLEN]; + jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num); + _file = fopen(tempbuf, "w"); + } else { + _file = fopen(_file_name, "w"); + } + if (_file != NULL) { + _need_close = true; + dump_loggc_header(); + } else { + warning("Cannot open file %s due to %s\n", _file_name, strerror(errno)); + _need_close = false; + } } -rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) { - _cur_file_num = 0; - _bytes_written = 0L; - _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal); - jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num); - _file = fopen(_file_name, opentype); - _need_close = true; -} - -void rotatingFileStream::write(const char* s, size_t len) { +void gcLogFileStream::write(const char* s, size_t len) { if (_file != NULL) { size_t count = fwrite(s, 1, len, _file); _bytes_written += count; @@ -466,7 +659,12 @@ // write to gc log file at safepoint. If in future, changes made for mutator threads or // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log // must be synchronized. 
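The gcLogFileStream constructor above and the rewritten rotate_log() below work through a fixed name cycle: the live file is extended_filename.<i>.current, it is renamed to extended_filename.<i> once it reaches GCLogFileSize, and <i> wraps at NumberOfGCLogFiles. The naming scheme in isolation (a sketch of the names only, no I/O; the flag value is assumed for the demo):

    #include <cstdio>
    #include <string>

    // Name of the rotation file currently being written.
    static std::string current_name(const std::string& base, unsigned i) {
      return base + "." + std::to_string(i) + ".current";
    }
    // Name the file is saved under once it reaches GCLogFileSize.
    static std::string saved_name(const std::string& base, unsigned i) {
      return base + "." + std::to_string(i);
    }

    int main() {
      const unsigned NumberOfGCLogFiles = 3;   // assumed flag value
      unsigned cur = 0;
      for (int rotation = 0; rotation < 5; rotation++) {
        printf("live: %s -> saved as: %s\n",
               current_name("gc.log", cur).c_str(), saved_name("gc.log", cur).c_str());
        cur = (cur + 1) % NumberOfGCLogFiles;  // wraps exactly like rotate_log
      }
      return 0;
    }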
-void rotatingFileStream::rotate_log() { +void gcLogFileStream::rotate_log() { + char time_msg[FILENAMEBUFLEN]; + char time_str[EXTRACHARLEN]; + char current_file_name[FILENAMEBUFLEN]; + char renamed_file_name[FILENAMEBUFLEN]; + if (_bytes_written < (jlong)GCLogFileSize) { return; } @@ -481,27 +679,89 @@ // rotate in same file rewind(); _bytes_written = 0L; + jio_snprintf(time_msg, sizeof(time_msg), "File %s rotated at %s\n", + _file_name, os::local_time_string((char *)time_str, sizeof(time_str))); + write(time_msg, strlen(time_msg)); + dump_loggc_header(); return; } - // rotate file in names file.0, file.1, file.2, ..., file. - // close current file, rotate to next file +#if defined(_WINDOWS) +#ifndef F_OK +#define F_OK 0 +#endif +#endif // _WINDOWS + + // rotate file in names extended_filename.0, extended_filename.1, ..., + // extended_filename.. Current rotation file name will + // have a form of extended_filename..current where i is the current rotation + // file number. After it reaches max file size, the file will be saved and renamed + // with .current removed from its tail. + size_t filename_len = strlen(_file_name); if (_file != NULL) { - _cur_file_num ++; - if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0; - jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d", - Arguments::gc_log_filename(), _cur_file_num); + jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d", + _file_name, _cur_file_num); + jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX, + _file_name, _cur_file_num); + jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the" + " maximum size. Saved as %s\n", + os::local_time_string((char *)time_str, sizeof(time_str)), + renamed_file_name); + write(time_msg, strlen(time_msg)); + fclose(_file); _file = NULL; + + bool can_rename = true; + if (access(current_file_name, F_OK) != 0) { + // current file does not exist? 
+ warning("No source file exists, cannot rename\n"); + can_rename = false; + } + if (can_rename) { + if (access(renamed_file_name, F_OK) == 0) { + if (remove(renamed_file_name) != 0) { + warning("Could not delete existing file %s\n", renamed_file_name); + can_rename = false; + } + } else { + // file does not exist, ok to rename + } + } + if (can_rename && rename(current_file_name, renamed_file_name) != 0) { + warning("Could not rename %s to %s\n", _file_name, renamed_file_name); + } } - _file = fopen(_file_name, "w"); + + _cur_file_num++; + if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0; + jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX, + _file_name, _cur_file_num); + _file = fopen(current_file_name, "w"); + if (_file != NULL) { _bytes_written = 0L; _need_close = true; + // reuse current_file_name for time_msg + jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, + "%s.%d", _file_name, _cur_file_num); + jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n", + os::local_time_string((char *)time_str, sizeof(time_str)), + current_file_name); + write(time_msg, strlen(time_msg)); + dump_loggc_header(); + // remove the existing file + if (access(current_file_name, F_OK) == 0) { + if (remove(current_file_name) != 0) { + warning("Could not delete existing file %s\n", current_file_name); + } + } } else { - tty->print_cr("failed to open rotation log file %s due to %s\n", + warning("failed to open rotation log file %s due to %s\n" + "Turned off GC log file rotation\n", _file_name, strerror(errno)); _need_close = false; + FLAG_SET_DEFAULT(UseGCLogFileRotation, false); } } @@ -530,69 +790,9 @@ return _log_file != NULL; } -static const char* make_log_name(const char* log_name, const char* force_directory) { - const char* basename = log_name; - char file_sep = os::file_separator()[0]; - const char* cp; - for (cp = log_name; *cp != '\0'; cp++) { - if (*cp == '/' || *cp == file_sep) { - basename = cp+1; - } - } - const char* nametail = log_name; - - // Compute buffer length - size_t buffer_length; - if (force_directory != NULL) { - buffer_length = strlen(force_directory) + strlen(os::file_separator()) + - strlen(basename) + 1; - } else { - buffer_length = strlen(log_name) + 1; - } - - const char* star = strchr(basename, '*'); - int star_pos = (star == NULL) ? -1 : (star - nametail); - int skip = 1; - if (star == NULL) { - // Try %p - star = strstr(basename, "%p"); - if (star != NULL) { - skip = 2; - } - } - star_pos = (star == NULL) ? -1 : (star - nametail); - - char pid[32]; - if (star_pos >= 0) { - jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id()); - buffer_length += strlen(pid); - } - - // Create big enough buffer. - char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal); - - strcpy(buf, ""); - if (force_directory != NULL) { - strcat(buf, force_directory); - strcat(buf, os::file_separator()); - nametail = basename; // completely skip directory prefix - } - - if (star_pos >= 0) { - // convert foo*bar.log or foo%pbar.log to foo123bar.log - int buf_pos = (int) strlen(buf); - strncpy(&buf[buf_pos], nametail, star_pos); - strcpy(&buf[buf_pos + star_pos], pid); - nametail += star_pos + skip; // skip prefix and pid format - } - - strcat(buf, nametail); // append rest of name, or all of name - return buf; -} - void defaultStream::init_log() { // %%% Need a MutexLocker? - const char* log_name = LogFile != NULL ? LogFile : "hotspot.log"; + const char* log_name = LogFile != NULL ? 
LogFile : "hotspot_pid%p.log"; const char* try_name = make_log_name(log_name, NULL); fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name); if (!file->is_open()) { @@ -603,14 +803,15 @@ // Note: This feature is for maintainer use only. No need for L10N. jio_print(warnbuf); FREE_C_HEAP_ARRAY(char, try_name, mtInternal); - try_name = make_log_name("hs_pid%p.log", os::get_temp_directory()); + try_name = make_log_name(log_name, os::get_temp_directory()); jio_snprintf(warnbuf, sizeof(warnbuf), "Warning: Forcing option -XX:LogFile=%s\n", try_name); jio_print(warnbuf); delete file; file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name); - FREE_C_HEAP_ARRAY(char, try_name, mtInternal); } + FREE_C_HEAP_ARRAY(char, try_name, mtInternal); + if (file->is_open()) { _log_file = file; xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file); @@ -877,11 +1078,8 @@ gclog_or_tty = tty; // default to tty if (Arguments::gc_log_filename() != NULL) { - fileStream * gclog = UseGCLogFileRotation ? - new(ResourceObj::C_HEAP, mtInternal) - rotatingFileStream(Arguments::gc_log_filename()) : - new(ResourceObj::C_HEAP, mtInternal) - fileStream(Arguments::gc_log_filename()); + fileStream * gclog = new(ResourceObj::C_HEAP, mtInternal) + gcLogFileStream(Arguments::gc_log_filename()); if (gclog->is_open()) { // now we update the time stamp of the GC log to be synced up // with tty. diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/ostream.hpp --- a/src/share/vm/utilities/ostream.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/ostream.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -231,20 +231,24 @@ void flush() {}; }; -class rotatingFileStream : public fileStream { +class gcLogFileStream : public fileStream { protected: - char* _file_name; + const char* _file_name; jlong _bytes_written; - uintx _cur_file_num; // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1 + uintx _cur_file_num; // current logfile rotation number, from 0 to NumberOfGCLogFiles-1 public: - rotatingFileStream(const char* file_name); - rotatingFileStream(const char* file_name, const char* opentype); - rotatingFileStream(FILE* file) : fileStream(file) {} - ~rotatingFileStream(); + gcLogFileStream(const char* file_name); + ~gcLogFileStream(); virtual void write(const char* c, size_t len); virtual void rotate_log(); + void dump_loggc_header(); }; +#ifndef PRODUCT +// unit test for checking -Xloggc: parsing result +void test_loggc_filename(); +#endif + void ostream_init(); void ostream_init_log(); void ostream_exit(); diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/taskqueue.hpp --- a/src/share/vm/utilities/taskqueue.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/taskqueue.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -132,6 +132,8 @@ } #endif // TASKQUEUE_STATS +// TaskQueueSuper collects functionality common to all GenericTaskQueue instances. + template class TaskQueueSuper: public CHeapObj { protected: @@ -249,7 +251,36 @@ TASKQUEUE_STATS_ONLY(TaskQueueStats stats;) }; - +// +// GenericTaskQueue implements an ABP, Aurora-Blumofe-Plaxton, double- +// ended-queue (deque), intended for use in work stealing. Queue operations +// are non-blocking. 
+//
+// A queue owner thread performs push() and pop_local() operations on one end
+// of the queue, while other threads may steal work using the pop_global()
+// method.
+//
+// The main difference to the original algorithm is that this
+// implementation allows wrap-around at the end of its allocated
+// storage, which is an array.
+//
+// The original paper is:
+//
+// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
+// Thread scheduling for multiprogrammed multiprocessors.
+// Theory of Computing Systems 34, 2 (2001), 115-144.
+//
+// The following paper provides a correctness proof and an
+// implementation for weakly ordered memory models including (pseudo-)
+// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
+// similar to ABP, with the main difference that it allows resizing of the
+// underlying storage:
+//
+// Le, N. M., Pop, A., Cohen A., and Nardelli, F. Z.
+// Correct and efficient work-stealing for weak memory models
+// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
+// practice of parallel programming (PPoPP 2013), 69-80
+//
template
class GenericTaskQueue: public TaskQueueSuper {
@@ -291,11 +322,11 @@
// Attempts to claim a task from the "local" end of the queue (the most
// recently pushed). If successful, returns true and sets t to the task;
// otherwise, returns false (the queue is empty).
- inline bool pop_local(E& t);
+ inline bool pop_local(volatile E& t);
// Like pop_local(), but uses the "global" end of the queue (the least
// recently pushed).
- bool pop_global(E& t);
+ bool pop_global(volatile E& t);
// Delete any resource associated with the queue.
~GenericTaskQueue();
@@ -393,7 +424,7 @@
}
template
-bool GenericTaskQueue::pop_global(E& t) {
+bool GenericTaskQueue::pop_global(volatile E& t) {
Age oldAge = _age.get();
// Architectures with weak memory model require a barrier here
// to guarantee that bottom is not older than age,
@@ -670,7 +701,7 @@
}
template inline bool
-GenericTaskQueue::pop_local(E& t) {
+GenericTaskQueue::pop_local(volatile E& t) {
uint localBot = _bottom;
// This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method
@@ -768,7 +799,7 @@
}
volatile ObjArrayTask& operator =(const volatile ObjArrayTask& t) volatile {
- _obj = t._obj;
+ (void)const_cast(_obj = t._obj);
_index = t._index;
return *this;
}
diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/vmError.cpp
--- a/src/share/vm/utilities/vmError.cpp Thu Oct 10 18:26:22 2013 +0200
+++ b/src/share/vm/utilities/vmError.cpp Fri Oct 11 10:38:03 2013 +0200
@@ -574,6 +574,10 @@
STEP(120, "(printing native stack)" )
if (_verbose) {
+ if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
+ // We have printed the native stack in platform-specific code
+ // Windows/x64 needs special handling.
+ } else {
frame fr = _context ? os::fetch_frame_from_context(_context)
: os::current_frame();
@@ -586,6 +590,13 @@
while (count++ < StackPrintLimit) {
fr.print_on_error(st, buf, sizeof(buf));
st->cr();
+ // Compiled code may use EBP register on x86 so it looks like
+ // non-walkable C frame. Use frame.sender() for java frames.
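Back in taskqueue.hpp, the comment block above documents the ABP deque that GenericTaskQueue implements: the owner pushes and pops at one end, thieves take from the other, and indices wrap around the array. With the age/tag machinery that makes the real queue lock-free stripped out, the two-ended discipline looks like this (a single-threaded structural sketch, not the concurrent algorithm):

    #include <cassert>

    // Structural sketch of a bounded deque with wrap-around: the owner pushes and
    // pops at _bottom, thieves take from _top. No atomics here -- single-threaded.
    template <class E, unsigned N>
    struct ToyTaskQueue {
      E        _elems[N];
      unsigned _bottom = 0;   // owner end
      unsigned _top    = 0;   // steal end
      unsigned size() const { return (_bottom - _top + N) % N; }

      bool push(E t) {                        // owner only
        if (size() == N - 1) return false;    // one slot kept free to disambiguate
        _elems[_bottom] = t;
        _bottom = (_bottom + 1) % N;          // wrap-around at the end of storage
        return true;
      }
      bool pop_local(E& t) {                  // owner only: LIFO end
        if (size() == 0) return false;
        _bottom = (_bottom + N - 1) % N;
        t = _elems[_bottom];
        return true;
      }
      bool pop_global(E& t) {                 // thieves: FIFO end
        if (size() == 0) return false;
        t = _elems[_top];
        _top = (_top + 1) % N;
        return true;
      }
    };

    int main() {
      ToyTaskQueue<int, 8> q;
      for (int i = 1; i <= 3; i++) q.push(i);
      int t;
      assert(q.pop_local(t)  && t == 3);  // owner sees the most recently pushed task
      assert(q.pop_global(t) && t == 1);  // a thief takes the oldest task
      assert(q.pop_local(t)  && t == 2);
      assert(!q.pop_local(t));            // empty
      return 0;
    }

The error-reporting hunk that resumes below is unrelated to the queue; it continues the native stack walker in vmError.cpp.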
+ if (_thread && _thread->is_Java_thread() && fr.is_java_frame()) { + RegisterMap map((JavaThread*)_thread, false); // No update + fr = fr.sender(&map); + continue; + } if (os::is_first_C_frame(&fr)) break; fr = os::get_sender_for_C_frame(&fr); } @@ -597,6 +608,7 @@ st->cr(); } } + } STEP(130, "(printing Java stack)" ) diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/vmError.hpp --- a/src/share/vm/utilities/vmError.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/vmError.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -136,6 +136,10 @@ // check to see if fatal error reporting is in progress static bool fatal_error_in_progress() { return first_error != NULL; } + + static jlong get_first_error_tid() { + return first_error_tid; + } }; #endif // SHARE_VM_UTILITIES_VMERROR_HPP diff -r ccb4f2af2319 -r cefad50507d8 src/share/vm/utilities/yieldingWorkgroup.hpp --- a/src/share/vm/utilities/yieldingWorkgroup.hpp Thu Oct 10 18:26:22 2013 +0200 +++ b/src/share/vm/utilities/yieldingWorkgroup.hpp Fri Oct 11 10:38:03 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,7 @@ #define SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS #include "utilities/workgroup.hpp" -#endif // INCLUDE_ALL_GCS - // Forward declarations class YieldingFlexibleWorkGang; diff -r ccb4f2af2319 -r cefad50507d8 test/TEST.ROOT --- a/test/TEST.ROOT Thu Oct 10 18:26:22 2013 +0200 +++ b/test/TEST.ROOT Fri Oct 11 10:38:03 2013 +0200 @@ -25,7 +25,8 @@ # This file identifies the root of the test-suite hierarchy. # It also contains test-suite configuration information. -# DO NOT EDIT without first contacting hotspot-regtest@sun.com # The list of keywords supported in this test suite keys=cte_test jcmd nmt regression gc + +groups=TEST.groups [closed/TEST.groups] diff -r ccb4f2af2319 -r cefad50507d8 test/TEST.groups --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/TEST.groups Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,205 @@ +# +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. 
+# + +# Profile-based Test Group Definitions +# +# These groups define the tests that cover the different possible runtimes: +# - compact1, compact2, compact3, full JRE, JDK +# +# In addition they support testing of the minimal VM on compact1 and compact2. +# Essentially this defines groups based around the specified API's and VM +# services available in the runtime. +# +# The groups are defined hierarchically in two forms: +# - The need_xxx groups list all the tests that have a dependency on +# a specific profile. This is either because it tests a feature in +# that profile, or the test infrastructure uses a feature in that +# profile. +# - The primary groups are defined in terms of the other primary groups +# combined with the needs_xxx groups (including and excluding them as +# appropriate). For example the jre can run all tests from compact3, plus +# those from needs_jre, but excluding those from need_jdk. +# +# The bottom group defines all the actual tests to be considered, simply +# by listing the top-level test directories. +# +# To use a group simply list it on the jtreg command line eg: +# jtreg :jdk +# runs all tests. While +# jtreg :compact2 +# runs those tests that only require compact1 and compact2 API's. +# + +# Full JDK can run all tests +# +jdk = \ + :jre \ + :needs_jdk + +# Tests that require a full JDK to execute. Either they test a feature +# only in the JDK or they use tools that are only in the JDK. The latter +# can be resolved in some cases by using tools from the compile-jdk. +# +needs_jdk = \ + gc/TestG1ZeroPGCTJcmdThreadPrint.java \ + gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \ + gc/metaspace/TestMetaspacePerfCounters.java \ + runtime/6819213/TestBootNativeLibraryPath.java \ + runtime/6878713/Test6878713.sh \ + runtime/6925573/SortMethodsTest.java \ + runtime/7107135/Test7107135.sh \ + runtime/7158988/FieldMonitor.java \ + runtime/7194254/Test7194254.java \ + runtime/jsig/Test8017498.sh \ + runtime/Metaspace/FragmentMetaspace.java \ + runtime/NMT/BaselineWithParameter.java \ + runtime/NMT/JcmdScale.java \ + runtime/NMT/JcmdWithNMTDisabled.java \ + runtime/NMT/MallocTestType.java \ + runtime/NMT/ReleaseCommittedMemory.java \ + runtime/NMT/ShutdownTwice.java \ + runtime/NMT/SummaryAfterShutdown.java \ + runtime/NMT/SummarySanityCheck.java \ + runtime/NMT/ThreadedMallocTestType.java \ + runtime/NMT/ThreadedVirtualAllocTestType.java \ + runtime/NMT/VirtualAllocTestType.java \ + runtime/RedefineObject/TestRedefineObject.java \ + runtime/XCheckJniJsig/XCheckJSig.java \ + serviceability/attach/AttachWithStalePidFile.java + +# JRE adds further tests to compact3 +# +jre = \ + :compact3 \ + :needs_jre \ + -:needs_jdk + +# Tests that require the full JRE +# +needs_jre = \ + compiler/6852078/Test6852078.java \ + compiler/7047069/Test7047069.java \ + runtime/6294277/SourceDebugExtension.java + +# Compact 3 adds further tests to compact2 +# +compact3 = \ + :compact2 \ + :needs_compact3 \ + -:needs_jre \ + -:needs_jdk + + +# Tests that require compact3 API's +# +needs_compact3 = \ + compiler/whitebox/DeoptimizeMethodTest.java \ + compiler/whitebox/SetForceInlineMethodTest.java \ + compiler/whitebox/SetDontInlineMethodTest.java \ + compiler/whitebox/DeoptimizeAllTest.java \ + compiler/whitebox/MakeMethodNotCompilableTest.java \ + compiler/whitebox/ClearMethodStateTest.java \ + compiler/whitebox/EnqueueMethodForCompilationTest.java \ + compiler/whitebox/IsMethodCompilableTest.java \ + gc/6581734/Test6581734.java \ + gc/7072527/TestFullGCCount.java \ + 
gc/7168848/HumongousAlloc.java \ + gc/arguments/TestG1HeapRegionSize.java \ + gc/metaspace/TestMetaspaceMemoryPool.java \ + runtime/InternalApi/ThreadCpuTimesDeadlock.java \ + serviceability/threads/TestFalseDeadLock.java + +# Compact 2 adds full VM tests +compact2 = \ + :compact2_minimal \ + :compact1 \ + :needs_full_vm_compact2 \ + -:needs_compact3 \ + -:needs_jre \ + -:needs_jdk + +# Tests that require compact2 API's and a full VM +# +needs_full_vm_compact2 = + +# Compact 1 adds full VM tests +# +compact1 = \ + :compact1_minimal \ + :needs_full_vm_compact1 \ + -:needs_compact2 \ + -:needs_full_vm_compact2 \ + -:needs_compact3 \ + -:needs_jre \ + -:needs_jdk + +# Tests that require compact1 API's and a full VM +# +needs_full_vm_compact1 = \ + runtime/NMT \ + gc/g1/TestRegionAlignment.java \ + gc/g1/TestShrinkToOneRegion.java \ + gc/metaspace/G1AddMetaspaceDependency.java \ + gc/startup_warnings/TestCMS.java \ + gc/startup_warnings/TestCMSIncrementalMode.java \ + gc/startup_warnings/TestCMSNoIncrementalMode.java \ + gc/startup_warnings/TestDefaultMaxRAMFraction.java \ + gc/startup_warnings/TestDefNewCMS.java \ + gc/startup_warnings/TestIncGC.java \ + gc/startup_warnings/TestParallelGC.java \ + gc/startup_warnings/TestParallelScavengeSerialOld.java \ + gc/startup_warnings/TestParNewCMS.java \ + gc/startup_warnings/TestParNewSerialOld.java \ + runtime/6929067/Test6929067.sh \ + runtime/SharedArchiveFile/SharedArchiveFile.java + +# Minimal VM on Compact 2 adds in some compact2 tests +# +compact2_minimal = \ + :compact1_minimal \ + :needs_compact2 \ + -:needs_full_vm_compact2 \ + -:needs_compact3 \ + -:needs_jre \ + -:needs_jdk + +# Tests that require compact2 API's +# +needs_compact2 = \ + compiler/6589834/Test_ia32.java + +# All tests that run on the most minimal configuration: Minimal VM on Compact 1 +compact1_minimal = \ + serviceability/ \ + compiler/ \ + testlibrary/ \ + testlibrary_tests/ \ + sanity/ \ + runtime/ \ + gc/ \ + -:needs_full_vm_compact1 \ + -:needs_full_vm_compact2 \ + -:needs_compact2 \ + -:needs_compact3 \ + -:needs_jre \ + -:needs_jdk diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/8004051/Test8004051.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/8004051/Test8004051.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/** + * @test + * @bug 8004051 + * @bug 8005722 + * @summary assert(_oprs_len[mode] < maxNumberOfOperands) failed: array overflow + * + * @run main/othervm -Xcomp -client Test8004051 + */ + +public class Test8004051 { + public static void main(String[] argv) { + Object o = new Object(); + fillPrimRect(1.1f, 1.2f, 1.3f, 1.4f, + o, o, + 1.5f, 1.6f, 1.7f, 1.8f, + 2.0f, 2.1f, 2.2f, 2.3f, + 2.4f, 2.5f, 2.6f, 2.7f, + 100, 101); + System.out.println("Test passed, test did not assert"); + } + + static boolean fillPrimRect(float x, float y, float w, float h, + Object rectTex, Object wrapTex, + float bx, float by, float bw, float bh, + float f1, float f2, float f3, float f4, + float f5, float f6, float f7, float f8, + int i1, int i2 ) { + System.out.println(x + " " + y + " " + w + " " + h + " " + + bx + " " + by + " " + bw + " " + bh); + return true; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/ciReplay/common.sh --- a/test/compiler/ciReplay/common.sh Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/ciReplay/common.sh Fri Oct 11 10:38:03 2013 +0200 @@ -89,7 +89,10 @@ # $1 - initial error_code common_tests() { positive_test $1 "COMMON :: THE SAME FLAGS" - positive_test `expr $1 + 1` "COMMON :: TIERED" -XX:+TieredCompilation + if [ $tiered_available -eq 1 ] + then + positive_test `expr $1 + 1` "COMMON :: TIERED" -XX:+TieredCompilation + fi } # $1 - initial error_code @@ -115,8 +118,11 @@ then negative_test $1 "SERVER :: NON-TIERED" -XX:-TieredCompilation \ -server - positive_test `expr $1 + 1` "SERVER :: TIERED" -XX:+TieredCompilation \ - -server + if [ $tiered_available -eq 1 ] + then + positive_test `expr $1 + 1` "SERVER :: TIERED" -XX:+TieredCompilation \ + -server + fi fi nontiered_tests `expr $1 + 2` $client_level } @@ -167,6 +173,9 @@ grep -c Client` server_available=`${JAVA} ${TESTVMOPTS} -server -Xinternalversion 2>&1 | \ grep -c Server` +tiered_available=`${JAVA} ${TESTVMOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \ + grep TieredCompilation | \ + grep -c true` is_tiered=`${JAVA} ${TESTVMOPTS} -XX:+PrintFlagsFinal -version | \ grep TieredCompilation | \ grep -c true` @@ -177,6 +186,7 @@ echo "client_available=$client_available" echo "server_available=$server_available" +echo "tiered_available=$tiered_available" echo "is_tiered=$is_tiered" # crash vm in compiler thread with generation replay data and 'small' dump-file @@ -186,6 +196,11 @@ then # enable core dump ulimit -c unlimited + + if [ $VM_OS = "solaris" ] + then + coreadm -p core $$ + fi fi cmd="${JAVA} ${TESTVMOPTS} $@ \ diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/gcbarriers/G1CrashTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/gcbarriers/G1CrashTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 8023472 + * @summary C2 optimization breaks with G1 + * + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest + * + * @author pbiswal@palantir.com + */ + +public class G1CrashTest { + static Object[] set = new Object[11]; + + public static void main(String[] args) throws InterruptedException { + for (int j = 0; j < Integer.getInteger("count"); j++) { + Object key = new Object(); + insertKey(key); + if (j > set.length / 2) { + Object[] oldKeys = set; + set = new Object[2 * set.length - 1]; + for (Object o : oldKeys) { + if (o != null) + insertKey(o); + } + } + } + } + + static void insertKey(Object key) { + int hash = key.hashCode() & 0x7fffffff; + int index = hash % set.length; + Object cur = set[index]; + if (cur == null) + set[index] = key; + else + insertKeyRehash(key, index, hash, cur); + } + + static void insertKeyRehash(Object key, int index, int hash, Object cur) { + int loopIndex = index; + int firstRemoved = -1; + do { + if (cur == "dead") + firstRemoved = 1; + index--; + if (index < 0) + index += set.length; + cur = set[index]; + if (cur == null) { + if (firstRemoved != -1) + set[firstRemoved] = "dead"; + else + set[index] = key; + return; + } + } while (index != loopIndex); + if (firstRemoved != -1) + set[firstRemoved] = null; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/CondTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/CondTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
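The mathexact tests that follow (CondTest, ConstantTest, LoadTest, LoopDependentTest, NonConstantTest) all exercise the JDK 8 java.lang.Math.addExact intrinsic, which returns the exact sum or throws ArithmeticException on int overflow:

    int three = Math.addExact(1, 2);         // 3
    try {
        Math.addExact(Integer.MAX_VALUE, 1); // mathematical result does not fit in an int
    } catch (ArithmeticException expected) {
        // the compiled (intrinsified) form must preserve this throwing behavior
    }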
+ */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile CondTest.java Verify.java + * @run main CondTest + * + */ + +import java.lang.ArithmeticException; + +public class CondTest { + public static int result = 0; + + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + runTest(); + } + } + + public static void runTest() { + int i = 7; + while (java.lang.Math.addExact(i, result) < 89361) { + if ((java.lang.Math.addExact(i, i) & 1) == 1) { + i += 3; + } else if ((i & 5) == 4) { + i += 7; + } else if ((i & 0xf) == 6) { + i += 2; + } else { + i += 1; + } + result += 2; + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/ConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/ConstantTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test constant addExact + * @compile ConstantTest.java Verify.java + * @run main ConstantTest + * + */ + +import java.lang.ArithmeticException; + +public class ConstantTest { + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + Verify.verify(5, 7); + Verify.verify(Integer.MAX_VALUE, 1); + Verify.verify(Integer.MIN_VALUE, -1); + Verify.verify(Integer.MAX_VALUE, -1); + Verify.verify(Integer.MIN_VALUE, 1); + Verify.verify(Integer.MAX_VALUE / 2, Integer.MAX_VALUE / 2); + Verify.verify(Integer.MAX_VALUE / 2, (Integer.MAX_VALUE / 2) + 3); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/LoadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/LoadTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
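Each variant funnels through Verify.java (later in this patch), whose safeAddExact copies the JDK's overflow test: overflow occurred iff both operands have the opposite sign of the result, which ((x ^ r) & (y ^ r)) < 0 detects via the sign bit. A worked instance:

    int x = Integer.MAX_VALUE, y = 1;
    int r = x + y;                  // wraps to Integer.MIN_VALUE (negative)
    // x ^ r: operand and result signs differ -> sign bit set
    // y ^ r: operand and result signs differ -> sign bit set
    // (x ^ r) & (y ^ r): sign bit still set -> expression < 0 -> overflow detected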
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile LoadTest.java Verify.java + * @run main LoadTest + * + */ + +import java.lang.ArithmeticException; + +public class LoadTest { + public static java.util.Random rnd = new java.util.Random(); + public static int[] values = new int[256]; + + public static void main(String[] args) { + for (int i = 0; i < values.length; ++i) { + values[i] = rnd.nextInt(); + } + + for (int i = 0; i < 50000; ++i) { + Verify.verify(values[i & 255], values[i & 255] - i); + Verify.verify(values[i & 255] + i, values[i & 255] - i); + Verify.verify(values[i & 255], values[i & 255]); + if ((i & 1) == 1 && i > 5) { + Verify.verify(values[i & 255] + i, values[i & 255] - i); + } else { + Verify.verify(values[i & 255] - i, values[i & 255] + i); + } + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/LoopDependentTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/LoopDependentTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile LoopDependentTest.java Verify.java + * @run main LoopDependentTest + * + */ + +import java.lang.ArithmeticException; + +public class LoopDependentTest { + public static java.util.Random rnd = new java.util.Random(); + + public static void main(String[] args) { + int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); + for (int i = 0; i < 50000; ++i) { + Verify.verify(rnd1 + i, rnd2 + i); + Verify.verify(rnd1 + i, rnd2 + (i & 0xff)); + Verify.verify(rnd1 - i, rnd2 - (i & 0xff)); + Verify.verify(rnd1 + i + 1, rnd2 + i + 2); + Verify.verify(rnd1 + i * 2, rnd2 + i); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/NonConstantTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NonConstantTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8024924 + * @summary Test non constant addExact + * @compile NonConstantTest.java Verify.java + * @run main NonConstantTest + * + */ + +import java.lang.ArithmeticException; + +public class NonConstantTest { + public static java.util.Random rnd = new java.util.Random(); + + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt(); + Verify.verify(rnd1, rnd2); + Verify.verify(rnd1, rnd2 + 1); + Verify.verify(rnd1 + 1, rnd2); + Verify.verify(rnd1 - 1, rnd2); + Verify.verify(rnd1, rnd2 - 1); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/intrinsics/mathexact/Verify.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/Verify.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +public class Verify { + public static String throwWord(boolean threw) { + return (threw ? 
"threw" : "didn't throw"); + } + + public static void verify(int a, int b) { + boolean exception1 = false, exception2 = false; + int result1 = 0, result2 = 0; + try { + result1 = testIntrinsic(a, b); + } catch (ArithmeticException e) { + exception1 = true; + } + try { + result2 = testNonIntrinsic(a, b); + } catch (ArithmeticException e) { + exception2 = true; + } + + if (exception1 != exception2) { + throw new RuntimeException("Intrinsic version " + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b); + } + if (result1 != result2) { + throw new RuntimeException("Intrinsic version returned: " + a + " while NonIntrinsic version returned: " + b); + } + } + + public static int testIntrinsic(int a, int b) { + return java.lang.Math.addExact(a, b); + } + + public static int testNonIntrinsic(int a, int b) { + return safeAddExact(a, b); + } + + // Copied java.lang.Math.addExact to avoid intrinsification + public static int safeAddExact(int x, int y) { + int r = x + y; + // HD 2-12 Overflow iff both arguments have the opposite sign of the result + if (((x ^ r) & (y ^ r)) < 0) { + throw new ArithmeticException("integer overflow"); + } + return r; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/jsr292/ConcurrentClassLoadingTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/ConcurrentClassLoadingTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8022595 + * @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives + * + * @run main/othervm ConcurrentClassLoadingTest + */ +import java.util.*; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; + +public class ConcurrentClassLoadingTest { + int numThreads = 0; + long seed = 0; + CyclicBarrier l; + Random rand; + + public static void main(String[] args) throws Throwable { + ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest(); + test.parseArgs(args); + test.run(); + } + + void parseArgs(String[] args) { + int i = 0; + while (i < args.length) { + String flag = args[i]; + switch(flag) { + case "-seed": + seed = Long.parseLong(args[++i]); + break; + case "-numThreads": + numThreads = Integer.parseInt(args[++i]); + break; + default: + throw new Error("Unknown flag: " + flag); + } + ++i; + } + } + + void init() { + if (numThreads == 0) { + numThreads = Runtime.getRuntime().availableProcessors(); + } + + if (seed == 0) { + seed = (new Random()).nextLong(); + } + rand = new Random(seed); + + l = new CyclicBarrier(numThreads + 1); + + System.out.printf("Threads: %d\n", numThreads); + System.out.printf("Seed: %d\n", seed); + } + + final List loaders = new ArrayList<>(); + + void prepare() { + List c = new ArrayList<>(Arrays.asList(classNames)); + + // Split classes between loading threads + int count = (classNames.length / numThreads) + 1; + for (int t = 0; t < numThreads; t++) { + List sel = new ArrayList<>(); + + System.out.printf("Thread #%d:\n", t); + for (int i = 0; i < count; i++) { + if (c.size() == 0) break; + + int k = rand.nextInt(c.size()); + String elem = c.remove(k); + sel.add(elem); + System.out.printf("\t%s\n", elem); + } + loaders.add(new Loader(sel)); + } + + // Print diagnostic info when the test hangs + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + boolean alive = false; + for (Loader l : loaders) { + if (!l.isAlive()) continue; + + if (!alive) { + System.out.println("Some threads are still alive:"); + alive = true; + } + + System.out.println(l.getName()); + for (StackTraceElement elem : l.getStackTrace()) { + System.out.println("\t"+elem.toString()); + } + } + } + }); + } + + public void run() throws Throwable { + init(); + prepare(); + + for (Loader loader : loaders) { + loader.start(); + } + + l.await(); + + for (Loader loader : loaders) { + loader.join(); + } + } + + class Loader extends Thread { + List classes; + + public Loader(List classes) { + this.classes = classes; + setDaemon(true); + } + + @Override + public void run() { + try { + l.await(); + + for (String name : classes) { + Class.forName(name).getName(); + } + } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) { + throw new Error(e); + } + } + } + + final static String[] classNames = { + "java.lang.invoke.AbstractValidatingLambdaMetafactory", + "java.lang.invoke.BoundMethodHandle", + "java.lang.invoke.CallSite", + "java.lang.invoke.ConstantCallSite", + "java.lang.invoke.DirectMethodHandle", + "java.lang.invoke.InnerClassLambdaMetafactory", + "java.lang.invoke.InvokeDynamic", + "java.lang.invoke.InvokeGeneric", + "java.lang.invoke.InvokerBytecodeGenerator", + "java.lang.invoke.Invokers", + "java.lang.invoke.LambdaConversionException", + "java.lang.invoke.LambdaForm", + "java.lang.invoke.LambdaMetafactory", + "java.lang.invoke.MagicLambdaImpl", + "java.lang.invoke.MemberName", + "java.lang.invoke.MethodHandle", + 
"java.lang.invoke.MethodHandleImpl", + "java.lang.invoke.MethodHandleInfo", + "java.lang.invoke.MethodHandleNatives", + "java.lang.invoke.MethodHandleProxies", + "java.lang.invoke.MethodHandles", + "java.lang.invoke.MethodHandleStatics", + "java.lang.invoke.MethodType", + "java.lang.invoke.MethodTypeForm", + "java.lang.invoke.MutableCallSite", + "java.lang.invoke.SerializedLambda", + "java.lang.invoke.SimpleMethodHandle", + "java.lang.invoke.SwitchPoint", + "java.lang.invoke.TypeConvertingMethodAdapter", + "java.lang.invoke.VolatileCallSite", + "java.lang.invoke.WrongMethodTypeException" + }; +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/jsr292/methodHandleExceptions/ByteClassLoader.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/methodHandleExceptions/ByteClassLoader.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * A minimal classloader for loading bytecodes that could not result from + * properly compiled Java. + * + * @author dr2chase + */ +public class ByteClassLoader extends ClassLoader { + /** + * (pre)load class name using classData for the definition. + * + * @param name + * @param classData + * @return + */ + public Class loadBytes(String name, byte[] classData) { + Class clazz = defineClass(name, classData, 0, classData.length); + resolveClass(clazz); + return clazz; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/jsr292/methodHandleExceptions/C.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/methodHandleExceptions/C.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * Test class -- implements I, which provides default for m, but this class + * declares it abstract which (should) hide the interface default, and throw + * an abstract method error if it is called (calling it requires bytecode hacking + * or inconsistent compilation). + */ +public abstract class C implements I { + public abstract int m(); +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/jsr292/methodHandleExceptions/I.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/methodHandleExceptions/I.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +public interface I { + default public int m() { return 1; } +} diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/jsr292/methodHandleExceptions/TestAMEnotNPE.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/methodHandleExceptions/TestAMEnotNPE.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
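Why TestAMEnotNPE (next) assembles class D with ASM instead of compiling it: javac rejects a concrete D extending C without an implementation of m(), because C re-abstracts the default method inherited from I. Schematically:

    // interface I { default int m() { return 1; } }              (I.java above)
    // abstract class C implements I { public abstract int m(); } (C.java above)
    // class D extends C { }  <-- javac error: D is not abstract and
    //                            does not override abstract method m()
    // Hand-written bytecode for D bypasses that check, so invoking m() on a D
    // must raise AbstractMethodError at run time, not NullPointerException.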
+ * + */ + +import java.lang.reflect.InvocationTargetException; +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.Handle; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import jdk.internal.org.objectweb.asm.Opcodes; + +/** + * @test + * @bug 8025260 + * @summary Ensure that AbstractMethodError is thrown, not NullPointerException, through MethodHandles::jump_from_method_handle code path + * + * @compile -XDignore.symbol.file ByteClassLoader.java I.java C.java TestAMEnotNPE.java + * @run main/othervm TestAMEnotNPE + */ + +public class TestAMEnotNPE implements Opcodes { + + /** + * The bytes for D, a NOT abstract class extending abstract class C + * without supplying an implementation for abstract method m. + * There is a default method in the interface I, but it should lose to + * the abstract class. + + class D extends C { + D() { super(); } + // does not define m + } + + * @return + * @throws Exception + */ + public static byte[] bytesForD() throws Exception { + + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES|ClassWriter.COMPUTE_MAXS); + MethodVisitor mv; + + cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, "D", null, "C", null); + + { + mv = cw.visitMethod(ACC_PUBLIC, "", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "C", "", "()V"); + mv.visitInsn(RETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + cw.visitEnd(); + + return cw.toByteArray(); + } + + + /** + * The bytecodes for an invokeExact of a particular methodHandle, I.m, invoked on a D + + class T { + T() { super(); } // boring constructor + int test() { + MethodHandle mh = `I.m():int`; + D d = new D(); + return mh.invokeExact(d); // Should explode here, AbstractMethodError + } + } + + * @return + * @throws Exception + */ + public static byte[] bytesForT() throws Exception { + + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES|ClassWriter.COMPUTE_MAXS); + MethodVisitor mv; + + cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, "T", null, "java/lang/Object", null); + { + mv = cw.visitMethod(ACC_PUBLIC, "", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "", "()V"); + mv.visitInsn(RETURN); + mv.visitMaxs(0,0); + mv.visitEnd(); + } + { + mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "test", "()I", null, null); + mv.visitCode(); + mv.visitLdcInsn(new Handle(Opcodes.H_INVOKEINTERFACE, "I", "m", "()I")); + mv.visitTypeInsn(NEW, "D"); + mv.visitInsn(DUP); + mv.visitMethodInsn(INVOKESPECIAL, "D", "", "()V"); + mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/invoke/MethodHandle", "invokeExact", "(LI;)I"); + mv.visitInsn(IRETURN); + mv.visitMaxs(0,0); + mv.visitEnd(); + } + cw.visitEnd(); + return cw.toByteArray(); + } + + public static void main(String args[] ) throws Throwable { + ByteClassLoader bcl = new ByteClassLoader(); + Class d = bcl.loadBytes("D", bytesForD()); + Class t = bcl.loadBytes("T", bytesForT()); + try { + Object result = t.getMethod("test").invoke(null); + System.out.println("Expected AbstractMethodError wrapped in InvocationTargetException, saw no exception"); + throw new Error("Missing expected exception"); + } catch (InvocationTargetException e) { + Throwable th = e.getCause(); + if (th instanceof AbstractMethodError) { + th.printStackTrace(System.out); + System.out.println("PASS, saw expected exception (AbstractMethodError, wrapped in InvocationTargetException)."); + } else { + System.out.println("Expected AbstractMethodError wrapped in 
InvocationTargetException, saw " + th);
+                throw th;
+            }
+        }
+    }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/print/PrintInlining.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/print/PrintInlining.java Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when run with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+  public static void main(String[] args) {
+    System.out.println("Passed");
+  }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/ClearMethodStateTest.java
--- a/test/compiler/whitebox/ClearMethodStateTest.java Thu Oct 10 18:26:22 2013 +0200
+++ b/test/compiler/whitebox/ClearMethodStateTest.java Fri Oct 11 10:38:03 2013 +0200
@@ -23,6 +23,7 @@
/*
 * @test ClearMethodStateTest
+ * @bug 8006683 8007288 8022832
 * @library /testlibrary /testlibrary/whitebox
 * @build ClearMethodStateTest
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
@@ -59,16 +60,19 @@
        WHITE_BOX.clearMethodState(method);
        checkCompiled();
        WHITE_BOX.clearMethodState(method);
-        WHITE_BOX.deoptimizeMethod(method);
+        deoptimize();
        checkNotCompiled();
-
+        if (testCase.isOsr) {
+            // this part of the test isn't applicable to the OSR test case
+            return;
+        }
        if (!TIERED_COMPILATION) {
            WHITE_BOX.clearMethodState(method);
            compile(COMPILE_THRESHOLD);
            checkCompiled();
-            WHITE_BOX.deoptimizeMethod(method);
+            deoptimize();
            checkNotCompiled();
            WHITE_BOX.clearMethodState(method);
diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/CompilerWhiteBoxTest.java
--- a/test/compiler/whitebox/CompilerWhiteBoxTest.java Thu Oct 10 18:26:22 2013 +0200
+++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java Fri Oct 11 10:38:03 2013 +0200
@@ -44,8 +44,14 @@
    protected static int COMP_LEVEL_ANY = -1;
    /** {@code CompLevel::CompLevel_simple} -- C1 */
    protected static int COMP_LEVEL_SIMPLE = 1;
+    /** {@code CompLevel::CompLevel_limited_profile} -- C1, invocation & backedge counters */
+    protected static int COMP_LEVEL_LIMITED_PROFILE = 2;
+    /** {@code CompLevel::CompLevel_full_profile} -- C1, invocation & backedge counters + mdo */
+    protected static int COMP_LEVEL_FULL_PROFILE = 3;
    /** {@code CompLevel::CompLevel_full_optimization} -- C2 or Shark */
    protected static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+    /** Maximum value for CompLevel */
+    protected static int COMP_LEVEL_MAX = COMP_LEVEL_FULL_OPTIMIZATION;
    /** Instance of WhiteBox */
protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
@@ -64,6 +70,24 @@
    /** Flag for verbose output, true if {@code -Dverbose} specified */
    protected static final boolean IS_VERBOSE
            = System.getProperty("verbose") != null;
+    /** invocation count needed to trigger compilation */
+    protected static final int THRESHOLD;
+    /** invocation count needed to trigger OSR compilation */
+    protected static final long BACKEDGE_THRESHOLD;
+    /** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
+    protected static final String MODE
+            = System.getProperty("java.vm.info");
+
+    static {
+        if (TIERED_COMPILATION) {
+            THRESHOLD = 150000;
+            BACKEDGE_THRESHOLD = 0xFFFFFFFFL;
+        } else {
+            THRESHOLD = COMPILE_THRESHOLD;
+            BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption(
+                    "OnStackReplacePercentage"));
+        }
+    }

    /**
     * Returns value of VM option.
@@ -112,7 +136,7 @@
    /** tested method */
    protected final Executable method;
-    private final Callable callable;
+    protected final TestCase testCase;

    /**
     * Constructor.
@@ -123,7 +147,7 @@
        Objects.requireNonNull(testCase);
        System.out.println("TEST CASE:" + testCase.name());
        method = testCase.executable;
-        callable = testCase.callable;
+        this.testCase = testCase;
    }

    /**
@@ -169,13 +193,19 @@
        if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
            throw new RuntimeException(method + " must not be in queue");
        }
-        if (WHITE_BOX.isMethodCompiled(method)) {
+        if (WHITE_BOX.isMethodCompiled(method, false)) {
            throw new RuntimeException(method + " must not be compiled");
        }
-        if (WHITE_BOX.getMethodCompilationLevel(method) != 0) {
+        if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) {
            throw new RuntimeException(method + " comp_level must be == 0");
        }
-    }
+        if (WHITE_BOX.isMethodCompiled(method, true)) {
+            throw new RuntimeException(method + " must not be osr_compiled");
+        }
+        if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
+            throw new RuntimeException(method + " osr_comp_level must be == 0");
+        }
+    }

    /**
     * Checks that {@linkplain #method} is compiled.
@@ -192,12 +222,44 @@
                    method, System.currentTimeMillis() - start);
            return;
        }
-        if (!WHITE_BOX.isMethodCompiled(method)) {
-            throw new RuntimeException(method + " must be compiled");
+        if (!WHITE_BOX.isMethodCompiled(method, testCase.isOsr)) {
+            throw new RuntimeException(method + " must be "
+                + (testCase.isOsr ? "osr_" : "") + "compiled");
+        }
+        if (WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr) == 0) {
+            throw new RuntimeException(method
+                + (testCase.isOsr ?
" osr_" : " ") + + "comp_level must be != 0"); + } + } + + protected final void deoptimize() { + WHITE_BOX.deoptimizeMethod(method, testCase.isOsr); + if (testCase.isOsr) { + WHITE_BOX.deoptimizeMethod(method, false); } - if (WHITE_BOX.getMethodCompilationLevel(method) == 0) { - throw new RuntimeException(method + " comp_level must be != 0"); - } + } + + protected final int getCompLevel() { + return WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr); + } + + protected final boolean isCompilable() { + return WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY, + testCase.isOsr); + } + + protected final boolean isCompilable(int compLevel) { + return WHITE_BOX.isMethodCompilable(method, compLevel, testCase.isOsr); + } + + protected final void makeNotCompilable() { + WHITE_BOX.makeMethodNotCompilable(method, COMP_LEVEL_ANY, + testCase.isOsr); + } + + protected final void makeNotCompilable(int compLevel) { + WHITE_BOX.makeMethodNotCompilable(method, compLevel, testCase.isOsr); } /** @@ -226,12 +288,18 @@ protected final void printInfo() { System.out.printf("%n%s:%n", method); System.out.printf("\tcompilable:\t%b%n", - WHITE_BOX.isMethodCompilable(method)); + WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY, false)); System.out.printf("\tcompiled:\t%b%n", - WHITE_BOX.isMethodCompiled(method)); + WHITE_BOX.isMethodCompiled(method, false)); System.out.printf("\tcomp_level:\t%d%n", - WHITE_BOX.getMethodCompilationLevel(method)); - System.out.printf("\tin_queue:\t%b%n", + WHITE_BOX.getMethodCompilationLevel(method, false)); + System.out.printf("\tosr_compilable:\t%b%n", + WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY, true)); + System.out.printf("\tosr_compiled:\t%b%n", + WHITE_BOX.isMethodCompiled(method, true)); + System.out.printf("\tosr_comp_level:\t%d%n", + WHITE_BOX.getMethodCompilationLevel(method, true)); + System.out.printf("\tin_queue:\t%b%n", WHITE_BOX.isMethodQueuedForCompilation(method)); System.out.printf("compile_queues_size:\t%d%n%n", WHITE_BOX.getCompileQueuesSize()); @@ -244,18 +312,22 @@ /** * Tries to trigger compilation of {@linkplain #method} by call - * {@linkplain #callable} enough times. + * {@linkplain #testCase.callable} enough times. * * @return accumulated result * @see #compile(int) */ protected final int compile() { - return compile(Math.max(COMPILE_THRESHOLD, 150000)); + if (testCase.isOsr) { + return compile(1); + } else { + return compile(THRESHOLD); + } } /** * Tries to trigger compilation of {@linkplain #method} by call - * {@linkplain #callable} specified times. + * {@linkplain #testCase.callable} specified times. 
* * @param count invocation count * @return accumulated result @@ -265,7 +337,7 @@ Integer tmp; for (int i = 0; i < count; ++i) { try { - tmp = callable.call(); + tmp = testCase.callable.call(); } catch (Exception e) { tmp = null; } @@ -283,23 +355,36 @@ */ enum TestCase { /** constructor test case */ - CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE), + CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE, false), /** method test case */ - METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE), + METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false), /** static method test case */ - STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE); + STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE, false), + + /** OSR constructor test case */ + OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR, + Helper.OSR_CONSTRUCTOR_CALLABLE, true), + /** OSR method test case */ + OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true), + /** OSR static method test case */ + OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true); /** tested method */ final Executable executable; /** object to invoke {@linkplain #executable} */ final Callable callable; + /** flag for OSR test case */ + final boolean isOsr; - private TestCase(Executable executable, Callable callable) { + private TestCase(Executable executable, Callable callable, + boolean isOsr) { this.executable = executable; this.callable = callable; + this.isOsr = isOsr; } private static class Helper { + private static final Callable CONSTRUCTOR_CALLABLE = new Callable() { @Override @@ -326,9 +411,39 @@ } }; + private static final Callable OSR_CONSTRUCTOR_CALLABLE + = new Callable() { + @Override + public Integer call() throws Exception { + return new Helper(null).hashCode(); + } + }; + + private static final Callable OSR_METHOD_CALLABLE + = new Callable() { + private final Helper helper = new Helper(); + + @Override + public Integer call() throws Exception { + return helper.osrMethod(); + } + }; + + private static final Callable OSR_STATIC_CALLABLE + = new Callable() { + @Override + public Integer call() throws Exception { + return osrStaticMethod(); + } + }; + + private static final Constructor CONSTRUCTOR; + private static final Constructor OSR_CONSTRUCTOR; private static final Method METHOD; private static final Method STATIC; + private static final Method OSR_METHOD; + private static final Method OSR_STATIC; static { try { @@ -338,17 +453,26 @@ "exception on getting method Helper.(int)", e); } try { - METHOD = Helper.class.getDeclaredMethod("method"); + OSR_CONSTRUCTOR = Helper.class.getDeclaredConstructor( + Object.class); } catch (NoSuchMethodException | SecurityException e) { throw new RuntimeException( - "exception on getting method Helper.method()", e); + "exception on getting method Helper.(Object)", e); } + METHOD = getMethod("method"); + STATIC = getMethod("staticMethod"); + OSR_METHOD = getMethod("osrMethod"); + OSR_STATIC = getMethod("osrStaticMethod"); + } + + private static Method getMethod(String name) { try { - STATIC = Helper.class.getDeclaredMethod("staticMethod"); + return Helper.class.getDeclaredMethod(name); } catch (NoSuchMethodException | SecurityException e) { throw new RuntimeException( - "exception on getting method Helper.staticMethod()", e); + "exception on getting method Helper." 
+ name, e); } + } private static int staticMethod() { @@ -359,12 +483,39 @@ return 42; } + private static int osrStaticMethod() { + int result = 0; + for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + result += staticMethod(); + } + return result; + } + + private int osrMethod() { + int result = 0; + for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + result += method(); + } + return result; + } + private final int x; + // for method and OSR method test case public Helper() { x = 0; } + // for OSR constructor test case + private Helper(Object o) { + int result = 0; + for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) { + result += method(); + } + x = result; + } + + // for constructor test case private Helper(int x) { this.x = x; } diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/DeoptimizeAllTest.java --- a/test/compiler/whitebox/DeoptimizeAllTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/DeoptimizeAllTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ /* * @test DeoptimizeAllTest + * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build DeoptimizeAllTest * @run main ClassFileInstaller sun.hotspot.WhiteBox @@ -52,6 +53,12 @@ */ @Override protected void test() throws Exception { + if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith( + "compiled ")) { + System.err.printf("Warning: %s is not applicable in %s%n", + testCase.name(), CompilerWhiteBoxTest.MODE); + return; + } compile(); checkCompiled(); WHITE_BOX.deoptimizeAll(); diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/DeoptimizeMethodTest.java --- a/test/compiler/whitebox/DeoptimizeMethodTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/DeoptimizeMethodTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ /* * @test DeoptimizeMethodTest + * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build DeoptimizeMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox @@ -52,9 +53,15 @@ */ @Override protected void test() throws Exception { + if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith( + "compiled ")) { + System.err.printf("Warning: %s is not applicable in %s%n", + testCase.name(), CompilerWhiteBoxTest.MODE); + return; + } compile(); checkCompiled(); - WHITE_BOX.deoptimizeMethod(method); + deoptimize(); checkNotCompiled(); } } diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/EnqueueMethodForCompilationTest.java --- a/test/compiler/whitebox/EnqueueMethodForCompilationTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,10 +23,11 @@ /* * @test EnqueueMethodForCompilationTest + * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build EnqueueMethodForCompilationTest * @run main ClassFileInstaller sun.hotspot.WhiteBox - * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest + * @run main/othervm/timeout=600 -Xbootclasspath/a:. 
-Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest * @summary testing of WB::enqueueMethodForCompilation() * @author igor.ignatyev@oracle.com */ @@ -50,7 +51,7 @@ // method can not be compiled on level 'none' WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_NONE); - if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_NONE)) { + if (isCompilable(COMP_LEVEL_NONE)) { throw new RuntimeException(method + " is compilable at level COMP_LEVEL_NONE"); } @@ -60,27 +61,27 @@ WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_ANY); checkNotCompiled(); - WHITE_BOX.enqueueMethodForCompilation(method, 5); - if (!WHITE_BOX.isMethodCompilable(method, 5)) { - checkNotCompiled(); - compile(); - checkCompiled(); - } else { - checkCompiled(); - } - - int compLevel = WHITE_BOX.getMethodCompilationLevel(method); - WHITE_BOX.deoptimizeMethod(method); - checkNotCompiled(); - - WHITE_BOX.enqueueMethodForCompilation(method, compLevel); - checkCompiled(); - WHITE_BOX.deoptimizeMethod(method); + // not existing comp level + WHITE_BOX.enqueueMethodForCompilation(method, 42); checkNotCompiled(); compile(); checkCompiled(); - WHITE_BOX.deoptimizeMethod(method); + + int compLevel = getCompLevel(); + int bci = WHITE_BOX.getMethodEntryBci(method); + deoptimize(); + checkNotCompiled(); + WHITE_BOX.clearMethodState(method); + + WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci); + checkCompiled(); + deoptimize(); + checkNotCompiled(); + + compile(); + checkCompiled(); + deoptimize(); checkNotCompiled(); } } diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/IsMethodCompilableTest.java --- a/test/compiler/whitebox/IsMethodCompilableTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,11 +23,11 @@ /* * @test IsMethodCompilableTest - * @bug 8007270 + * @bug 8007270 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build IsMethodCompilableTest * @run main ClassFileInstaller sun.hotspot.WhiteBox - * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
 * @summary testing of WB::isMethodCompilable()
 * @author igor.ignatyev@oracle.com
 */
@@ -68,7 +68,13 @@
     */
    @Override
    protected void test() throws Exception {
-        if (!WHITE_BOX.isMethodCompilable(method)) {
+        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+                "compiled ")) {
+            System.err.printf("Warning: %s is not applicable in %s%n",
+                    testCase.name(), CompilerWhiteBoxTest.MODE);
+            return;
+        }
+        if (!isCompilable()) {
            throw new RuntimeException(method + " must be compilable");
        }
        System.out.println("PerMethodRecompilationCutoff = "
@@ -83,7 +89,8 @@
        for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) {
            compileAndDeoptimize();
        }
-        if (!WHITE_BOX.isMethodCompilable(method)) {
+        if (!testCase.isOsr && !isCompilable()) {
+            // in the OSR test case the number of deoptimizations may exceed the iteration count
            throw new RuntimeException(method + " is not compilable after "
                + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations");
        }
@@ -92,15 +99,16 @@
        // deoptimize 'PerMethodRecompilationCutoff' + 1 times
        long i;
        for (i = 0L; i < PER_METHOD_RECOMPILATION_CUTOFF
-                && WHITE_BOX.isMethodCompilable(method); ++i) {
+                && isCompilable(); ++i) {
            compileAndDeoptimize();
        }
-        if (i != PER_METHOD_RECOMPILATION_CUTOFF) {
+        if (!testCase.isOsr && i != PER_METHOD_RECOMPILATION_CUTOFF) {
+            // in the OSR test case the number of deoptimizations may exceed the iteration count
            throw new RuntimeException(method + " is not compilable after "
                + i + " iterations, but must only after "
                + PER_METHOD_RECOMPILATION_CUTOFF);
        }
-        if (WHITE_BOX.isMethodCompilable(method)) {
+        if (isCompilable()) {
            throw new RuntimeException(method + " is still compilable after "
                + PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
        }
@@ -109,7 +117,7 @@
        // WB.clearMethodState() must reset no-compilable flags
        WHITE_BOX.clearMethodState(method);
-        if (!WHITE_BOX.isMethodCompilable(method)) {
+        if (!isCompilable()) {
            throw new RuntimeException(method
                + " is not compilable after clearMethodState()");
        }
@@ -120,6 +128,6 @@
    private void compileAndDeoptimize() throws Exception {
        compile();
        waitBackgroundCompilation();
-        WHITE_BOX.deoptimizeMethod(method);
+        deoptimize();
    }
}
diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/MakeMethodNotCompilableTest.java
--- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java Thu Oct 10 18:26:22 2013 +0200
+++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java Fri Oct 11 10:38:03 2013 +0200
@@ -23,16 +23,16 @@
/*
 * @test MakeMethodNotCompilableTest
- * @bug 8012322
+ * @bug 8012322 8006683 8007288 8022832
 * @library /testlibrary /testlibrary/whitebox
 * @build MakeMethodNotCompilableTest
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest * @summary testing of WB::makeMethodNotCompilable() * @author igor.ignatyev@oracle.com */ public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest { - + private int bci; public static void main(String[] args) throws Exception { if (args.length == 0) { for (TestCase test : TestCase.values()) { @@ -62,26 +62,34 @@ */ @Override protected void test() throws Exception { + if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith( + "compiled ")) { + System.err.printf("Warning: %s is not applicable in %s%n", + testCase.name(), CompilerWhiteBoxTest.MODE); + return; + } checkNotCompiled(); - if (!WHITE_BOX.isMethodCompilable(method)) { + if (!isCompilable()) { throw new RuntimeException(method + " must be compilable"); } + bci = getBci(); + if (TIERED_COMPILATION) { final int tierLimit = TIERED_STOP_AT_LEVEL + 1; for (int testedTier = 1; testedTier < tierLimit; ++testedTier) { testTier(testedTier); } for (int testedTier = 1; testedTier < tierLimit; ++testedTier) { - WHITE_BOX.makeMethodNotCompilable(method, testedTier); - if (WHITE_BOX.isMethodCompilable(method, testedTier)) { + makeNotCompilable(testedTier); + if (isCompilable(testedTier)) { throw new RuntimeException(method + " must be not compilable at level" + testedTier); } - WHITE_BOX.enqueueMethodForCompilation(method, testedTier); + WHITE_BOX.enqueueMethodForCompilation(method, testedTier, bci); checkNotCompiled(); - if (!WHITE_BOX.isMethodCompilable(method)) { + if (!isCompilable()) { System.out.println(method + " is not compilable after level " + testedTier); } @@ -89,15 +97,20 @@ } else { compile(); checkCompiled(); - int compLevel = WHITE_BOX.getMethodCompilationLevel(method); - WHITE_BOX.deoptimizeMethod(method); - WHITE_BOX.makeMethodNotCompilable(method, compLevel); - if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) { + int compLevel = getCompLevel(); + deoptimize(); + makeNotCompilable(compLevel); + if (isCompilable(COMP_LEVEL_ANY)) { throw new RuntimeException(method + " must be not compilable at CompLevel::CompLevel_any," + " after it is not compilable at " + compLevel); } + WHITE_BOX.clearMethodState(method); + if (!isCompilable()) { + throw new RuntimeException(method + + " is not compilable after clearMethodState()"); + } // nocompilable at opposite level must make no sense int oppositeLevel; @@ -106,16 +119,16 @@ } else { oppositeLevel = COMP_LEVEL_SIMPLE; } - WHITE_BOX.makeMethodNotCompilable(method, oppositeLevel); + makeNotCompilable(oppositeLevel); - if (!WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) { + if (!isCompilable(COMP_LEVEL_ANY)) { throw new RuntimeException(method + " must be compilable at CompLevel::CompLevel_any," + " even it is not compilable at opposite level [" + compLevel + "]"); } - if (!WHITE_BOX.isMethodCompilable(method, compLevel)) { + if (!isCompilable(compLevel)) { throw new RuntimeException(method + " must be compilable at level " + compLevel + ", even it is not compilable at opposite level [" @@ -126,24 +139,24 @@ // clearing after tiered/non-tiered tests // WB.clearMethodState() must reset no-compilable flags WHITE_BOX.clearMethodState(method); - if (!WHITE_BOX.isMethodCompilable(method)) { + if (!isCompilable()) { throw new RuntimeException(method + " is not compilable after clearMethodState()"); } - WHITE_BOX.makeMethodNotCompilable(method); - if (WHITE_BOX.isMethodCompilable(method)) { + makeNotCompilable(); + if (isCompilable()) { throw 
new RuntimeException(method + " must be not compilable"); } compile(); checkNotCompiled(); - if (WHITE_BOX.isMethodCompilable(method)) { + if (isCompilable()) { throw new RuntimeException(method + " must be not compilable"); } // WB.clearMethodState() must reset no-compilable flags WHITE_BOX.clearMethodState(method); - if (!WHITE_BOX.isMethodCompilable(method)) { + if (!isCompilable()) { throw new RuntimeException(method + " is not compilable after clearMethodState()"); } @@ -153,24 +166,23 @@ // separately tests each tier private void testTier(int testedTier) { - if (!WHITE_BOX.isMethodCompilable(method, testedTier)) { + if (!isCompilable(testedTier)) { throw new RuntimeException(method + " is not compilable on start"); } - WHITE_BOX.makeMethodNotCompilable(method, testedTier); + makeNotCompilable(testedTier); // tests for all other tiers for (int anotherTier = 1, tierLimit = TIERED_STOP_AT_LEVEL + 1; anotherTier < tierLimit; ++anotherTier) { - boolean isCompilable = WHITE_BOX.isMethodCompilable(method, - anotherTier); + boolean isCompilable = isCompilable(anotherTier); if (sameCompile(testedTier, anotherTier)) { if (isCompilable) { throw new RuntimeException(method + " must be not compilable at level " + anotherTier + ", if it is not compilable at " + testedTier); } - WHITE_BOX.enqueueMethodForCompilation(method, anotherTier); + WHITE_BOX.enqueueMethodForCompilation(method, anotherTier, bci); checkNotCompiled(); } else { if (!isCompilable) { @@ -179,12 +191,12 @@ + ", even if it is not compilable at " + testedTier); } - WHITE_BOX.enqueueMethodForCompilation(method, anotherTier); + WHITE_BOX.enqueueMethodForCompilation(method, anotherTier, bci); checkCompiled(); - WHITE_BOX.deoptimizeMethod(method); + deoptimize(); } - if (!WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) { + if (!isCompilable(COMP_LEVEL_ANY)) { throw new RuntimeException(method + " must be compilable at 'CompLevel::CompLevel_any'" + ", if it is not compilable only at " + testedTier); @@ -193,7 +205,7 @@ // clear state after test WHITE_BOX.clearMethodState(method); - if (!WHITE_BOX.isMethodCompilable(method, testedTier)) { + if (!isCompilable(testedTier)) { throw new RuntimeException(method + " is not compilable after clearMethodState()"); } @@ -211,4 +223,13 @@ } return false; } + + private int getBci() { + compile(); + checkCompiled(); + int result = WHITE_BOX.getMethodEntryBci(method); + deoptimize(); + WHITE_BOX.clearMethodState(method); + return result; + } } diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/SetDontInlineMethodTest.java --- a/test/compiler/whitebox/SetDontInlineMethodTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/SetDontInlineMethodTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ /* * @test SetDontInlineMethodTest + * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build SetDontInlineMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox diff -r ccb4f2af2319 -r cefad50507d8 test/compiler/whitebox/SetForceInlineMethodTest.java --- a/test/compiler/whitebox/SetForceInlineMethodTest.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/compiler/whitebox/SetForceInlineMethodTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,6 +23,7 @@ /* * @test SetForceInlineMethodTest + * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build SetForceInlineMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox diff -r ccb4f2af2319 -r cefad50507d8 test/gc/TestObjectAlignment.java --- /dev/null Thu Jan 01 
00:00:00 1970 +0000 +++ b/test/gc/TestObjectAlignment.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestObjectAlignment + * @key gc + * @bug 8021823 + * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs + * @library /testlibrary + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 + */ + +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestObjectAlignment { + + public static byte[] garbage; + + private static boolean runsOn32bit() { + return System.getProperty("sun.arch.data.model").equals("32"); + } + + public static void main(String[] args) throws Exception { + if (runsOn32bit()) { + // 32 bit VMs do not allow setting 
ObjectAlignmentInBytes, so there is nothing to test. We still get called. + return; + } + for (int i = 0; i < 10; i++) { + garbage = new byte[1000]; + System.gc(); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/TestVerifyDuringStartup.java diff -r ccb4f2af2319 -r cefad50507d8 test/gc/arguments/TestAlignmentToUseLargePages.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/arguments/TestAlignmentToUseLargePages.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test TestAlignmentToUseLargePages + * @summary All parallel GC variants may use large pages without the requirement that the + * heap alignment is large page aligned. Other collectors also need to start up with odd sized heaps. + * @bug 8024396 + * @key gc + * @key regression + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages + */ + +public class TestAlignmentToUseLargePages { + public static void main(String args[]) throws Exception { + // nothing to do + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/arguments/TestCompressedClassFlags.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/arguments/TestCompressedClassFlags.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import com.oracle.java.testlibrary.*; + +/* + * @test + * @bug 8015107 + * @summary Tests that VM prints a warning when -XX:CompressedClassSpaceSize + * is used together with -XX:-UseCompressedClassPointers + * @library /testlibrary + */ +public class TestCompressedClassFlags { + public static void main(String[] args) throws Exception { + if (Platform.is64bit()) { + OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g", + "-XX:-UseCompressedClassPointers", + "-version"); + output.shouldContain("warning"); + output.shouldNotContain("error"); + output.shouldHaveExitValue(0); + } + } + + private static OutputAnalyzer runJava(String ... args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args); + return new OutputAnalyzer(pb.start()); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/arguments/TestUseCompressedOopsErgo.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/arguments/TestUseCompressedOopsErgo.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,50 @@ +/* +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestUseCompressedOopsErgo + * @key gc + * @bug 8010722 + * @summary Tests ergonomics for UseCompressedOops. 
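+ *          (The expectation verified below: the VM turns UseCompressedOops
+ *          on automatically as long as the maximum heap size fits the
+ *          compressed-oop range, and turns it off beyond that limit.)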
+ * @library /testlibrary /testlibrary/whitebox + * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC + */ + +public class TestUseCompressedOopsErgo { + + public static void main(String args[]) throws Exception { + if (!TestUseCompressedOopsErgoTools.is64bitVM()) { + // this test is relevant for 64 bit VMs only + return; + } + final String[] gcFlags = args; + TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags); + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/gc/arguments/TestUseCompressedOopsErgoTools.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/arguments/TestUseCompressedOopsErgoTools.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,177 @@ +/* +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. 
+*/ + +import sun.management.ManagementFactoryHelper; +import com.sun.management.HotSpotDiagnosticMXBean; +import com.sun.management.VMOption; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.ArrayList; +import java.util.Arrays; + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +class DetermineMaxHeapForCompressedOops { + public static void main(String[] args) throws Exception { + WhiteBox wb = WhiteBox.getWhiteBox(); + System.out.print(wb.getCompressedOopsMaxHeapSize()); + } +} + +class TestUseCompressedOopsErgoTools { + + private static long getCompressedClassSpaceSize() { + HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean(); + + VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize"); + return Long.parseLong(option.getValue()); + } + + + public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception { + OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false); + return Long.parseLong(output.getStdout()); + } + + public static boolean is64bitVM() { + String val = System.getProperty("sun.arch.data.model"); + if (val == null) { + throw new RuntimeException("Could not read sun.arch.data.model"); + } + if (val.equals("64")) { + return true; + } else if (val.equals("32")) { + return false; + } + throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model"); + } + + /** + * Executes a new VM process with the given class and parameters. + * @param vmargs Arguments to the VM to run + * @param classname Name of the class to run + * @param arguments Arguments to the class + * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string + * @return The OutputAnalyzer with the results for the invocation. 
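+   * @throws Exception If the child VM cannot be started or exits with a non-zero value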
+   */
+  public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
+    ArrayList<String> finalargs = new ArrayList<String>();
+
+    String[] whiteboxOpts = new String[] {
+      "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+      "-cp", System.getProperty("java.class.path"),
+    };
+
+    if (useTestDotJavaDotOpts) {
+      // System.getProperty("test.java.opts") is '' if no options are set,
+      // we need to skip such a result
+      String[] externalVMOpts = new String[0];
+      if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
+        externalVMOpts = System.getProperty("test.java.opts").split(" ");
+      }
+      finalargs.addAll(Arrays.asList(externalVMOpts));
+    }
+
+    finalargs.addAll(Arrays.asList(vmargs));
+    finalargs.addAll(Arrays.asList(whiteboxOpts));
+    finalargs.add(classname);
+    finalargs.addAll(Arrays.asList(arguments));
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(0);
+    return output;
+  }
+
+  private static String[] join(String[] part1, String part2) {
+    ArrayList<String> result = new ArrayList<String>();
+    result.addAll(Arrays.asList(part1));
+    result.add(part2);
+    return result.toArray(new String[0]);
+  }
+
+  public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
+    long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
+
+    // probe exactly at, just below and just above the ergonomic limit
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
+
+    // the use of HeapBaseMinAddress should not change the outcome
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
+
+    // use a different object alignment
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
+
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
+
+    // use a different CompressedClassSpaceSize
+    String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
+
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
+  }
+
+  private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
+     ArrayList<String> finalargs = new ArrayList<String>();
+     finalargs.addAll(Arrays.asList(args));
+     finalargs.add("-Xmx" + heapsize);
+     finalargs.add("-XX:+PrintFlagsFinal");
+     finalargs.add("-version");
+
+     String output = expectValid(finalargs.toArray(new
String[0])); + + boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output); + + Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops); + } + + private static boolean getFlagBoolValue(String flag, String where) { + Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where); + if (!m.find()) { + throw new RuntimeException("Could not find value for flag " + flag + " in output string"); + } + return m.group(1).equals("true"); + } + + private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(errorcode); + return output.getStdout(); + } + + private static String expectValid(String[] flags) throws Exception { + return expect(flags, false, false, 0); + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/gc/g1/TestSummarizeRSetStats.java --- a/test/gc/g1/TestSummarizeRSetStats.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/gc/g1/TestSummarizeRSetStats.java Fri Oct 11 10:38:03 2013 +0200 @@ -25,140 +25,61 @@ * @test TestSummarizeRSetStats.java * @bug 8013895 * @library /testlibrary - * @build TestSummarizeRSetStats + * @build TestSummarizeRSetStatsTools TestSummarizeRSetStats * @summary Verify output of -XX:+G1SummarizeRSetStats * @run main TestSummarizeRSetStats * * Test the output of G1SummarizeRSetStats in conjunction with G1SummarizeRSetStatsPeriod. */ -import com.oracle.java.testlibrary.*; -import java.lang.Thread; -import java.util.ArrayList; -import java.util.Arrays; - -class RunSystemGCs { - // 4M size, both are directly allocated into the old gen - static Object[] largeObject1 = new Object[1024 * 1024]; - static Object[] largeObject2 = new Object[1024 * 1024]; - - static int[] temp; - - public static void main(String[] args) { - // create some cross-references between these objects - for (int i = 0; i < largeObject1.length; i++) { - largeObject1[i] = largeObject2; - } - - for (int i = 0; i < largeObject2.length; i++) { - largeObject2[i] = largeObject1; - } - - int numGCs = Integer.parseInt(args[0]); - - if (numGCs > 0) { - // try to force a minor collection: the young gen is 4M, the - // amount of data allocated below is roughly that (4*1024*1024 + - // some header data) - for (int i = 0; i < 1024 ; i++) { - temp = new int[1024]; - } - } - - for (int i = 0; i < numGCs - 1; i++) { - System.gc(); - } - } -} - public class TestSummarizeRSetStats { - public static String runTest(String[] additionalArgs, int numGCs) throws Exception { - ArrayList finalargs = new ArrayList(); - String[] defaultArgs = new String[] { - "-XX:+UseG1GC", - "-Xmn4m", - "-Xmx20m", - "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking - "-XX:+PrintGC", - "-XX:+UnlockDiagnosticVMOptions", - "-XX:G1HeapRegionSize=1M", - }; - - finalargs.addAll(Arrays.asList(defaultArgs)); - - if (additionalArgs != null) { - finalargs.addAll(Arrays.asList(additionalArgs)); - } - - finalargs.add(RunSystemGCs.class.getName()); - finalargs.add(String.valueOf(numGCs)); - - ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( - finalargs.toArray(new String[0])); - OutputAnalyzer output = new OutputAnalyzer(pb.start()); - - output.shouldHaveExitValue(0); - - String result = output.getStdout(); - return result; - } - - private static void expectStatistics(String result, int expectedCumulative, int expectedPeriodic) throws Exception { - 
int actualTotal = result.split("Concurrent RS processed").length - 1; - int actualCumulative = result.split("Cumulative RS summary").length - 1; - - if (expectedCumulative != actualCumulative) { - throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative); - } - - if (expectedPeriodic != (actualTotal - actualCumulative)) { - throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative)); - } - } - public static void main(String[] args) throws Exception { String result; - // no RSet statistics output - result = runTest(null, 0); - expectStatistics(result, 0, 0); + if (!TestSummarizeRSetStatsTools.testingG1GC()) { + return; + } - // no RSet statistics output - result = runTest(null, 2); - expectStatistics(result, 0, 0); + // no remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(null, 0); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0); - // no RSet statistics output - result = runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3); - expectStatistics(result, 0, 0); + // no remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(null, 2); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0); - // single RSet statistics output at the end - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0); - expectStatistics(result, 1, 0); + // no remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0); - // single RSet statistics output at the end - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2); - expectStatistics(result, 1, 0); + // single remembered set summary output at the end + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0); - // single RSet statistics output - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0); - expectStatistics(result, 1, 0); + // single remembered set summary output at the end + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0); - // two times RSet statistics output - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1); - expectStatistics(result, 1, 1); + // single remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0); + + // two times remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2); - // four times RSet statistics output - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3); - expectStatistics(result, 1, 3); + // four times remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 6); - // 
three times RSet statistics output - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3); - expectStatistics(result, 1, 2); + // three times remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 4); - // single RSet statistics output - result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3); - expectStatistics(result, 1, 1); + // single remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3); + TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2); } } diff -r ccb4f2af2319 -r cefad50507d8 test/gc/g1/TestSummarizeRSetStatsPerRegion.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestSummarizeRSetStatsPerRegion.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestSummarizeRSetStatsPerRegion.java + * @bug 8014078 + * @library /testlibrary + * @build TestSummarizeRSetStatsTools TestSummarizeRSetStatsPerRegion + * @summary Verify output of -XX:+G1SummarizeRSetStats in regards to per-region type output + * @run main TestSummarizeRSetStatsPerRegion + */ + +import com.oracle.java.testlibrary.*; +import java.lang.Thread; +import java.util.ArrayList; +import java.util.Arrays; + +public class TestSummarizeRSetStatsPerRegion { + + public static void main(String[] args) throws Exception { + String result; + + if (!TestSummarizeRSetStatsTools.testingG1GC()) { + return; + } + + // single remembered set summary output at the end + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0); + TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 0); + + // two times remembered set summary output + result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1); + TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 2); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/g1/TestSummarizeRSetStatsThreads.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestSummarizeRSetStatsThreads.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestSummarizeRSetStatsThreads + * @bug 8025441 + * @summary Ensure that various values of worker threads/concurrent + * refinement threads do not crash the VM. + * @key gc + * @library /testlibrary + */ + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestSummarizeRSetStatsThreads { + + private static void runTest(int refinementThreads, int workerThreads) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+G1SummarizeRSetStats", + "-XX:G1ConcRefinementThreads=" + refinementThreads, + "-XX:ParallelGCThreads=" + workerThreads, + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + // check output to contain the string "Concurrent RS threads times (s)" followed by + // the correct number of values in the next line. + + // a zero in refinement thread numbers indicates that the value in ParallelGCThreads should be used. + // Additionally use at least one thread. + int expectedNumRefinementThreads = refinementThreads == 0 ? workerThreads : refinementThreads; + expectedNumRefinementThreads = Math.max(1, expectedNumRefinementThreads); + // create the pattern made up of n copies of a floating point number pattern + String numberPattern = String.format("%0" + expectedNumRefinementThreads + "d", 0) + .replace("0", "\\s+\\d+\\.\\d+"); + String pattern = "Concurrent RS threads times \\(s\\)$" + numberPattern + "$"; + Matcher m = Pattern.compile(pattern, Pattern.MULTILINE).matcher(output.getStdout()); + + if (!m.find()) { + throw new Exception("Could not find correct output for concurrent RS threads times in stdout," + + " should match the pattern \"" + pattern + "\", but stdout is \n" + output.getStdout()); + } + output.shouldHaveExitValue(0); + } + + public static void main(String[] args) throws Exception { + if (!TestSummarizeRSetStatsTools.testingG1GC()) { + return; + } + // different valid combinations of number of refinement and gc worker threads + runTest(0, 0); + runTest(0, 5); + runTest(5, 0); + runTest(10, 10); + runTest(1, 2); + runTest(4, 3); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/g1/TestSummarizeRSetStatsTools.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/g1/TestSummarizeRSetStatsTools.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * Common helpers for TestSummarizeRSetStats* tests + */ + +import sun.management.ManagementFactoryHelper; +import com.sun.management.HotSpotDiagnosticMXBean; +import com.sun.management.VMOption; + +import com.oracle.java.testlibrary.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.lang.Thread; +import java.util.ArrayList; +import java.util.Arrays; + +class VerifySummaryOutput { + // 4M size, both are directly allocated into the old gen + static Object[] largeObject1 = new Object[1024 * 1024]; + static Object[] largeObject2 = new Object[1024 * 1024]; + + static int[] temp; + + public static void main(String[] args) { + // create some cross-references between these objects + for (int i = 0; i < largeObject1.length; i++) { + largeObject1[i] = largeObject2; + } + + for (int i = 0; i < largeObject2.length; i++) { + largeObject2[i] = largeObject1; + } + + int numGCs = Integer.parseInt(args[0]); + + if (numGCs > 0) { + // try to force a minor collection: the young gen is 4M, the + // amount of data allocated below is roughly that (4*1024*1024 + + // some header data) + for (int i = 0; i < 1024 ; i++) { + temp = new int[1024]; + } + } + + for (int i = 0; i < numGCs - 1; i++) { + System.gc(); + } + } +} + +public class TestSummarizeRSetStatsTools { + + // the VM is currently run using G1GC, i.e. trying to test G1 functionality. + public static boolean testingG1GC() { + HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean(); + + VMOption option = diagnostic.getVMOption("UseG1GC"); + if (option.getValue().equals("false")) { + System.out.println("Skipping this test. 
It is only a G1 test."); + return false; + } + return true; + } + + public static String runTest(String[] additionalArgs, int numGCs) throws Exception { + ArrayList finalargs = new ArrayList(); + String[] defaultArgs = new String[] { + "-XX:+UseG1GC", + "-XX:+UseCompressedOops", + "-Xmn4m", + "-Xmx20m", + "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking + "-XX:+PrintGC", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:G1HeapRegionSize=1M", + }; + + finalargs.addAll(Arrays.asList(defaultArgs)); + + if (additionalArgs != null) { + finalargs.addAll(Arrays.asList(additionalArgs)); + } + + finalargs.add(VerifySummaryOutput.class.getName()); + finalargs.add(String.valueOf(numGCs)); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + finalargs.toArray(new String[0])); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldHaveExitValue(0); + + String result = output.getStdout(); + return result; + } + + private static void checkCounts(int expected, int actual, String which) throws Exception { + if (expected != actual) { + throw new Exception("RSet summaries mention " + which + " regions an incorrect number of times. Expected " + expected + ", got " + actual); + } + } + + public static void expectPerRegionRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception { + expectRSetSummaries(result, expectedCumulative, expectedPeriodic); + int actualYoung = result.split("Young regions").length - 1; + int actualHumonguous = result.split("Humonguous regions").length - 1; + int actualFree = result.split("Free regions").length - 1; + int actualOther = result.split("Old regions").length - 1; + + // the strings we check for above are printed four times per summary + int expectedPerRegionTypeInfo = (expectedCumulative + expectedPeriodic) * 4; + + checkCounts(expectedPerRegionTypeInfo, actualYoung, "Young"); + checkCounts(expectedPerRegionTypeInfo, actualHumonguous, "Humonguous"); + checkCounts(expectedPerRegionTypeInfo, actualFree, "Free"); + checkCounts(expectedPerRegionTypeInfo, actualOther, "Old"); + } + + public static void expectRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception { + int actualTotal = result.split("concurrent refinement").length - 1; + int actualCumulative = result.split("Cumulative RS summary").length - 1; + + if (expectedCumulative != actualCumulative) { + throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative); + } + + if (expectedPeriodic != (actualTotal - actualCumulative)) { + throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative)); + } + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java --- a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test ClassMetaspaceSizeInJmapHeap - * @bug 8004924 - * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize - * @library /testlibrary - * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap - */ - -import com.oracle.java.testlibrary.*; -import java.nio.file.*; -import java.io.File; -import java.nio.charset.Charset; -import java.util.List; - -public class ClassMetaspaceSizeInJmapHeap { - public static void main(String[] args) throws Exception { - String pid = Integer.toString(ProcessTools.getProcessId()); - - JDKToolLauncher jmap = JDKToolLauncher.create("jmap") - .addToolArg("-heap") - .addToolArg(pid); - ProcessBuilder pb = new ProcessBuilder(jmap.getCommand()); - - File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt"); - pb.redirectOutput(out); - - File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt"); - pb.redirectError(err); - - run(pb); - - OutputAnalyzer output = new OutputAnalyzer(read(out)); - output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)"); - out.delete(); - } - - private static void run(ProcessBuilder pb) throws Exception { - Process p = pb.start(); - p.waitFor(); - int exitValue = p.exitValue(); - if (exitValue != 0) { - throw new Exception("jmap -heap exited with error code: " + exitValue); - } - } - - private static String read(File f) throws Exception { - Path p = f.toPath(); - List lines = Files.readAllLines(p, Charset.defaultCharset()); - - StringBuilder sb = new StringBuilder(); - for (String line : lines) { - sb.append(line).append('\n'); - } - return sb.toString(); - } -} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test CompressedClassSpaceSizeInJmapHeap + * @bug 8004924 + * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize + * @library /testlibrary + * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap + */ + +import com.oracle.java.testlibrary.*; +import java.nio.file.*; +import java.io.File; +import java.nio.charset.Charset; +import java.util.List; + +public class CompressedClassSpaceSizeInJmapHeap { + public static void main(String[] args) throws Exception { + String pid = Integer.toString(ProcessTools.getProcessId()); + + JDKToolLauncher jmap = JDKToolLauncher.create("jmap") + .addToolArg("-heap") + .addToolArg(pid); + ProcessBuilder pb = new ProcessBuilder(jmap.getCommand()); + + File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt"); + pb.redirectOutput(out); + + File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt"); + pb.redirectError(err); + + run(pb); + + OutputAnalyzer output = new OutputAnalyzer(read(out)); + output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)"); + out.delete(); + } + + private static void run(ProcessBuilder pb) throws Exception { + Process p = pb.start(); + p.waitFor(); + int exitValue = p.exitValue(); + if (exitValue != 0) { + throw new Exception("jmap -heap exited with error code: " + exitValue); + } + } + + private static String read(File f) throws Exception { + Path p = f.toPath(); + List lines = Files.readAllLines(p, Charset.defaultCharset()); + + StringBuilder sb = new StringBuilder(); + for (String line : lines) { + sb.append(line).append('\n'); + } + return sb.toString(); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/G1AddMetaspaceDependency.java --- a/test/gc/metaspace/G1AddMetaspaceDependency.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/gc/metaspace/G1AddMetaspaceDependency.java Fri Oct 11 10:38:03 2013 +0200 @@ -107,7 +107,6 @@ Loader f_loader = new Loader(b_name, b_bytes, a_name, a_loader); Loader g_loader = new Loader(b_name, b_bytes, a_name, a_loader); - byte[] b = new byte[20 * 2 << 20]; Class c; c = b_loader.loadClass(b_name); c = c_loader.loadClass(b_name); diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/TestMetaspaceMemoryPool.java --- a/test/gc/metaspace/TestMetaspaceMemoryPool.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java Fri Oct 11 10:38:03 2013 +0200 @@ -22,55 +22,35 @@ */ import java.util.List; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryManagerMXBean; -import java.lang.management.MemoryPoolMXBean; -import java.lang.management.MemoryUsage; - -import java.lang.management.RuntimeMXBean; -import java.lang.management.ManagementFactory; +import java.lang.management.*; +import com.oracle.java.testlibrary.*; +import static com.oracle.java.testlibrary.Asserts.*; /* @test TestMetaspaceMemoryPool * @bug 8000754 * @summary Tests that a MemoryPoolMXBeans is created for metaspace and that a * MemoryManagerMXBean is created. 
+ * @library /testlibrary * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool */ public class TestMetaspaceMemoryPool { public static void main(String[] args) { verifyThatMetaspaceMemoryManagerExists(); - verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize")); - if (runsOn64bit()) { - if (usesCompressedOops()) { + boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize"); + verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined); + + if (Platform.is64bit()) { + if (InputArguments.contains("-XX:+UseCompressedOops")) { MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space"); verifyMemoryPool(cksPool, true); } } } - private static boolean runsOn64bit() { - return !System.getProperty("sun.arch.data.model").equals("32"); - } - - private static boolean usesCompressedOops() { - return isFlagDefined("+UseCompressedOops"); - } - - private static boolean isFlagDefined(String name) { - RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean(); - List args = runtimeMxBean.getInputArguments(); - for (String arg : args) { - if (arg.startsWith("-XX:" + name)) { - return true; - } - } - return false; - } - private static void verifyThatMetaspaceMemoryManagerExists() { List managers = ManagementFactory.getMemoryManagerMXBeans(); for (MemoryManagerMXBean manager : managers) { @@ -95,32 +75,19 @@ private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) { MemoryUsage mu = pool.getUsage(); - assertDefined(mu.getInit(), "init"); - assertDefined(mu.getUsed(), "used"); - assertDefined(mu.getCommitted(), "committed"); + long init = mu.getInit(); + long used = mu.getUsed(); + long committed = mu.getCommitted(); + long max = mu.getMax(); + + assertGTE(init, 0L); + assertGTE(used, init); + assertGTE(committed, used); if (isMaxDefined) { - assertDefined(mu.getMax(), "max"); + assertGTE(max, committed); } else { - assertUndefined(mu.getMax(), "max"); - } - } - - private static void assertDefined(long value, String name) { - assertTrue(value != -1, "Expected " + name + " to be defined"); - } - - private static void assertUndefined(long value, String name) { - assertEquals(value, -1, "Expected " + name + " to be undefined"); - } - - private static void assertEquals(long actual, long expected, String msg) { - assertTrue(actual == expected, msg); - } - - private static void assertTrue(boolean condition, String msg) { - if (!condition) { - throw new RuntimeException(msg); + assertEQ(max, -1L); } } } diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/TestMetaspacePerfCounters.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/metaspace/TestMetaspacePerfCounters.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2013, 
Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.util.ArrayList;
+
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test TestMetaspacePerfCounters
+ * @bug 8014659
+ * @library /testlibrary
+ * @summary Tests that performance counters for metaspace and compressed class
+ *          space exist and work.
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ */
+public class TestMetaspacePerfCounters {
+  public static Class fooClass = null;
+  private static final String[] counterNames = {"minCapacity", "maxCapacity", "capacity", "used"};
+
+  public static void main(String[] args) throws Exception {
+    String metaspace = "sun.gc.metaspace";
+    String ccs = "sun.gc.compressedclassspace";
+
+    checkPerfCounters(metaspace);
+
+    if (isUsingCompressedClassPointers()) {
+      checkPerfCounters(ccs);
+      checkUsedIncreasesWhenLoadingClass(ccs);
+    } else {
+      checkEmptyPerfCounters(ccs);
+      checkUsedIncreasesWhenLoadingClass(metaspace);
+    }
+  }
+
+  private static void checkPerfCounters(String ns) throws Exception {
+    long minCapacity = getMinCapacity(ns);
+    long maxCapacity = getMaxCapacity(ns);
+    long capacity = getCapacity(ns);
+    long used = getUsed(ns);
+
+    assertGTE(minCapacity, 0L);
+    assertGTE(used, minCapacity);
+    assertGTE(capacity, used);
+    assertGTE(maxCapacity, capacity);
+  }
+
+  private static void checkEmptyPerfCounters(String ns) throws Exception {
+    for (PerfCounter counter : countersInNamespace(ns)) {
+      String msg = "Expected " + counter.getName() + " to equal 0";
+      assertEQ(counter.longValue(), 0L, msg);
+    }
+  }
+
+  private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
+    long before = getUsed(ns);
+    fooClass = compileAndLoad("Foo", "public class Foo { }");
+    System.gc();
+    long after = getUsed(ns);
+
+    assertGT(after, before);
+  }
+
+  private static List<PerfCounter> countersInNamespace(String ns) throws Exception {
+    List<PerfCounter> counters = new ArrayList<>();
+    for (String name : counterNames) {
+      counters.add(PerfCounters.findByName(ns + "." + name));
+    }
+    return counters;
+  }
+
+  private static Class compileAndLoad(String name, String source) throws Exception {
+    byte[] byteCode = InMemoryJavaCompiler.compile(name, source);
+    return ByteCodeLoader.load(name, byteCode);
+  }
+
+  private static boolean isUsingCompressedClassPointers() {
+    return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
+  }
+
+  private static long getMinCapacity(String ns) throws Exception {
+    return PerfCounters.findByName(ns + ".minCapacity").longValue();
+  }
+
+  private static long getCapacity(String ns) throws Exception {
+    return PerfCounters.findByName(ns + ".capacity").longValue();
+  }
+
+  private static long getMaxCapacity(String ns) throws Exception {
+    return PerfCounters.findByName(ns + ".maxCapacity").longValue();
+  }
+
+  private static long getUsed(String ns) throws Exception {
+    return PerfCounters.findByName(ns + ".used").longValue();
+  }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/TestMetaspaceSizeFlags.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/TestMetaspaceSizeFlags.java	Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/*
+ * @test TestMetaspaceSizeFlags
+ * @key gc
+ * @bug 8024650
+ * @summary Test that metaspace size flags can be set correctly
+ * @library /testlibrary
+ */
+public class TestMetaspaceSizeFlags {
+  public static final long K = 1024L;
+  public static final long M = 1024L * K;
+
+  // HotSpot uses a number of different values to align memory size flags.
+  // This is currently the largest alignment (unless huge large pages are used).
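+  // The sizes used by the tests below are therefore multiples of this value,
+  // so the VM's own rounding of the flags cannot change how the tested
+  // values relate to each other.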
+ public static final long MAX_ALIGNMENT = 32 * M; + + public static void main(String [] args) throws Exception { + testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT); + // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize. + testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2); + testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT); + testTooSmallInitialMetaspace(0, 0); + testTooSmallInitialMetaspace(0, MAX_ALIGNMENT); + testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0); + } + + private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception { + MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize); + Asserts.assertEQ(maxMetaspaceSize, metaspaceSize); + Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize); + Asserts.assertEQ(mf.metaspaceSize, metaspaceSize); + } + + private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception { + MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize); + Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize); + Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize); + } + + private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception { + MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize); + Asserts.assertGT(maxMetaspaceSize, metaspaceSize); + Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize); + Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize); + Asserts.assertEQ(mf.metaspaceSize, metaspaceSize); + } + + private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception { + OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize); + output.shouldContain("Too small initial Metaspace size"); + } + + private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception { + OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize); + output.shouldNotMatch("Error occurred during initialization of VM\n.*"); + + String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1); + String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1); + + return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize), + Long.parseLong(stringMetaspaceSize)); + } + + private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:MaxMetaspaceSize=" + maxMetaspaceSize, + "-XX:MetaspaceSize=" + metaspaceSize, + "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc. + "-XX:+PrintFlagsFinal", + "-version"); + return new OutputAnalyzer(pb.start()); + } + + private static class MetaspaceFlags { + public long maxMetaspaceSize; + public long metaspaceSize; + public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) { + this.maxMetaspaceSize = maxMetaspaceSize; + this.metaspaceSize = metaspaceSize; + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/gc/metaspace/TestPerfCountersAndMemoryPools.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.lang.management.*;
+
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test TestPerfCountersAndMemoryPools
+ * @bug 8023476
+ * @library /testlibrary
+ * @summary Tests that a MemoryPoolMXBean and the PerfCounters for metaspace
+ *          report the same data.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
+ */
+public class TestPerfCountersAndMemoryPools {
+    public static void main(String[] args) throws Exception {
+        checkMemoryUsage("Metaspace", "sun.gc.metaspace");
+
+        if (InputArguments.contains("-XX:+UseCompressedKlassPointers") && Platform.is64bit()) {
+            checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
+        }
+    }
+
+    private static MemoryPoolMXBean getMemoryPool(String memoryPoolName) {
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        for (MemoryPoolMXBean pool : pools) {
+            if (pool.getName().equals(memoryPoolName)) {
+                return pool;
+            }
+        }
+
+        throw new RuntimeException("Expected to find a memory pool with name " +
+            memoryPoolName);
+    }
+
+    private static void checkMemoryUsage(String memoryPoolName, String perfNS)
+        throws Exception {
+        MemoryPoolMXBean pool = getMemoryPool(memoryPoolName);
+
+        // Must do a GC to update performance counters
+        System.gc();
+        assertEQ(getMinCapacity(perfNS), pool.getUsage().getInit());
+
+        // Must do a second GC to update the performance counters again, since
+        // the call pool.getUsage().getInit() could have allocated some
+        // metadata.
+ System.gc(); + assertEQ(getUsed(perfNS), pool.getUsage().getUsed()); + assertEQ(getCapacity(perfNS), pool.getUsage().getCommitted()); + } + + private static long getMinCapacity(String ns) throws Exception { + return PerfCounters.findByName(ns + ".minCapacity").longValue(); + } + + private static long getCapacity(String ns) throws Exception { + return PerfCounters.findByName(ns + ".capacity").longValue(); + } + + private static long getUsed(String ns) throws Exception { + return PerfCounters.findByName(ns + ".used").longValue(); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/6878713/Test6878713.sh --- a/test/runtime/6878713/Test6878713.sh Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,137 +0,0 @@ -#!/bin/sh - -# -# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - - - -## -## @test -## @bug 6878713 -## @bug 7030610 -## @bug 7037122 -## @bug 7123945 -## @summary Verifier heap corruption, relating to backward jsrs -## @run shell Test6878713.sh -## -## some tests require path to find test source dir -if [ "${TESTSRC}" = "" ] -then - TESTSRC=${PWD} - echo "TESTSRC not set. Using "${TESTSRC}" as default" -fi -echo "TESTSRC=${TESTSRC}" -## Adding common setup Variables for running shell tests. -. ${TESTSRC}/../../test_env.sh - -TARGET_CLASS=OOMCrashClass1960_2 - -echo "INFO: extracting the target class." -${COMPILEJAVA}${FS}bin${FS}jar xvf \ - ${TESTSRC}${FS}testcase.jar ${TARGET_CLASS}.class - -# remove any hs_err_pid that might exist here -rm -f hs_err_pid*.log - -echo "INFO: checking for 32-bit versus 64-bit VM." -${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version 2>&1 \ - | grep "64-Bit [^ ][^ ]* VM" > /dev/null 2>&1 -status="$?" -if [ "$status" = 0 ]; then - echo "INFO: testing a 64-bit VM." - is_64_bit=true -else - echo "INFO: testing a 32-bit VM." -fi - -if [ "$is_64_bit" = true ]; then - # limit is 768MB in 8-byte words (1024 * 1024 * 768 / 8) == 100663296 - MALLOC_MAX=100663296 -else - # limit is 768MB in 4-byte words (1024 * 1024 * 768 / 4) == 201326592 - MALLOC_MAX=201326592 -fi -echo "INFO: MALLOC_MAX=$MALLOC_MAX" - -echo "INFO: executing the target class." 
-# -XX:+PrintCommandLineFlags for debugging purposes -# -XX:+IgnoreUnrecognizedVMOptions so test will run on a VM without -# the new -XX:MallocMaxTestWords option -# -XX:+UnlockDiagnosticVMOptions so we can use -XX:MallocMaxTestWords -# -XX:MallocMaxTestWords limits malloc to $MALLOC_MAX -${TESTJAVA}${FS}bin${FS}java \ - -XX:+PrintCommandLineFlags \ - -XX:+IgnoreUnrecognizedVMOptions \ - -XX:+UnlockDiagnosticVMOptions \ - -XX:MallocMaxTestWords=$MALLOC_MAX \ - ${TESTVMOPTS} ${TARGET_CLASS} > test.out 2>&1 - -echo "INFO: begin contents of test.out:" -cat test.out -echo "INFO: end contents of test.out." - -echo "INFO: checking for memory allocation error message." -# We are looking for this specific memory allocation failure mesg so -# we know we exercised the right allocation path with the test class: -MESG1="Native memory allocation (malloc) failed to allocate 25696531[0-9][0-9] bytes" -grep "$MESG1" test.out -status="$?" -if [ "$status" = 0 ]; then - echo "INFO: found expected memory allocation error message." -else - echo "INFO: did not find expected memory allocation error message." - - # If we didn't find MESG1 above, then there are several scenarios: - # 1) -XX:MallocMaxTestWords is not supported by the current VM and we - # didn't fail TARGET_CLASS's memory allocation attempt; instead - # we failed to find TARGET_CLASS's main() method. The TARGET_CLASS - # is designed to provoke a memory allocation failure during class - # loading; we actually don't care about running the class which is - # why it doesn't have a main() method. - # 2) we failed a memory allocation, but not the one we were looking - # so it might be that TARGET_CLASS no longer tickles the same - # memory allocation code path - # 3) TARGET_CLASS reproduces the failure mode (SIGSEGV) fixed by - # 6878713 because the test is running on a pre-fix VM. - echo "INFO: checking for no main() method message." - MESG2="Error: Main method not found in class" - grep "$MESG2" test.out - status="$?" - if [ "$status" = 0 ]; then - echo "INFO: found no main() method message." - else - echo "FAIL: did not find no main() method message." - # status is non-zero for exit below - - if [ -s hs_err_pid*.log ]; then - echo "INFO: begin contents of hs_err_pid file:" - cat hs_err_pid*.log - echo "INFO: end contents of hs_err_pid file." - fi - fi -fi - -if [ "$status" = 0 ]; then - echo "PASS: test found one of the expected messages." -fi -exit "$status" diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/6878713/testcase.jar Binary file test/runtime/6878713/testcase.jar has changed diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/7020373/Test7020373.sh --- a/test/runtime/7020373/Test7020373.sh Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,43 +0,0 @@ -#!/bin/sh - -## -## @test -## @bug 7020373 7055247 7053586 7185550 -## @key cte_test -## @summary JSR rewriting can overflow memory address size variables -## @ignore Ignore it as 7053586 test uses lots of memory. See bug report for detail. -## @run shell Test7020373.sh -## - -if [ "${TESTSRC}" = "" ] -then - TESTSRC=${PWD} - echo "TESTSRC not set. Using "${TESTSRC}" as default" -fi -echo "TESTSRC=${TESTSRC}" -## Adding common setup Variables for running shell tests. -. ${TESTSRC}/../../test_env.sh - -${COMPILEJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar - -${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} OOMCrashClass4000_1 > test.out 2>&1 - -cat test.out - -egrep "SIGSEGV|An unexpected error has been detected" test.out - -if [ $? 
= 0 ] -then - echo "Test Failed" - exit 1 -else - egrep "java.lang.LinkageError|java.lang.NoSuchMethodError|Main method not found in class OOMCrashClass4000_1|insufficient memory" test.out - if [ $? = 0 ] - then - echo "Test Passed" - exit 0 - else - echo "Test Failed" - exit 1 - fi -fi diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/7020373/testcase.jar Binary file test/runtime/7020373/testcase.jar has changed diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/7051189/Xchecksig.sh --- a/test/runtime/7051189/Xchecksig.sh Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,126 +0,0 @@ -# -# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - - -# @test Xchecksig.sh -# @bug 7051189 -# @summary Need to suppress info message if -xcheck:jni used with libjsig.so -# @run shell Xchecksig.sh -# - -if [ "${TESTSRC}" = "" ] -then - TESTSRC=${PWD} - echo "TESTSRC not set. Using "${TESTSRC}" as default" -fi -echo "TESTSRC=${TESTSRC}" -## Adding common setup Variables for running shell tests. -. ${TESTSRC}/../../test_env.sh - -OS=`uname -s` -case "$OS" in - Windows_* | CYGWIN_* ) - printf "Not testing libjsig.so on Windows. PASSED.\n " - exit 0 - ;; -esac - -JAVA=${TESTJAVA}${FS}bin${FS}java - -# LD_PRELOAD arch needs to match the binary we run, so run the java -# 64-bit binary directly if we are testing 64-bit (bin/ARCH/java). -# Check if TESTVMOPS contains -d64, but cannot use -# java ${TESTVMOPS} to run "java -d64" with LD_PRELOAD. - -if [ ${OS} -eq "SunOS" ] -then - printf "SunOS test TESTVMOPTS = ${TESTVMOPTS}" - printf ${TESTVMOPTS} | grep d64 > /dev/null - if [ $? -eq 0 ] - then - printf "SunOS 64-bit test\n" - BIT_FLAG=-d64 - fi -fi - -ARCH=`uname -p` -case $ARCH in - i386) - if [ X${BIT_FLAG} != "X" ] - then - ARCH=amd64 - JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java - fi - ;; - sparc) - if [ X${BIT_FLAG} != "X" ] - then - ARCH=sparcv9 - JAVA=${TESTJAVA}${FS}bin${FS}${ARCH}${FS}java - fi - ;; - * ) - printf "Not testing architecture $ARCH, skipping test.\n" - exit 0 - ;; -esac - -LIBJSIG=${COMPILEJAVA}${FS}jre${FS}lib${FS}${ARCH}${FS}libjsig.so - -# If libjsig and binary do not match, skip test. - -A=`file ${LIBJSIG} | awk '{ print $3 }'` -B=`file ${JAVA} | awk '{ print $3 }'` - -if [ $A -ne $B ] -then - printf "Mismatching binary and library to preload, skipping test.\n" - exit 0 -fi - -if [ ! 
-f ${LIBJSIG} ] -then - printf "Skipping test: libjsig missing for given architecture: ${LIBJSIG}\n" - exit 0 -fi -# Use java -version to test, java version info appears on stderr, -# the libjsig message we are removing appears on stdout. - -# grep returns zero meaning found, non-zero means not found: - -LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -version 2>&1 | grep "libjsig is activated" -if [ $? -eq 0 ]; then - printf "Failed: -Xcheck:jni prints message when libjsig.so is loaded.\n" - exit 1 -fi - - -LD_PRELOAD=${LIBJSIG} ${JAVA} ${TESTVMOPTS} -Xcheck:jni -verbose:jni -version 2>&1 | grep "libjsig is activated" -if [ $? != 0 ]; then - printf "Failed: -Xcheck:jni does not print message when libjsig.so is loaded and -verbose:jni is set.\n" - exit 1 -fi - -printf "PASSED\n" -exit 0 - diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java Fri Oct 11 10:38:03 2013 +0200 @@ -24,7 +24,7 @@ /* * @test * @bug 8003424 - * @summary Testing UseCompressedKlassPointers with CDS + * @summary Testing UseCompressedClassPointers with CDS * @library /testlibrary * @run main CDSCompressedKPtrs */ @@ -36,7 +36,7 @@ ProcessBuilder pb; if (Platform.is64bit()) { pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops", + "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); try { @@ -44,16 +44,15 @@ output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops", + "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); output = new OutputAnalyzer(pb.start()); output.shouldContain("sharing"); output.shouldHaveExitValue(0); } catch (RuntimeException e) { - // Report 'passed' if CDS was turned off because we could not allocate - // the klass metaspace at an address that would work with CDS. - output.shouldContain("Could not allocate metaspace at a compatible address"); + // Report 'passed' if CDS was turned off. + output.shouldContain("Unable to use shared archive"); output.shouldHaveExitValue(1); } } diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java Fri Oct 11 10:38:03 2013 +0200 @@ -24,7 +24,7 @@ /* * @test * @bug 8003424 - * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off. + * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off. 
* @library /testlibrary * @run main CDSCompressedKPtrsError */ @@ -36,7 +36,7 @@ ProcessBuilder pb; if (Platform.is64bit()) { pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); try { @@ -44,21 +44,21 @@ output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder( - "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops", + "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Unable to use shared archive"); output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder( - "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops", + "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Unable to use shared archive"); output.shouldHaveExitValue(0); pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Unable to use shared archive"); @@ -71,19 +71,19 @@ // Test bad options with -Xshare:dump. pb = ProcessTools.createJavaProcessBuilder( - "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", + "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Cannot dump shared archive"); pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", + "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Cannot dump shared archive"); pb = ProcessTools.createJavaProcessBuilder( - "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", + "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); output = new OutputAnalyzer(pb.start()); output.shouldContain("Cannot dump shared archive"); diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/CDSCompressedKPtrs/XShareAuto.java --- a/test/runtime/CDSCompressedKPtrs/XShareAuto.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java Fri Oct 11 10:38:03 2013 +0200 @@ -33,16 +33,9 @@ public class XShareAuto { public static void main(String[] args) throws Exception { - if (!Platform.is64bit()) { - System.out.println("ObjectAlignmentInBytes for CDS is only " + - "supported on 64bit platforms; this plaform is " + - System.getProperty("sun.arch.data.model")); - System.out.println("Skipping the test"); - return; - } ProcessBuilder pb = 
ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", - "-Xshare:dump"); + "-server", "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldContain("Loading classes to share"); output.shouldHaveExitValue(0); @@ -69,7 +62,7 @@ "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-version"); output = new OutputAnalyzer(pb.start()); - output.shouldContain("Could not allocate metaspace at a compatible address"); + output.shouldContain("Unable to use shared archive"); output.shouldHaveExitValue(1); } } diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/ClassFile/JsrRewriting.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/ClassFile/JsrRewriting.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + + +/* + * @test JsrRewriting + * @summary JSR (jump local subroutine) + * rewriting can overflow memory address size variables + * @bug 7020373 + * @bug 7055247 + * @bug 7053586 + * @bug 7185550 + * @bug 7149464 + * @key cte_test + * @library /testlibrary + * @run main JsrRewriting + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class JsrRewriting { + + public static void main(String[] args) throws Exception { + + // ======= Configure the test + String jarFile = System.getProperty("test.src") + + File.separator + "JsrRewritingTestCase.jar"; + String className = "OOMCrashClass4000_1"; + + // limit is 768MB in native words + int mallocMaxTestWords = (1024 * 1024 * 768 / 4); + if (Platform.is64bit()) + mallocMaxTestWords = (mallocMaxTestWords / 2); + + // ======= extract the test class + ProcessBuilder pb = new ProcessBuilder(new String[] { + JDKToolFinder.getJDKTool("jar"), + "xvf", jarFile } ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + // ======= execute the test + pb = ProcessTools.createJavaProcessBuilder( + "-cp", ".", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:MallocMaxTestWords=" + mallocMaxTestWords, + className); + + output = new OutputAnalyzer(pb.start()); + String[] expectedMsgs = { + "java.lang.LinkageError", + "java.lang.NoSuchMethodError", + "Main method not found in class " + className, + "insufficient memory" + }; + + MultipleOrMatch(output, expectedMsgs); + } + + private static void + MultipleOrMatch(OutputAnalyzer analyzer, String[] whatToMatch) { + String output = analyzer.getOutput(); + + for (String expected : whatToMatch) + if (output.contains(expected)) + return; + + String err = + " stdout: [" + analyzer.getOutput() + "];\n" + + " exitValue = " + analyzer.getExitValue() + "\n"; + System.err.println(err); + + StringBuilder msg = new StringBuilder("Output did not contain " + + "any of the following expected messages: \n"); + for (String expected : whatToMatch) + msg.append(expected).append(System.lineSeparator()); + throw new RuntimeException(msg.toString()); + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/ClassFile/JsrRewritingTestCase.jar Binary file test/runtime/ClassFile/JsrRewritingTestCase.jar has changed diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/ClassFile/OomWhileParsingRepeatedJsr.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/ClassFile/OomWhileParsingRepeatedJsr.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + + +/* + * @test OomWhileParsingRepeatedJsr + * @summary Testing class file parser; specifically parsing + * a file with repeated JSR (jump local subroutine) + * bytecode command. + * @bug 6878713 + * @bug 7030610 + * @bug 7037122 + * @bug 7123945 + * @bug 8016029 + * @library /testlibrary + * @run main OomWhileParsingRepeatedJsr + */ + +import com.oracle.java.testlibrary.*; + + +public class OomWhileParsingRepeatedJsr { + + public static void main(String[] args) throws Exception { + + // ======= Configure the test + String jarFile = System.getProperty("test.src") + "/testcase.jar"; + String className = "OOMCrashClass1960_2"; + + // limit is 768MB in native words + int mallocMaxTestWords = (1024 * 1024 * 768 / 4); + if (Platform.is64bit()) + mallocMaxTestWords = (mallocMaxTestWords / 2); + + // ======= extract the test class + ProcessBuilder pb = new ProcessBuilder(new String[] { + JDKToolFinder.getJDKTool("jar"), + "xvf", jarFile } ); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + // ======= execute the test + pb = ProcessTools.createJavaProcessBuilder( + "-cp", ".", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:MallocMaxTestWords=" + mallocMaxTestWords, + className ); + + output = new OutputAnalyzer(pb.start()); + output.shouldContain("Cannot reserve enough memory"); + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/ClassFile/testcase.jar Binary file test/runtime/ClassFile/testcase.jar has changed diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/CompressedOops/CompressedKlassPointerAndOops.java --- a/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java Fri Oct 11 10:38:03 2013 +0200 @@ -25,7 +25,7 @@ * @test * @bug 8000968 * @key regression - * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32 + * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32 * @library /testlibrary */ @@ -52,7 +52,7 @@ OutputAnalyzer output; pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UseCompressedKlassPointers", + "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops", "-XX:ObjectAlignmentInBytes=" + alignment, "-version"); diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/InitialThreadOverflow/DoOverflow.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/InitialThreadOverflow/DoOverflow.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class DoOverflow {
+
+    static int count;
+
+    public void overflow() {
+        count+=1;
+        overflow();
+    }
+
+    public static void printIt() {
+        System.out.println("Going to overflow stack");
+        try {
+            new DoOverflow().overflow();
+        } catch(java.lang.StackOverflowError e) {
+            System.out.println("Overflow OK " + count);
+        }
+    }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/InitialThreadOverflow/invoke.cxx
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/InitialThreadOverflow/invoke.cxx	Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <assert.h>
+#include <jni.h>
+
+#include <pthread.h>
+
+JavaVM* jvm;
+
+void *
+floobydust (void *p) {
+  JNIEnv *env;
+
+  jvm->AttachCurrentThread((void**)&env, NULL);
+
+  jclass class_id = env->FindClass ("DoOverflow");
+  assert (class_id);
+
+  jmethodID method_id = env->GetStaticMethodID(class_id, "printIt", "()V");
+  assert (method_id);
+
+  env->CallStaticVoidMethod(class_id, method_id, NULL);
+
+  jvm->DetachCurrentThread();
+}
+
+int
+main (int argc, const char** argv) {
+  JavaVMOption options[1];
+  options[0].optionString = (char*) "-Xss320k";
+
+  JavaVMInitArgs vm_args;
+  vm_args.version = JNI_VERSION_1_2;
+  vm_args.ignoreUnrecognized = JNI_TRUE;
+  vm_args.options = options;
+  vm_args.nOptions = 1;
+
+  JNIEnv* env;
+  jint result = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);
+  assert(result >= 0);
+
+  pthread_t thr;
+  pthread_create(&thr, NULL, floobydust, NULL);
+  pthread_join(thr, NULL);
+
+  floobydust(NULL);
+
+  return 0;
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/InitialThreadOverflow/testme.sh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/InitialThreadOverflow/testme.sh	Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. + +# @test testme.sh +# @bug 8009062 +# @summary Poor performance of JNI AttachCurrentThread after fix for 7017193 +# @compile DoOverflow.java +# @run shell testme.sh + +set -x +if [ "${TESTSRC}" = "" ] +then + TESTSRC=${PWD} + echo "TESTSRC not set. Using "${TESTSRC}" as default" +fi +echo "TESTSRC=${TESTSRC}" +## Adding common setup Variables for running shell tests. +. ${TESTSRC}/../../test_env.sh + +if [ "${VM_OS}" != "linux" ] +then + echo "Test only valid for Linux" + exit 0 +fi + +gcc_cmd=`which g++` +if [ "x$gcc_cmd" = "x" ]; then + echo "WARNING: g++ not found. Cannot execute test." 2>&1 + exit 0; +fi + +CFLAGS="-m${VM_BITS}" + +LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH + +cp ${TESTSRC}${FS}invoke.cxx . + +# Copy the result of our @compile action: +cp ${TESTCLASSES}${FS}DoOverflow.class . + +echo "Compilation flag: ${COMP_FLAG}" +# Note pthread may not be found thus invoke creation will fail to be created. +# Check to ensure you have a /usr/lib/libpthread.so if you don't please look +# for /usr/lib/`uname -m`-linux-gnu version ensure to add that path to below compilation. + +$gcc_cmd -DLINUX ${CFLAGS} -o invoke \ + -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \ + -L${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE} \ + -ljvm -lpthread invoke.cxx + +./invoke +exit $? diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/LoadClass/LoadClassNegative.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/LoadClass/LoadClassNegative.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @key regression + * @bug 8020675 + * @summary make sure there is no fatal error if a class is loaded from an invalid jar file which is in the bootclasspath + * @library /testlibrary + * @build TestForName + * @build LoadClassNegative + * @run main LoadClassNegative + */ + +import java.io.File; +import com.oracle.java.testlibrary.*; + +public class LoadClassNegative { + + public static void main(String args[]) throws Exception { + String bootCP = "-Xbootclasspath/a:" + System.getProperty("test.src") + + File.separator + "dummy.jar"; + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + bootCP, + "TestForName"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("ClassNotFoundException"); + output.shouldHaveExitValue(0); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/LoadClass/TestForName.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/LoadClass/TestForName.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +public class TestForName { + public static void main(String[] args) { + try { + Class cls = Class.forName("xxx"); + System.out.println("Class = " + cls.getName()); + } catch (ClassNotFoundException cnfe) { + cnfe.printStackTrace(); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/LoadClass/dummy.jar diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/NMT/ThreadedVirtualAllocTestType.java --- a/test/runtime/NMT/ThreadedVirtualAllocTestType.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/NMT/ThreadedVirtualAllocTestType.java Fri Oct 11 10:38:03 2013 +0200 @@ -45,6 +45,13 @@ String pid = Integer.toString(ProcessTools.getProcessId()); ProcessBuilder pb = new ProcessBuilder(); + boolean has_nmt_detail = wb.NMTIsDetailSupported(); + if (has_nmt_detail) { + System.out.println("NMT detail support detected."); + } else { + System.out.println("NMT detail support not detected."); + } + Thread reserveThread = new Thread() { public void run() { addr = wb.NMTReserveMemory(reserveSize); @@ -58,7 +65,9 @@ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"}); output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=512KB, committed=0KB)"); - output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 512KB for Test"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 512KB for Test"); + } Thread commitThread = new Thread() { public void run() { @@ -72,7 +81,9 @@ output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=512KB, committed=128KB)"); - output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB"); + } Thread uncommitThread = new Thread() { public void run() { diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/NMT/VirtualAllocTestType.java --- a/test/runtime/NMT/VirtualAllocTestType.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/NMT/VirtualAllocTestType.java Fri Oct 11 10:38:03 2013 +0200 @@ -46,13 +46,22 @@ String pid = Integer.toString(ProcessTools.getProcessId()); ProcessBuilder pb = new ProcessBuilder(); + boolean has_nmt_detail = wb.NMTIsDetailSupported(); + if (has_nmt_detail) { + System.out.println("NMT detail support detected."); + } else { + System.out.println("NMT detail support not detected."); + } + addr = wb.NMTReserveMemory(reserveSize); mergeData(); + pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"}); - pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"}); output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=256KB, committed=0KB)"); - output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 256KB for Test"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 256KB for Test"); + } wb.NMTCommitMemory(addr, commitSize); @@ -60,7 +69,9 @@ output = new OutputAnalyzer(pb.start()); output.shouldContain("Test (reserved=256KB, committed=128KB)"); - output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 
0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB"); + if (has_nmt_detail) { + output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB"); + } wb.NMTUncommitMemory(addr, commitSize); @@ -71,7 +82,6 @@ output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed"); wb.NMTReleaseMemory(addr, reserveSize); - mergeData(); output = new OutputAnalyzer(pb.start()); diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java --- a/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Fri Oct 11 10:38:03 2013 +0200 @@ -84,7 +84,7 @@ // there is a chance such reservation will fail // If it does, it is NOT considered a failure of the feature, // rather a possible expected outcome, though not likely - output.shouldContain("Could not allocate metaspace at a compatible address"); + output.shouldContain("Unable to use shared archive"); output.shouldHaveExitValue(1); } } diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/XCheckJniJsig/XCheckJSig.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/XCheckJniJsig/XCheckJSig.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+/*
+ * @test
+ * @bug 7051189 8023393
+ * @summary Need to suppress info message if -Xcheck:jni is used with libjsig.so
+ * @library /testlibrary
+ * @run main XCheckJSig
+ */
+
+import java.util.*;
+import com.oracle.java.testlibrary.*;
+
+public class XCheckJSig {
+    public static void main(String args[]) throws Throwable {
+
+        System.out.println("Regression test for bugs 7051189 and 8023393");
+        if (!Platform.isSolaris() && !Platform.isLinux() && !Platform.isOSX()) {
+            System.out.println("Test only applicable on Solaris, Linux, and Mac OSX, skipping");
+            return;
+        }
+
+        String jdk_path = System.getProperty("test.jdk");
+        String os_arch = Platform.getOsArch();
+        String libjsig;
+        String env_var;
+        if (Platform.isOSX()) {
+            libjsig = jdk_path + "/jre/lib/server/libjsig.dylib";
+            env_var = "DYLD_INSERT_LIBRARIES";
+        } else {
+            libjsig = jdk_path + "/jre/lib/" + os_arch + "/libjsig.so";
+            env_var = "LD_PRELOAD";
+        }
+        String java_program;
+        if (Platform.isSolaris()) {
+            // On Solaris, need to call the 64-bit Java directly in order for
+            // LD_PRELOAD to work because libjsig.so is 64-bit.
+            java_program = jdk_path + "/jre/bin/" + os_arch + "/java";
+        } else {
+            java_program = JDKToolFinder.getJDKTool("java");
+        }
+        // If this test fails, these might be useful to know.
+        System.out.println("libjsig: " + libjsig);
+        System.out.println("osArch: " + os_arch);
+        System.out.println("java_program: " + java_program);
+
+        ProcessBuilder pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-version");
+        Map<String, String> env = pb.environment();
+        env.put(env_var, libjsig);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+
+        pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-verbose:jni", "-version");
+        env = pb.environment();
+        env.put(env_var, libjsig);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("libjsig is activated");
+        output.shouldHaveExitValue(0);
+    }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/runtime/contended/Options.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/contended/Options.java	Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +import com.oracle.java.testlibrary.*; + +/* + * @test + * @bug 8006997 + * @summary ContendedPaddingWidth should be range-checked + * + * @library /testlibrary + * @run main Options + */ +public class Options { + + public static void main(String[] args) throws Exception { + ProcessBuilder pb; + OutputAnalyzer output; + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-128", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be in between"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-8", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be in between"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-1", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be in between"); + output.shouldContain("must be a multiple of 8"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=0", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=1", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be a multiple of 8"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8184", "-version"); // 8192-8 = 8184 + output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8191", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be a multiple of 8"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8192", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8193", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be in between"); + output.shouldContain("must be a multiple of 8"); + output.shouldHaveExitValue(1); + + pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8200", "-version"); // 8192+8 = 8200 + output = new OutputAnalyzer(pb.start()); + output.shouldContain("ContendedPaddingWidth"); + output.shouldContain("must be in between"); + output.shouldHaveExitValue(1); + + } + +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/OutputAnalyzerReportingTest.java --- a/test/testlibrary/OutputAnalyzerReportingTest.java Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - - -/* - * @test - * @summary Test the OutputAnalyzer reporting functionality, - * such as printing additional diagnostic info - * (exit code, stdout, stderr, command line, etc.) - * @library /testlibrary - */ - -import java.io.ByteArrayOutputStream; -import java.io.PrintStream; - -import com.oracle.java.testlibrary.OutputAnalyzer; -import com.oracle.java.testlibrary.ProcessTools; - - -public class OutputAnalyzerReportingTest { - - public static void main(String[] args) throws Exception { - // Create the output analyzer under test - String stdout = "aaaaaa"; - String stderr = "bbbbbb"; - OutputAnalyzer output = new OutputAnalyzer(stdout, stderr); - - // Expected summary values should be the same for all cases, - // since the outputAnalyzer object is the same - String expectedExitValue = "-1"; - String expectedSummary = - " stdout: [" + stdout + "];\n" + - " stderr: [" + stderr + "]\n" + - " exitValue = " + expectedExitValue + "\n"; - - - DiagnosticSummaryTestRunner testRunner = - new DiagnosticSummaryTestRunner(); - - // should have exit value - testRunner.init(expectedSummary); - int unexpectedExitValue = 2; - try { - output.shouldHaveExitValue(unexpectedExitValue); - } catch (RuntimeException e) { } - testRunner.closeAndCheckResults(); - - // should not contain - testRunner.init(expectedSummary); - try { - output.shouldNotContain(stdout); - } catch (RuntimeException e) { } - testRunner.closeAndCheckResults(); - - // should contain - testRunner.init(expectedSummary); - try { - output.shouldContain("unexpected-stuff"); - } catch (RuntimeException e) { } - testRunner.closeAndCheckResults(); - - // should not match - testRunner.init(expectedSummary); - try { - output.shouldNotMatch("[a]"); - } catch (RuntimeException e) { } - testRunner.closeAndCheckResults(); - - // should match - testRunner.init(expectedSummary); - try { - output.shouldMatch("[qwerty]"); - } catch (RuntimeException e) { } - testRunner.closeAndCheckResults(); - - } - - private static class DiagnosticSummaryTestRunner { - private ByteArrayOutputStream byteStream = - new ByteArrayOutputStream(10000); - - private String expectedSummary = ""; - private PrintStream errStream; - - - public void init(String expectedSummary) { - this.expectedSummary = expectedSummary; - byteStream.reset(); - errStream = new PrintStream(byteStream); - System.setErr(errStream); - } - - public void closeAndCheckResults() { - // check results - errStream.close(); - String stdErrStr = byteStream.toString(); - if (!stdErrStr.contains(expectedSummary)) { - throw new RuntimeException("The output does not contain " - + "the diagnostic message, or 
the message is incorrect"); - } - } - } - -} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/OutputAnalyzerTest.java --- a/test/testlibrary/OutputAnalyzerTest.java Thu Oct 10 18:26:22 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,176 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @summary Test the OutputAnalyzer utility class - * @library /testlibrary - */ - -import com.oracle.java.testlibrary.OutputAnalyzer; - -public class OutputAnalyzerTest { - - public static void main(String args[]) throws Exception { - - String stdout = "aaaaaa"; - String stderr = "bbbbbb"; - - // Regexps used for testing pattern matching of the test input - String stdoutPattern = "[a]"; - String stderrPattern = "[b]"; - String nonExistingPattern = "[c]"; - - OutputAnalyzer output = new OutputAnalyzer(stdout, stderr); - - if (!stdout.equals(output.getStdout())) { - throw new Exception("getStdout() returned '" + output.getStdout() + "', expected '" + stdout + "'"); - } - - if (!stderr.equals(output.getStderr())) { - throw new Exception("getStderr() returned '" + output.getStderr() + "', expected '" + stderr + "'"); - } - - try { - output.shouldContain(stdout); - output.stdoutShouldContain(stdout); - output.shouldContain(stderr); - output.stderrShouldContain(stderr); - } catch (RuntimeException e) { - throw new Exception("shouldContain() failed", e); - } - - try { - output.shouldContain("cccc"); - throw new Exception("shouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stdoutShouldContain(stderr); - throw new Exception("stdoutShouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stderrShouldContain(stdout); - throw new Exception("stdoutShouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.shouldNotContain("cccc"); - output.stdoutShouldNotContain("cccc"); - output.stderrShouldNotContain("cccc"); - } catch (RuntimeException e) { - throw new Exception("shouldNotContain() failed", e); - } - - try { - output.shouldNotContain(stdout); - throw new Exception("shouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stdoutShouldNotContain(stdout); - throw new Exception("shouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stderrShouldNotContain(stderr); - throw new 
Exception("shouldContain() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - // Should match - try { - output.shouldMatch(stdoutPattern); - output.stdoutShouldMatch(stdoutPattern); - output.shouldMatch(stderrPattern); - output.stderrShouldMatch(stderrPattern); - } catch (RuntimeException e) { - throw new Exception("shouldMatch() failed", e); - } - - try { - output.shouldMatch(nonExistingPattern); - throw new Exception("shouldMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stdoutShouldMatch(stderrPattern); - throw new Exception( - "stdoutShouldMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stderrShouldMatch(stdoutPattern); - throw new Exception( - "stderrShouldMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - // Should not match - try { - output.shouldNotMatch(nonExistingPattern); - output.stdoutShouldNotMatch(nonExistingPattern); - output.stderrShouldNotMatch(nonExistingPattern); - } catch (RuntimeException e) { - throw new Exception("shouldNotMatch() failed", e); - } - - try { - output.shouldNotMatch(stdoutPattern); - throw new Exception("shouldNotMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stdoutShouldNotMatch(stdoutPattern); - throw new Exception("shouldNotMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - - try { - output.stderrShouldNotMatch(stderrPattern); - throw new Exception("shouldNotMatch() failed to throw exception"); - } catch (RuntimeException e) { - // expected - } - } -} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/Asserts.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/com/oracle/java/testlibrary/Asserts.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.java.testlibrary; + +/** + * Asserts that can be used for verifying assumptions in tests. + * + * An assertion will throw a {@link RuntimeException} if the assertion isn't + * valid. All the asserts can be imported into a test by using a static + * import: + * + *
+ * <pre>
+ * {@code
+ * import static com.oracle.java.testlibrary.Asserts.*;
+ * }
+ * </pre>
+ *
+ * Always provide a message describing the assumption if the line number of the
+ * failing assertion isn't enough to understand why the assumption failed. For
+ * example, if the assertion is in a loop or in a method that is called
+ * multiple times, then the line number won't provide enough context to
+ * understand the failure.
+ * 
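+ * For illustration, a few typical calls (a sketch only; the values and
+ * messages here are hypothetical):
+ *
+ * <pre>
+ * {@code
+ * assertEquals(2 + 2, 4, "arithmetic is broken");
+ * assertLT(1, 2, "expected 1 < 2");
+ * assertNotNull(System.getProperty("java.home"), "java.home should be set");
+ * }
+ * </pre>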
+ */
+public class Asserts {
+
+    /**
+     * Shorthand for {@link #assertLessThan(T, T)}.
+     *
+     * @see #assertLessThan(T, T)
+     */
+    public static <T extends Comparable<T>> void assertLT(T lhs, T rhs) {
+        assertLessThan(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertLessThan(T, T, String)}.
+     *
+     * @see #assertLessThan(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertLT(T lhs, T rhs, String msg) {
+        assertLessThan(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertLessThan(T, T, String)} with a default message.
+     *
+     * @see #assertLessThan(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertLessThan(T lhs, T rhs) {
+        String msg = "Expected that " + format(lhs) + " < " + format(rhs);
+        assertLessThan(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is less than {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static <T extends Comparable<T>> void assertLessThan(T lhs, T rhs, String msg) {
+        assertTrue(compare(lhs, rhs, msg) < 0, msg);
+    }
+
+    /**
+     * Shorthand for {@link #assertLessThanOrEqual(T, T)}.
+     *
+     * @see #assertLessThanOrEqual(T, T)
+     */
+    public static <T extends Comparable<T>> void assertLTE(T lhs, T rhs) {
+        assertLessThanOrEqual(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertLessThanOrEqual(T, T, String)}.
+     *
+     * @see #assertLessThanOrEqual(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertLTE(T lhs, T rhs, String msg) {
+        assertLessThanOrEqual(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertLessThanOrEqual(T, T, String)} with a default message.
+     *
+     * @see #assertLessThanOrEqual(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertLessThanOrEqual(T lhs, T rhs) {
+        String msg = "Expected that " + format(lhs) + " <= " + format(rhs);
+        assertLessThanOrEqual(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is less than or equal to {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static <T extends Comparable<T>> void assertLessThanOrEqual(T lhs, T rhs, String msg) {
+        assertTrue(compare(lhs, rhs, msg) <= 0, msg);
+    }
+
+    /**
+     * Shorthand for {@link #assertEquals(T, T)}.
+     *
+     * @see #assertEquals(T, T)
+     */
+    public static void assertEQ(Object lhs, Object rhs) {
+        assertEquals(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertEquals(T, T, String)}.
+     *
+     * @see #assertEquals(T, T, String)
+     */
+    public static void assertEQ(Object lhs, Object rhs, String msg) {
+        assertEquals(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertEquals(T, T, String)} with a default message.
+     *
+     * @see #assertEquals(T, T, String)
+     */
+    public static void assertEquals(Object lhs, Object rhs) {
+        String msg = "Expected " + format(lhs) + " to equal " + format(rhs);
+        assertEquals(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is equal to {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertEquals(Object lhs, Object rhs, String msg) {
+        if (lhs == null) {
+            if (rhs != null) {
+                error(msg);
+            }
+        } else {
+            assertTrue(lhs.equals(rhs), msg);
+        }
+    }
+
+    /**
+     * Shorthand for {@link #assertGreaterThanOrEqual(T, T)}.
+     *
+     * @see #assertGreaterThanOrEqual(T, T)
+     */
+    public static <T extends Comparable<T>> void assertGTE(T lhs, T rhs) {
+        assertGreaterThanOrEqual(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertGreaterThanOrEqual(T, T, String)}.
+     *
+     * @see #assertGreaterThanOrEqual(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertGTE(T lhs, T rhs, String msg) {
+        assertGreaterThanOrEqual(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertGreaterThanOrEqual(T, T, String)} with a default message.
+     *
+     * @see #assertGreaterThanOrEqual(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertGreaterThanOrEqual(T lhs, T rhs) {
+        String msg = "Expected that " + format(lhs) + " >= " + format(rhs);
+        assertGreaterThanOrEqual(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is greater than or equal to {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static <T extends Comparable<T>> void assertGreaterThanOrEqual(T lhs, T rhs, String msg) {
+        assertTrue(compare(lhs, rhs, msg) >= 0, msg);
+    }
+
+    /**
+     * Shorthand for {@link #assertGreaterThan(T, T)}.
+     *
+     * @see #assertGreaterThan(T, T)
+     */
+    public static <T extends Comparable<T>> void assertGT(T lhs, T rhs) {
+        assertGreaterThan(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertGreaterThan(T, T, String)}.
+     *
+     * @see #assertGreaterThan(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertGT(T lhs, T rhs, String msg) {
+        assertGreaterThan(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertGreaterThan(T, T, String)} with a default message.
+     *
+     * @see #assertGreaterThan(T, T, String)
+     */
+    public static <T extends Comparable<T>> void assertGreaterThan(T lhs, T rhs) {
+        String msg = "Expected that " + format(lhs) + " > " + format(rhs);
+        assertGreaterThan(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is greater than {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static <T extends Comparable<T>> void assertGreaterThan(T lhs, T rhs, String msg) {
+        assertTrue(compare(lhs, rhs, msg) > 0, msg);
+    }
+
+    /**
+     * Shorthand for {@link #assertNotEquals(T, T)}.
+     *
+     * @see #assertNotEquals(T, T)
+     */
+    public static void assertNE(Object lhs, Object rhs) {
+        assertNotEquals(lhs, rhs);
+    }
+
+    /**
+     * Shorthand for {@link #assertNotEquals(T, T, String)}.
+     *
+     * @see #assertNotEquals(T, T, String)
+     */
+    public static void assertNE(Object lhs, Object rhs, String msg) {
+        assertNotEquals(lhs, rhs, msg);
+    }
+
+    /**
+     * Calls {@link #assertNotEquals(T, T, String)} with a default message.
+     *
+     * @see #assertNotEquals(T, T, String)
+     */
+    public static void assertNotEquals(Object lhs, Object rhs) {
+        String msg = "Expected " + format(lhs) + " to not equal " + format(rhs);
+        assertNotEquals(lhs, rhs, msg);
+    }
+
+    /**
+     * Asserts that {@code lhs} is not equal to {@code rhs}.
+     *
+     * @param lhs The left hand side of the comparison.
+     * @param rhs The right hand side of the comparison.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertNotEquals(Object lhs, Object rhs, String msg) {
+        if (lhs == null) {
+            if (rhs == null) {
+                error(msg);
+            }
+        } else {
+            assertFalse(lhs.equals(rhs), msg);
+        }
+    }
+
+    /**
+     * Calls {@link #assertNull(Object, String)} with a default message.
+     *
+     * @see #assertNull(Object, String)
+     */
+    public static void assertNull(Object o) {
+        assertNull(o, "Expected " + format(o) + " to be null");
+    }
+
+    /**
+     * Asserts that {@code o} is null.
+     *
+     * @param o The reference assumed to be null.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertNull(Object o, String msg) {
+        assertEquals(o, null, msg);
+    }
+
+    /**
+     * Calls {@link #assertNotNull(Object, String)} with a default message.
+     *
+     * @see #assertNotNull(Object, String)
+     */
+    public static void assertNotNull(Object o) {
+        assertNotNull(o, "Expected non null reference");
+    }
+
+    /**
+     * Asserts that {@code o} is not null.
+     *
+     * @param o The reference assumed not to be null.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertNotNull(Object o, String msg) {
+        assertNotEquals(o, null, msg);
+    }
+
+    /**
+     * Calls {@link #assertFalse(boolean, String)} with a default message.
+     *
+     * @see #assertFalse(boolean, String)
+     */
+    public static void assertFalse(boolean value) {
+        assertFalse(value, "Expected value to be false");
+    }
+
+    /**
+     * Asserts that {@code value} is {@code false}.
+     *
+     * @param value The value assumed to be false.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertFalse(boolean value, String msg) {
+        assertTrue(!value, msg);
+    }
+
+    /**
+     * Calls {@link #assertTrue(boolean, String)} with a default message.
+     *
+     * @see #assertTrue(boolean, String)
+     */
+    public static void assertTrue(boolean value) {
+        assertTrue(value, "Expected value to be true");
+    }
+
+    /**
+     * Asserts that {@code value} is {@code true}.
+     *
+     * @param value The value assumed to be true.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if the assertion isn't valid.
+     */
+    public static void assertTrue(boolean value, String msg) {
+        if (!value) {
+            error(msg);
+        }
+    }
+
+    private static <T extends Comparable<T>> int compare(T lhs, T rhs, String msg) {
+        assertNotNull(lhs, msg);
+        assertNotNull(rhs, msg);
+        return lhs.compareTo(rhs);
+    }
+
+    private static String format(Object o) {
+        return o == null? "null" : o.toString();
+    }
+
+    private static void error(String msg) {
+        throw new RuntimeException(msg);
+    }
+
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/ByteCodeLoader.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/com/oracle/java/testlibrary/ByteCodeLoader.java Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.security.SecureClassLoader;
+
+/**
+ * {@code ByteCodeLoader} can be used for easy loading of byte code already
+ * present in memory.
+ *
+ * {@code InMemoryJavaCompiler} can be used for compiling source code in a
+ * string into byte code, which then can be loaded with {@code ByteCodeLoader}.
+ *
+ * @see InMemoryJavaCompiler
+ */
+public class ByteCodeLoader extends SecureClassLoader {
+    private final String className;
+    private final byte[] byteCode;
+
+    /**
+     * Creates a new {@code ByteCodeLoader} ready to load a class with the
+     * given name and the given byte code.
+     *
+     * @param className The name of the class
+     * @param byteCode The byte code of the class
+     */
+    public ByteCodeLoader(String className, byte[] byteCode) {
+        this.className = className;
+        this.byteCode = byteCode;
+    }
+
+    @Override
+    protected Class<?> findClass(String name) throws ClassNotFoundException {
+        if (!name.equals(className)) {
+            throw new ClassNotFoundException(name);
+        }
+
+        return defineClass(name, byteCode, 0, byteCode.length);
+    }
+
+    /**
+     * Utility method for creating a new {@code ByteCodeLoader} and then
+     * directly loading the given byte code.
+     *
+     * @param className The name of the class
+     * @param byteCode The byte code for the class
+     * @throws ClassNotFoundException if the class can't be loaded
+     * @return A {@link Class} object representing the class
+     */
+    public static Class<?> load(String className, byte[] byteCode) throws ClassNotFoundException {
+        return new ByteCodeLoader(className, byteCode).loadClass(className);
+    }
+}
diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/InMemoryJavaCompiler.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/com/oracle/java/testlibrary/InMemoryJavaCompiler.java Fri Oct 11 10:38:03 2013 +0200
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import java.net.URI;
+import java.util.Arrays;
+
+import javax.tools.ForwardingJavaFileManager;
+import javax.tools.ForwardingJavaFileManager;
+import javax.tools.FileObject;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaCompiler.CompilationTask;
+import javax.tools.JavaFileManager;
+import javax.tools.JavaFileObject;
+import javax.tools.JavaFileObject.Kind;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.ToolProvider;
+
+/**
+ * {@code InMemoryJavaCompiler} can be used for compiling a {@link
+ * CharSequence} to a {@code byte[]}.
+ *
+ * The compiler will not use the file system at all, instead using a {@link
+ * ByteArrayOutputStream} for storing the byte code. For the source code, any
+ * kind of {@link CharSequence} can be used, e.g. {@link String}, {@link
+ * StringBuffer} or {@link StringBuilder}.
+ *
+ * The {@code InMemoryJavaCompiler} can easily be used together with a
+ * {@code ByteCodeLoader} to compile and load source code in a {@link String}:
+ * <pre>
+ * {@code
+ * import com.oracle.java.testlibrary.InMemoryJavaCompiler;
+ * import com.oracle.java.testlibrary.ByteCodeLoader;
+ *
+ * class Example {
+ *     public static void main(String[] args) {
+ *         String className = "Foo";
+ *         String sourceCode = "public class " + className + " {" +
+ *                             "    public void bar() {" +
+ *                             "        System.out.println(\"Hello from bar!\");" +
+ *                             "    }" +
+ *                             "}";
+ *         byte[] byteCode = InMemoryJavaCompiler.compile(className, sourceCode);
+ * Class fooClass = ByteCodeLoader.load(className, byteCode);
+ *     }
+ * }
+ * }
+ * </pre>
+ */ +public class InMemoryJavaCompiler { + private static class MemoryJavaFileObject extends SimpleJavaFileObject { + private final String className; + private final CharSequence sourceCode; + private final ByteArrayOutputStream byteCode; + + public MemoryJavaFileObject(String className, CharSequence sourceCode) { + super(URI.create("string:///" + className.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE); + this.className = className; + this.sourceCode = sourceCode; + this.byteCode = new ByteArrayOutputStream(); + } + + @Override + public CharSequence getCharContent(boolean ignoreEncodingErrors) { + return sourceCode; + } + + @Override + public OutputStream openOutputStream() throws IOException { + return byteCode; + } + + public byte[] getByteCode() { + return byteCode.toByteArray(); + } + + public String getClassName() { + return className; + } + } + + private static class FileManagerWrapper extends ForwardingJavaFileManager { + private MemoryJavaFileObject file; + + public FileManagerWrapper(MemoryJavaFileObject file) { + super(getCompiler().getStandardFileManager(null, null, null)); + this.file = file; + } + + @Override + public JavaFileObject getJavaFileForOutput(Location location, String className, + Kind kind, FileObject sibling) + throws IOException { + if (!file.getClassName().equals(className)) { + throw new IOException("Expected class with name " + file.getClassName() + + ", but got " + className); + } + return file; + } + } + + /** + * Compiles the class with the given name and source code. + * + * @param className The name of the class + * @param sourceCode The source code for the class with name {@code className} + * @throws RuntimeException if the compilation did not succeed + * @return The resulting byte code from the compilation + */ + public static byte[] compile(String className, CharSequence sourceCode) { + MemoryJavaFileObject file = new MemoryJavaFileObject(className, sourceCode); + CompilationTask task = getCompilationTask(file); + + if(!task.call()) { + throw new RuntimeException("Could not compile " + className + " with source code " + sourceCode); + } + + return file.getByteCode(); + } + + private static JavaCompiler getCompiler() { + return ToolProvider.getSystemJavaCompiler(); + } + + private static CompilationTask getCompilationTask(MemoryJavaFileObject file) { + return getCompiler().getTask(null, new FileManagerWrapper(file), null, null, null, Arrays.asList(file)); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/InputArguments.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.java.testlibrary; + +import java.lang.management.RuntimeMXBean; +import java.lang.management.ManagementFactory; +import java.util.List; + +/** + * This class provides access to the input arguments to the VM. + */ +public class InputArguments { + private static final List args; + + static { + RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean(); + args = runtimeMxBean.getInputArguments(); + } + + /** + * Returns true if {@code arg} is an input argument to the VM. + * + * This is useful for checking boolean flags such as -XX:+UseSerialGC or + * -XX:-UsePerfData. + * + * @param arg The name of the argument. + * @return {@code true} if the given argument is an input argument, + * otherwise {@code false}. + */ + public static boolean contains(String arg) { + return args.contains(arg); + } + + /** + * Returns true if {@code prefix} is the start of an input argument to the + * VM. + * + * This is useful for checking if flags describing a quantity, such as + * -XX:+MaxMetaspaceSize=100m, is set without having to know the quantity. + * To check if the flag -XX:MaxMetaspaceSize is set, use + * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}. + * + * @param prefix The start of the argument. + * @return {@code true} if the given argument is the start of an input + * argument, otherwise {@code false}. + */ + public static boolean containsPrefix(String prefix) { + for (String arg : args) { + if (arg.startsWith(prefix)) { + return true; + } + } + return false; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java --- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java Fri Oct 11 10:38:03 2013 +0200 @@ -23,28 +23,84 @@ package com.oracle.java.testlibrary; -import java.io.File; +import java.io.FileNotFoundException; +import java.nio.file.Path; +import java.nio.file.Paths; public final class JDKToolFinder { - private JDKToolFinder() { - } + private JDKToolFinder() { + } + + /** + * Returns the full path to an executable in jdk/bin based on System + * property {@code test.jdk} or {@code compile.jdk} (both are set by the jtreg test suite) + * + * @return Full path to an executable in jdk/bin + */ + public static String getJDKTool(String tool) { - /** - * Returns the full path to an executable in jdk/bin based on System property - * test.jdk (set by jtreg test suite) - * - * @return Full path to an executable in jdk/bin - */ - public static String getJDKTool(String tool) { - String binPath = System.getProperty("test.jdk"); - if (binPath == null) { - throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. 
" - + "When running test separately, set this property using '-Dtest.jdk=/path/to/jdk'."); + // First try to find the executable in test.jdk + try { + return getTool(tool, "test.jdk"); + } catch (FileNotFoundException e) { + + } + + // Now see if it's available in compile.jdk + try { + return getTool(tool, "compile.jdk"); + } catch (FileNotFoundException e) { + throw new RuntimeException("Failed to find " + tool + + ", looked in test.jdk (" + System.getProperty("test.jdk") + + ") and compile.jdk (" + System.getProperty("compile.jdk") + ")"); + } } - binPath += File.separatorChar + "bin" + File.separatorChar + tool; + /** + * Returns the full path to an executable in jdk/bin based on System + * property {@code compile.jdk} + * + * @return Full path to an executable in jdk/bin + */ + public static String getCompileJDKTool(String tool) { + try { + return getTool(tool, "compile.jdk"); + } catch (FileNotFoundException e) { + throw new RuntimeException(e); + } + } - return binPath; - } + /** + * Returns the full path to an executable in jdk/bin based on System + * property {@code test.jdk} + * + * @return Full path to an executable in jdk/bin + */ + public static String getTestJDKTool(String tool) { + try { + return getTool(tool, "test.jdk"); + } catch (FileNotFoundException e) { + throw new RuntimeException(e); + } + } + + private static String getTool(String tool, String property) throws FileNotFoundException { + String jdkPath = System.getProperty(property); + + if (jdkPath == null) { + throw new RuntimeException( + "System property '" + property + "' not set. This property is normally set by jtreg. " + + "When running test separately, set this property using '-D" + property + "=/path/to/jdk'."); + } + + Path toolName = Paths.get("bin", tool + (Platform.isWindows() ? ".exe" : "")); + + Path jdkTool = Paths.get(jdkPath, toolName.toString()); + if (!jdkTool.toFile().exists()) { + throw new FileNotFoundException("Could not find file " + jdkTool.toAbsolutePath()); + } + + return jdkTool.toAbsolutePath().toString(); + } } diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java --- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java Fri Oct 11 10:38:03 2013 +0200 @@ -211,13 +211,13 @@ if (matcher.find()) { reportDiagnosticSummary(); throw new RuntimeException("'" + pattern - + "' found in stdout \n"); + + "' found in stdout: '" + matcher.group() + "' \n"); } matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr); if (matcher.find()) { reportDiagnosticSummary(); throw new RuntimeException("'" + pattern - + "' found in stderr \n"); + + "' found in stderr: '" + matcher.group() + "' \n"); } } @@ -254,6 +254,37 @@ } /** + * Get the captured group of the first string matching the pattern. + * stderr is searched before stdout. + * + * @param pattern The multi-line pattern to match + * @param group The group to capture + * @return The matched string or null if no match was found + */ + public String firstMatch(String pattern, int group) { + Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr); + Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout); + if (stderrMatcher.find()) { + return stderrMatcher.group(group); + } + if (stdoutMatcher.find()) { + return stdoutMatcher.group(group); + } + return null; + } + + /** + * Get the first string matching the pattern. 
+ * stderr is searched before stdout. + * + * @param pattern The multi-line pattern to match + * @return The matched string or null if no match was found + */ + public String firstMatch(String pattern) { + return firstMatch(pattern, 0); + } + + /** * Verify the exit value of the process * * @param expectedExitValue Expected exit value from process diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/PerfCounter.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/com/oracle/java/testlibrary/PerfCounter.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.java.testlibrary; + +import sun.jvmstat.monitor.Monitor; + +/** + * Represents a performance counter in the JVM. + * + * See http://openjdk.java.net/groups/hotspot/docs/Serviceability.html#bjvmstat + * for more details about performance counters. + */ +public class PerfCounter { + private final Monitor monitor; + private final String name; + + PerfCounter(Monitor monitor, String name) { + this.monitor = monitor; + this.name = name; + } + + /** + * Returns the value of this performance counter as a long. + * + * @return The long value of this performance counter + * @throws RuntimeException If the value of the performance counter isn't a long + */ + public long longValue() { + Object value = monitor.getValue(); + if (value instanceof Long) { + return ((Long) value).longValue(); + } + throw new RuntimeException("Expected " + monitor.getName() + " to have a long value"); + } + + /** + * Returns the name of the performance counter. + * + * @return The name of the performance counter. + */ + public String getName() { + return name; + } + + @Override + public String toString() { + return name; + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/PerfCounters.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/com/oracle/java/testlibrary/PerfCounters.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.java.testlibrary; + +import sun.jvmstat.monitor.Monitor; +import sun.jvmstat.monitor.MonitorException; +import sun.jvmstat.monitor.MonitoredHost; +import sun.jvmstat.monitor.MonitoredVm; +import sun.jvmstat.monitor.VmIdentifier; + +/** + * PerfCounters can be used to get a performance counter from the currently + * executing VM. + * + * Throws a runtime exception if an error occurs while communicating with the + * currently executing VM. + */ +public class PerfCounters { + private final static MonitoredVm vm; + + static { + try { + String pid = Integer.toString(ProcessTools.getProcessId()); + VmIdentifier vmId = new VmIdentifier(pid); + MonitoredHost host = MonitoredHost.getMonitoredHost(vmId); + vm = host.getMonitoredVm(vmId); + } catch (Exception e) { + throw new RuntimeException("Could not connect to the VM"); + } + } + + /** + * Returns the performance counter with the given name. + * + * @param name The name of the performance counter. + * @throws IllegalArgumentException If no counter with the given name exists. + * @throws MonitorException If an error occurs while communicating with the VM. + * @return The performance counter with the given name. 
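+ *
+ * For illustration (a sketch only; the counter name below is just an
+ * example and may not exist on every VM):
+ *
+ * <pre>
+ * {@code
+ * PerfCounter loaded = PerfCounters.findByName("java.cls.loadedClasses");
+ * System.out.println(loaded.getName() + " = " + loaded.longValue());
+ * }
+ * </pre>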
+ */ + public static PerfCounter findByName(String name) + throws MonitorException, IllegalArgumentException { + Monitor m = vm.findByName(name); + if (m == null) { + throw new IllegalArgumentException("Did not find a performance counter with name " + name); + } + return new PerfCounter(m, name); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/com/oracle/java/testlibrary/Platform.java --- a/test/testlibrary/com/oracle/java/testlibrary/Platform.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java Fri Oct 11 10:38:03 2013 +0200 @@ -24,50 +24,80 @@ package com.oracle.java.testlibrary; public class Platform { - private static final String osName = System.getProperty("os.name"); - private static final String dataModel = System.getProperty("sun.arch.data.model"); - private static final String vmVersion = System.getProperty("java.vm.version"); - private static final String osArch = System.getProperty("os.arch"); + private static final String osName = System.getProperty("os.name"); + private static final String dataModel = System.getProperty("sun.arch.data.model"); + private static final String vmVersion = System.getProperty("java.vm.version"); + private static final String osArch = System.getProperty("os.arch"); - public static boolean is64bit() { - return dataModel.equals("64"); - } + public static boolean is32bit() { + return dataModel.equals("32"); + } + + public static boolean is64bit() { + return dataModel.equals("64"); + } + + public static boolean isSolaris() { + return isOs("sunos"); + } - public static boolean isSolaris() { - return osName.toLowerCase().startsWith("sunos"); - } + public static boolean isWindows() { + return isOs("win"); + } + + public static boolean isOSX() { + return isOs("mac"); + } - public static boolean isWindows() { - return osName.toLowerCase().startsWith("win"); - } + public static boolean isLinux() { + return isOs("linux"); + } - public static boolean isOSX() { - return osName.toLowerCase().startsWith("mac"); - } + private static boolean isOs(String osname) { + return osName.toLowerCase().startsWith(osname.toLowerCase()); + } + + public static String getOsName() { + return osName; + } - public static boolean isLinux() { - return osName.toLowerCase().startsWith("linux"); - } + public static boolean isDebugBuild() { + return vmVersion.toLowerCase().contains("debug"); + } + + public static String getVMVersion() { + return vmVersion; + } - public static String getOsName() { - return osName; - } + // Returns true for sparc and sparcv9. + public static boolean isSparc() { + return isArch("sparc"); + } - public static boolean isDebugBuild() { - return vmVersion.toLowerCase().contains("debug"); - } + public static boolean isARM() { + return isArch("arm"); + } - public static String getVMVersion() { - return vmVersion; - } + public static boolean isPPC() { + return isArch("ppc"); + } + + public static boolean isX86() { + // On Linux it's 'i386', Windows 'x86' + return (isArch("i386") || isArch("x86")); + } - // Returns true for sparc and sparcv9. 
- public static boolean isSparc() { - return osArch.toLowerCase().startsWith("sparc"); - } + public static boolean isX64() { + // On OSX it's 'x86_64' and on other (Linux, Windows and Solaris) platforms it's 'amd64' + return (isArch("amd64") || isArch("x86_64")); + } - public static String getOsArch() { - return osArch; - } + private static boolean isArch(String archname) { + return osArch.toLowerCase().startsWith(archname.toLowerCase()); + } + + public static String getOsArch() { + return osArch; + } } diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/Makefile Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,73 @@ +# +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +ifneq "x$(ALT_BOOTDIR)" "x" + BOOTDIR := $(ALT_BOOTDIR) +endif + +ifeq "x$(BOOTDIR)" "x" + JDK_HOME := $(shell dirname $(shell which java))/.. +else + JDK_HOME := $(BOOTDIR) +endif + +SRC_DIR = src +BUILD_DIR = build +OUTPUT_DIR = $(BUILD_DIR)/classes +WHITEBOX_DIR = ../whitebox + +JAVAC = $(JDK_HOME)/bin/javac +JAR = $(JDK_HOME)/bin/jar + +SRC_FILES = $(shell find $(SRC_DIR) -name '*.java') + +MAIN_CLASS = sun.hotspot.tools.ctw.CompileTheWorld + +.PHONY: clean cleantmp + +all: ctw.jar cleantmp + +clean: cleantmp + @rm -rf ctw.jar wb.jar + +cleantmp: + @rm -rf filelist manifest.mf + @rm -rf $(BUILD_DIR) + +ctw.jar: filelist wb.jar manifest.mf + @mkdir -p $(OUTPUT_DIR) + $(JAVAC) -sourcepath $(SRC_DIR) -d $(OUTPUT_DIR) -cp wb.jar @filelist + $(JAR) cfm ctw.jar manifest.mf -C $(OUTPUT_DIR) . + +wb.jar: + make -C ${WHITEBOX_DIR} wb.jar + cp ${WHITEBOX_DIR}/wb.jar ./ + make -C ${WHITEBOX_DIR} clean + +filelist: $(SRC_FILES) + @rm -f $@ + @echo $(SRC_FILES) > $@ + +manifest.mf: + @echo "Main-Class: ${MAIN_CLASS}" > manifest.mf diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/README --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/README Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,93 @@ +# +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+DESCRIPTION
+
+This is a replacement for CompileTheWorld (CTW) written in Java. Its purpose
+is to make it possible to use CTW in product builds.
+
+DEPENDENCIES
+
+The tool depends on the WhiteBox API. The sources of WhiteBox are assumed to
+be located in the '../whitebox' directory.
+
+BUILDING
+
+The simplest way to build is to just type 'make'.
+
+The Makefile uses the environment variables 'ALT_BOOTDIR' and 'BOOTDIR' as
+the root directory of the JDK that will be used for compilation and for
+creating the jar.
+
+On a successful build, 'ctw.jar' will be created.
+
+RUNNING
+
+Since the tool uses the WhiteBox API, the options 'UnlockDiagnosticVMOptions'
+and 'WhiteBoxAPI' should be specified, and 'wb.jar' should be added to the
+boot classpath:
+ $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar
+
+Arguments can be paths to '.jar', '.zip', '.lst' files or directories with
+classes; they define which classes will be compiled:
+ - '.jar', '.zip' files and directories are interpreted as in a classpath
+(including the '/*' syntax)
+ - '.lst' files -- files with class names (in Java notation) to compile.
+CTW will try to find these classes with the default class loader, so they
+should be located on the classpath.
+
+Without arguments it works like the old version of CTW: all classes on the
+boot classpath will be compiled, excluding classes in 'rt.jar' if 'rt.jar'
+isn't first on the boot classpath.
+
+Since CTW's flags are also not available in product builds, the tool uses
+properties with the same names:
+ - 'CompileTheWorldPreloadClasses' -- type:boolean, default:true, description:
+Preload all classes used by a class before starting to load it
+ - 'CompileTheWorldStartAt' -- type:long, default:1, description: First class
+to consider
+ - 'CompileTheWorldStopAt' -- type:long, default:Long.MAX_VALUE, description:
+Last class to consider
+
+It also uses these additional properties:
+ - 'sun.hotspot.tools.ctw.verbose' -- type:boolean, default:false,
+description: Verbose output, adds additional information about compilation
+ - 'sun.hotspot.tools.ctw.logfile' -- type:string, default:null,
+description: Path to the logfile; if it is null, standard output will be used.
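+
+For example, to compile only classes 100 through 200 with verbose output
+(an illustrative invocation; combine the properties as needed):
+ $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -DCompileTheWorldStartAt=100 -DCompileTheWorldStopAt=200 -Dsun.hotspot.tools.ctw.verbose=true -jar ctw.jar ${JAVA_HOME}/jre/lib/rt.jar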
+ +EXAMPLES + +compile classes from 'rt.jar': + $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ${JAVA_HOME}/jre/lib/rt.jar + +compile classes from all '.jar' in './testjars' directory: + $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ./testjars/* + +compile classes from './build/classes' directory: + $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ./build/classes + +compile only java.lang.String, java.lang.Object classes: + $ echo java.lang.String > classes.lst + $ echo java.lang.Object >> classes.lst + $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar classes.lst + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathDirEntry.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathDirEntry.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.Set; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.concurrent.Executor; + +import java.io.*; +import java.nio.file.*; +import java.nio.file.attribute.*; + +/** + * Handler for dirs containing classes to compile. 
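+ *
+ * For illustration (a sketch; {@code executor} stands for any suitable
+ * {@link java.util.concurrent.Executor}):
+ *
+ * <pre>
+ * {@code
+ * new ClassPathDirEntry(Paths.get("build", "classes"), executor).process();
+ * }
+ * </pre>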
+ */ +public class ClassPathDirEntry extends PathHandler { + + private final int rootLength = root.toString().length(); + + public ClassPathDirEntry(Path root, Executor executor) { + super(root, executor); + try { + URL url = root.toUri().toURL(); + setLoader(new URLClassLoader(new URL[]{url})); + } catch (MalformedURLException e) { + e.printStackTrace(); + } + } + + @Override + public void process() { + System.out.println("# dir: " + root); + if (!Files.exists(root)) { + return; + } + try { + Files.walkFileTree(root, EnumSet.of(FileVisitOption.FOLLOW_LINKS), + Integer.MAX_VALUE, new CompileFileVisitor()); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + } + + private void processFile(Path file) { + if (Utils.isClassFile(file.toString())) { + processClass(pathToClassName(file)); + } + } + + private String pathToClassName(Path file) { + String fileString; + if (root == file) { + fileString = file.normalize().toString(); + } else { + fileString = file.normalize().toString().substring(rootLength + 1); + } + return Utils.fileNameToClassName(fileString); + } + + private class CompileFileVisitor extends SimpleFileVisitor { + + private final Set ready = new HashSet<>(); + + @Override + public FileVisitResult preVisitDirectory(Path dir, + BasicFileAttributes attrs) throws IOException { + if (ready.contains(dir)) { + return FileVisitResult.SKIP_SUBTREE; + } + ready.add(dir); + return super.preVisitDirectory(dir, attrs); + } + + @Override + public FileVisitResult visitFile(Path file, + BasicFileAttributes attrs) throws IOException { + if (!ready.contains(file)) { + processFile(file); + } + return isFinished() ? FileVisitResult.TERMINATE + : FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, + IOException exc) throws IOException { + return FileVisitResult.CONTINUE; + } + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarEntry.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarEntry.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.*; +import java.util.jar.*; +import java.util.concurrent.Executor; + +import java.io.*; +import java.nio.file.*; + +/** + * Handler for jar-files containing classes to compile. 
+ */ +public class ClassPathJarEntry extends PathHandler { + + public ClassPathJarEntry(Path root, Executor executor) { + super(root, executor); + try { + URL url = root.toUri().toURL(); + setLoader(new URLClassLoader(new URL[]{url})); + } catch (MalformedURLException e) { + e.printStackTrace(); + } + } + + @Override + public void process() { + System.out.println("# jar: " + root); + if (!Files.exists(root)) { + return; + } + try { + JarFile jarFile = new JarFile(root.toFile()); + JarEntry entry; + for (Enumeration e = jarFile.entries(); + e.hasMoreElements(); ) { + entry = e.nextElement(); + processJarEntry(entry); + if (isFinished()) { + return; + } + } + } catch (IOException ioe) { + ioe.printStackTrace(); + } + } + + private void processJarEntry(JarEntry entry) { + String filename = entry.getName(); + if (Utils.isClassFile(filename)) { + processClass(Utils.fileNameToClassName(filename)); + } + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarInDirEntry.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarInDirEntry.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.Executor; + +/** + * Handler for dirs containing jar-files with classes to compile. + */ +public class ClassPathJarInDirEntry extends PathHandler { + + public ClassPathJarInDirEntry(Path root, Executor executor) { + super(root, executor); + } + + @Override + public void process() { + System.out.println("# jar_in_dir: " + root); + if (!Files.exists(root)) { + return; + } + try (DirectoryStream ds + = Files.newDirectoryStream(root, "*.jar")) { + for (Path p : ds) { + new ClassPathJarEntry(p, executor).process(); + if (isFinished()) { + return; + } + } + } catch (IOException ioe) { + ioe.printStackTrace(); + } + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassesListInFile.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassesListInFile.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.Executor; + +/** + * Handler for files containing a list of classes to compile. + */ +public class ClassesListInFile extends PathHandler { + public ClassesListInFile(Path root, Executor executor) { + super(root, executor); + } + + @Override + public void process() { + System.out.println("# list: " + root); + if (!Files.exists(root)) { + return; + } + try { + try (BufferedReader reader = Files.newBufferedReader(root, + StandardCharsets.UTF_8)) { + String line; + while (!isFinished() && ((line = reader.readLine()) != null)) { + processClass(line); + } + } + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/CompileTheWorld.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/CompileTheWorld.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import sun.management.ManagementFactoryHelper; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Paths; + +import java.util.List; +import java.util.concurrent.*; + +public class CompileTheWorld { + /** + * Entry point. 
Compiles classes in {@code args}, or all classes in + * boot-classpath if args is empty + * + * @param args paths to jar/zip, dir contains classes, or to .lst file + * contains list of classes to compile + */ + public static void main(String[] args) { + String logfile = Utils.LOG_FILE; + PrintStream os = null; + if (logfile != null) { + try { + os = new PrintStream(Files.newOutputStream(Paths.get(logfile))); + } catch (IOException io) { + } + } + if (os != null) { + System.setOut(os); + } + + try { + try { + if (ManagementFactoryHelper.getCompilationMXBean() == null) { + throw new RuntimeException( + "CTW can not work in interpreted mode"); + } + } catch (java.lang.NoClassDefFoundError e) { + // compact1, compact2 support + } + String[] paths = args; + boolean skipRtJar = false; + if (args.length == 0) { + paths = getDefaultPaths(); + skipRtJar = true; + } + ExecutorService executor = createExecutor(); + long start = System.currentTimeMillis(); + try { + String path; + for (int i = 0, n = paths.length; i < n + && !PathHandler.isFinished(); ++i) { + path = paths[i]; + if (skipRtJar && i > 0 && isRtJar(path)) { + // rt.jar is not first, so skip it + continue; + } + PathHandler.create(path, executor).process(); + } + } finally { + await(executor); + } + System.out.printf("Done (%d classes, %d methods, %d ms)%n", + Compiler.getClassCount(), + Compiler.getMethodCount(), + System.currentTimeMillis() - start); + } finally { + if (os != null) { + os.close(); + } + } + } + + private static ExecutorService createExecutor() { + final int threadsCount = Math.min( + Runtime.getRuntime().availableProcessors(), + Utils.CI_COMPILER_COUNT); + ExecutorService result; + if (threadsCount > 1) { + result = new ThreadPoolExecutor(threadsCount, threadsCount, + /* keepAliveTime */ 0L, TimeUnit.MILLISECONDS, + new ArrayBlockingQueue<>(threadsCount), + new ThreadPoolExecutor.CallerRunsPolicy()); + } else { + result = new CurrentThreadExecutor(); + } + return result; + } + + private static String[] getDefaultPaths() { + String property = System.getProperty("sun.boot.class.path"); + System.out.println( + "# use 'sun.boot.class.path' as args: " + property); + return Utils.PATH_SEPARATOR.split(property); + } + + private static void await(ExecutorService executor) { + executor.shutdown(); + while (!executor.isTerminated()) { + try { + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } + + private static boolean isRtJar(String path) { + return Utils.endsWithIgnoreCase(path, File.separator + "rt.jar"); + } + + private static class CurrentThreadExecutor extends AbstractExecutorService { + private boolean isShutdown; + + @Override + public void shutdown() { + this.isShutdown = true; + } + + @Override + public List shutdownNow() { + return null; + } + + @Override + public boolean isShutdown() { + return isShutdown; + } + + @Override + public boolean isTerminated() { + return isShutdown; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) + throws InterruptedException { + return isShutdown; + } + + @Override + public void execute(Runnable command) { + command.run(); + } + } +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Compiler.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Compiler.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2013, Oracle and/or its 
affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import sun.hotspot.WhiteBox; +import sun.misc.SharedSecrets; +import sun.reflect.ConstantPool; + +import java.lang.reflect.Executable; + +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Provides a method to compile a whole class. + * Also maintains counters of compiled classes and methods. + */ +public class Compiler { + private Compiler() { } + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + private static final AtomicLong CLASS_COUNT = new AtomicLong(0L); + private static final AtomicLong METHOD_COUNT = new AtomicLong(0L); + private static volatile boolean CLASSES_LIMIT_REACHED = false; + + /** + * @return count of processed classes + */ + public static long getClassCount() { + return CLASS_COUNT.get(); + } + + /** + * @return count of processed methods + */ + public static long getMethodCount() { + return METHOD_COUNT.get(); + } + + /** + * @return {@code true} if the classes limit is reached + */ + public static boolean isLimitReached() { + return CLASSES_LIMIT_REACHED; + } + + /** + * Compiles all methods and constructors. + * + * @param aClass class to compile + * @param executor executor used for compile task invocation + * @throws NullPointerException if {@code aClass} or {@code executor} + * is {@code null} + */ + public static void compileClass(Class aClass, Executor executor) { + Objects.requireNonNull(aClass); + Objects.requireNonNull(executor); + long id = CLASS_COUNT.incrementAndGet(); + if (id > Utils.COMPILE_THE_WORLD_STOP_AT) { + CLASS_COUNT.decrementAndGet(); + CLASSES_LIMIT_REACHED = true; + return; + } + + if (id >= Utils.COMPILE_THE_WORLD_START_AT) { + String name = aClass.getName(); + try { + System.out.printf("[%d]\t%s%n", id, name); + ConstantPool constantPool = SharedSecrets.getJavaLangAccess().
+ getConstantPool(aClass); + if (Utils.COMPILE_THE_WORLD_PRELOAD_CLASSES) { + preloadClasses(name, id, constantPool); + } + long methodCount = 0; + for (Executable e : aClass.getDeclaredConstructors()) { + ++methodCount; + executor.execute(new CompileMethodCommand(id, name, e)); + } + for (Executable e : aClass.getDeclaredMethods()) { + ++methodCount; + executor.execute(new CompileMethodCommand(id, name, e)); + } + METHOD_COUNT.addAndGet(methodCount); + + if (Utils.DEOPTIMIZE_ALL_CLASSES_RATE > 0 + && (id % Utils.DEOPTIMIZE_ALL_CLASSES_RATE == 0)) { + WHITE_BOX.deoptimizeAll(); + } + } catch (Throwable t) { + System.out.printf("[%d]\t%s\tskipping %s%n", id, name, t); + t.printStackTrace(); + } + } + } + + private static void preloadClasses(String className, long id, + ConstantPool constantPool) { + try { + for (int i = 0, n = constantPool.getSize(); i < n; ++i) { + try { + constantPool.getClassAt(i); + } catch (IllegalArgumentException ignore) { + } + } + } catch (Throwable t) { + System.out.printf("[%d]\t%s\tpreloading failed : %s%n", id, + className, t); + } + } + + + + /** + * Compilation task for a single method. + * Compiles the method at all available compilation levels. + */ + private static class CompileMethodCommand implements Runnable { + private final long classId; + private final String className; + private final Executable method; + + /** + * @param classId id of the class + * @param className name of the class + * @param method method to compile + */ + public CompileMethodCommand(long classId, String className, + Executable method) { + this.classId = classId; + this.className = className; + this.method = method; + } + + @Override + public final void run() { + int compLevel = Utils.INITIAL_COMP_LEVEL; + if (Utils.TIERED_COMPILATION) { + for (int i = compLevel; i <= Utils.TIERED_STOP_AT_LEVEL; ++i) { + WHITE_BOX.deoptimizeMethod(method); + compileMethod(method, i); + } + } else { + compileMethod(method, compLevel); + } + } + + private void waitCompilation() { + if (!Utils.BACKGROUND_COMPILATION) { + return; + } + final Object obj = new Object(); + synchronized (obj) { + for (int i = 0; + i < 10 && WHITE_BOX.isMethodQueuedForCompilation(method); + ++i) { + try { + obj.wait(1000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } + } + + private void compileMethod(Executable method, int compLevel) { + if (WHITE_BOX.isMethodCompilable(method, compLevel)) { + try { + WHITE_BOX.enqueueMethodForCompilation(method, compLevel); + waitCompilation(); + int tmp = WHITE_BOX.getMethodCompilationLevel(method); + if (tmp != compLevel) { + logMethod(method, "compilation level = " + tmp + + ", but not " + compLevel); + } else if (Utils.IS_VERBOSE) { + logMethod(method, "compilation level = " + tmp + ".
OK"); + } + } catch (Throwable t) { + logMethod(method, "error on compile at " + compLevel + + " level"); + t.printStackTrace(); + } + } else if (Utils.IS_VERBOSE) { + logMethod(method, "not compilable at " + compLevel); + } + } + + private void logMethod(Executable method, String message) { + StringBuilder builder = new StringBuilder("["); + builder.append(classId); + builder.append("]\t"); + builder.append(className); + builder.append("::"); + builder.append(method.getName()); + builder.append('('); + Class[] params = method.getParameterTypes(); + for (int i = 0, n = params.length - 1; i < n; ++i) { + builder.append(params[i].getName()); + builder.append(", "); + } + if (params.length != 0) { + builder.append(params[params.length - 1].getName()); + } + builder.append(')'); + if (message != null) { + builder.append('\t'); + builder.append(message); + } + System.err.println(builder); + } + } + +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/PathHandler.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/PathHandler.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.hotspot.tools.ctw; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.io.File; + +import java.util.Objects; +import java.util.regex.Pattern; +import java.util.regex.Matcher; +import java.util.concurrent.Executor; + +/** + * Abstract handler for path. + * Concrete subclasses should implement method {@link #process()}. + */ +public abstract class PathHandler { + private static final Pattern JAR_IN_DIR_PATTERN + = Pattern.compile("^(.*[/\\\\])?\\*$"); + protected final Path root; + protected final Executor executor; + private ClassLoader loader; + + /** + * @param root root path to process + * @param executor executor used for process task invocation + * @throws NullPointerException if {@code root} or {@code executor} is + * {@code null} + */ + protected PathHandler(Path root, Executor executor) { + Objects.requireNonNull(root); + Objects.requireNonNull(executor); + this.root = root.normalize(); + this.executor = executor; + this.loader = ClassLoader.getSystemClassLoader(); + } + + /** + * Factory method. Construct concrete handler in depends from {@code path}. 
+ * + * @param path the path to process + * @param executor executor used for compile task invocation + * @throws NullPointerException if {@code path} or {@code executor} is + * {@code null} + */ + public static PathHandler create(String path, Executor executor) { + Objects.requireNonNull(path); + Objects.requireNonNull(executor); + Matcher matcher = JAR_IN_DIR_PATTERN.matcher(path); + if (matcher.matches()) { + path = matcher.group(1); + path = path.isEmpty() ? "." : path; + return new ClassPathJarInDirEntry(Paths.get(path), executor); + } else { + path = path.isEmpty() ? "." : path; + Path p = Paths.get(path); + if (isJarFile(p)) { + return new ClassPathJarEntry(p, executor); + } else if (isListFile(p)) { + return new ClassesListInFile(p, executor); + } else { + return new ClassPathDirEntry(p, executor); + } + } + } + + private static boolean isJarFile(Path path) { + if (Files.isRegularFile(path)) { + String name = path.toString(); + return Utils.endsWithIgnoreCase(name, ".zip") + || Utils.endsWithIgnoreCase(name, ".jar"); + } + return false; + } + + private static boolean isListFile(Path path) { + if (Files.isRegularFile(path)) { + String name = path.toString(); + return Utils.endsWithIgnoreCase(name, ".lst"); + } + return false; + } + + /** + * Processes all classes in the specified path. + */ + public abstract void process(); + + /** + * Sets the class loader that will be used to load classes in + * {@link #processClass(String)}. + * + * @param loader class loader + * @throws NullPointerException if {@code loader} is {@code null} + */ + protected final void setLoader(ClassLoader loader) { + Objects.requireNonNull(loader); + this.loader = loader; + } + + /** + * Processes the specified class. + * @param name fully qualified name of class to process + */ + protected final void processClass(String name) { + try { + Class aClass = Class.forName(name, true, loader); + Compiler.compileClass(aClass, executor); + } catch (ClassNotFoundException | LinkageError e) { + System.out.printf("Class %s loading failed : %s%n", name, + e.getMessage()); + } + } + + /** + * @return {@code true} if processing should be stopped + */ + public static boolean isFinished() { + return Compiler.isLimitReached(); + } + +} + diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Utils.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Utils.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ */ + +package sun.hotspot.tools.ctw; + +import com.sun.management.HotSpotDiagnosticMXBean; +import sun.management.ManagementFactoryHelper; + +import java.io.File; +import java.util.regex.Pattern; + +/** + * Auxiliary methods. + */ +public class Utils { + /** + * Value of {@code -XX:TieredCompilation} + */ + public static final boolean TIERED_COMPILATION + = Boolean.parseBoolean(getVMOption("TieredCompilation", "false")); + /** + * Value of {@code -XX:BackgroundCompilation} + */ + public static final boolean BACKGROUND_COMPILATION + = Boolean.parseBoolean(getVMOption("BackgroundCompilation", + "false")); + /** + * Value of {@code -XX:TieredStopAtLevel} + */ + public static final int TIERED_STOP_AT_LEVEL; + /** + * Value of {@code -XX:CICompilerCount} + */ + public static final Integer CI_COMPILER_COUNT + = Integer.valueOf(getVMOption("CICompilerCount", "1")); + /** + * Initial compilation level. + */ + public static final int INITIAL_COMP_LEVEL; + /** + * Compiled path-separator regexp. + */ + public static final Pattern PATH_SEPARATOR = Pattern.compile( + File.pathSeparator, Pattern.LITERAL); + /** + * Value of {@code -DDeoptimizeAllClassesRate}. Frequency of + * {@code WB.deoptimizeAll()} invocation. If it is less than or equal to + * {@code 0}, {@code WB.deoptimizeAll()} will not be invoked. + */ + public static final int DEOPTIMIZE_ALL_CLASSES_RATE + = Integer.getInteger("DeoptimizeAllClassesRate", -1); + /** + * Value of {@code -DCompileTheWorldStopAt}. Last class to consider. + */ + public static final long COMPILE_THE_WORLD_STOP_AT + = Long.getLong("CompileTheWorldStopAt", Long.MAX_VALUE); + /** + * Value of {@code -DCompileTheWorldStartAt}. First class to consider. + */ + public static final long COMPILE_THE_WORLD_START_AT + = Long.getLong("CompileTheWorldStartAt", 1); + /** + * Value of {@code -DCompileTheWorldPreloadClasses}. Whether to preload + * all classes used by a class before compiling it. + */ + public static final boolean COMPILE_THE_WORLD_PRELOAD_CLASSES; + /** + * Value of {@code -Dsun.hotspot.tools.ctw.verbose}. Verbose output; + * adds additional information about compilation. + */ + public static final boolean IS_VERBOSE + = Boolean.getBoolean("sun.hotspot.tools.ctw.verbose"); + /** + * Value of {@code -Dsun.hotspot.tools.ctw.logfile}. Path to the log + * file; if it is {@code null}, standard output will be used.
+ */ + public static final String LOG_FILE + = System.getProperty("sun.hotspot.tools.ctw.logfile"); + static { + if (Utils.TIERED_COMPILATION) { + INITIAL_COMP_LEVEL = 1; + } else { + String vmName = System.getProperty("java.vm.name"); + if (Utils.endsWithIgnoreCase(vmName, " Server VM")) { + INITIAL_COMP_LEVEL = 4; + } else if (Utils.endsWithIgnoreCase(vmName, " Client VM") + || Utils.endsWithIgnoreCase(vmName, " Minimal VM")) { + INITIAL_COMP_LEVEL = 1; + } else { + throw new RuntimeException("Unknown VM: " + vmName); + } + } + + TIERED_STOP_AT_LEVEL = Integer.parseInt(getVMOption("TieredStopAtLevel", + String.valueOf(INITIAL_COMP_LEVEL))); + } + + static { + String tmp = System.getProperty("CompileTheWorldPreloadClasses"); + if (tmp == null) { + COMPILE_THE_WORLD_PRELOAD_CLASSES = true; + } else { + COMPILE_THE_WORLD_PRELOAD_CLASSES = Boolean.parseBoolean(tmp); + } + } + + public static final String CLASSFILE_EXT = ".class"; + + private Utils() { + } + + /** + * Tests if the string ends with the suffix, ignoring case + * considerations. + * + * @param string the tested string + * @param suffix the suffix + * @return {@code true} if {@code string} ends with the {@code suffix} + * @see String#endsWith(String) + */ + public static boolean endsWithIgnoreCase(String string, String suffix) { + if (string == null || suffix == null) { + return false; + } + int length = suffix.length(); + int toffset = string.length() - length; + if (toffset < 0) { + return false; + } + return string.regionMatches(true, toffset, suffix, 0, length); + } + + /** + * Returns the value of a VM option. + * + * @param name option's name + * @return value of option or {@code null}, if option doesn't exist + * @throws NullPointerException if {@code name} is {@code null} + */ + public static String getVMOption(String name) { + String result; + HotSpotDiagnosticMXBean diagnostic + = ManagementFactoryHelper.getDiagnosticMXBean(); + result = diagnostic.getVMOption(name).getValue(); + return result; + } + + /** + * Returns the value of a VM option, or a default value. + * + * @param name option's name + * @param defaultValue default value + * @return value of option or {@code defaultValue}, if option doesn't exist + * @throws NullPointerException if {@code name} is {@code null} + * @see #getVMOption(String) + */ + public static String getVMOption(String name, String defaultValue) { + String result; + try { + result = getVMOption(name); + } catch (NoClassDefFoundError e) { + // compact1, compact2 support + result = defaultValue; + } + return result == null ? defaultValue : result; + } + + /** + * Tests if the filename is a valid filename for a class file. + * + * @param filename the tested filename + */ + public static boolean isClassFile(String filename) { + // a valid class-file name contains no '.' other than the one + // starting the '.class' extension + return endsWithIgnoreCase(filename, CLASSFILE_EXT) + && (filename.indexOf('.') + == (filename.length() - CLASSFILE_EXT.length())); + } + + /** + * Converts a filename to a class name. + * + * @param filename filename to convert + * @return the corresponding class name.
+ * @throws AssertionError if {@code filename} isn't a valid filename for a + * class file - see {@link #isClassFile(String)} + */ + public static String fileNameToClassName(String filename) { + assert isClassFile(filename); + return filename.substring(0, filename.length() - CLASSFILE_EXT.length()) + .replace(File.separatorChar, '.'); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/Bar.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/Bar.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,5 @@ +public class Bar { + private static void staticMethod() { } + public void method() { } + protected Bar() { } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/ClassesDirTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/ClassesDirTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test ClassesDirTest + * @bug 8012447 + * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src + * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesDirTest Foo Bar + * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar + * @run main ClassesDirTest prepare + * @run main/othervm -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes + * @run main ClassesDirTest check ctw.log + * @summary testing of CompileTheWorld :: classes in directory + * @author igor.ignatyev@oracle.com + */ + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; + +public class ClassesDirTest extends CtwTest { + private static final String[] SHOULD_CONTAIN + = {"# dir: classes", "Done (2 classes, 6 methods, "}; + + private ClassesDirTest() { + super(SHOULD_CONTAIN); + } + + public static void main(String[] args) throws Exception { + new ClassesDirTest().run(args); + } + + protected void prepare() throws Exception { + String path = "classes"; + Files.createDirectory(Paths.get(path)); + Files.move(Paths.get("Foo.class"), Paths.get(path, "Foo.class"), + StandardCopyOption.REPLACE_EXISTING); + Files.move(Paths.get("Bar.class"), Paths.get(path, "Bar.class"), + StandardCopyOption.REPLACE_EXISTING); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/ClassesListTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/ClassesListTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test ClassesListTest + * @bug 8012447 + * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src + * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesListTest Foo Bar + * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar + * @run main ClassesListTest prepare + * @run main/othervm/timeout=600 -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes.lst + * @run main ClassesListTest check ctw.log + * @summary testing of CompileTheWorld :: list of classes in file + * @author igor.ignatyev@oracle.com + */ + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; + +public class ClassesListTest extends CtwTest { + private static final String[] SHOULD_CONTAIN + = {"# list: classes.lst", "Done (4 classes, "}; + + private ClassesListTest() { + super(SHOULD_CONTAIN); + } + + public static void main(String[] args) throws Exception { + new ClassesListTest().run(args); + } + + protected void prepare() throws Exception { + String path = "classes.lst"; + Files.copy(Paths.get(System.getProperty("test.src"), path), + Paths.get(path), StandardCopyOption.REPLACE_EXISTING); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/CtwTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/CtwTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.List; +import java.util.Collections; +import java.util.ArrayList; + +import java.io.File; +import java.io.Writer; +import java.io.FileWriter; +import java.io.IOException; +import java.io.BufferedReader; + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.nio.charset.Charset; + +import com.oracle.java.testlibrary.JDKToolFinder; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public abstract class CtwTest { + protected final String[] shouldContain; + protected CtwTest(String[] shouldContain) { + this.shouldContain = shouldContain; + } + + public void run(String[] args) throws Exception { + if (args.length == 0) { + throw new Error("args is empty"); + } + switch (args[0]) { + case "prepare": + prepare(); + break; + case "check": + check(args); + break; + default: + throw new Error("unrecognized action -- " + args[0]); + } + } + + protected void prepare() throws Exception { } + + protected void check(String[] args) throws Exception { + if (args.length < 2) { + throw new Error("logfile isn't specified"); + } + String logfile = args[1]; + try (BufferedReader r = Files.newBufferedReader(Paths.get(logfile), + Charset.defaultCharset())) { + OutputAnalyzer output = readOutput(r); + for (String test : shouldContain) { + output.shouldContain(test); + } + } + } + + private static OutputAnalyzer readOutput(BufferedReader reader) + throws IOException { + StringBuilder builder = new StringBuilder(); + String eol = String.format("%n"); + String line; + + while ((line = reader.readLine()) != null) { + builder.append(line); + builder.append(eol); + } + return new OutputAnalyzer(builder.toString(), ""); + } + + protected void dump(OutputAnalyzer output, String name) { + try (Writer w = new FileWriter(name + ".out")) { + String s = output.getStdout(); + w.write(s, 0, s.length()); + } catch (IOException io) { + io.printStackTrace(); + } + try (Writer w = new FileWriter(name + ".err")) { + String s = output.getStderr(); + w.write(s, 0, s.length()); + } catch (IOException io) { + io.printStackTrace(); + } + } + + protected ProcessBuilder createJarProcessBuilder(String... command) + throws Exception { + String javapath = JDKToolFinder.getJDKTool("jar"); + + ArrayList<String> args = new ArrayList<>(); + args.add(javapath); + Collections.addAll(args, command); + + return new ProcessBuilder(args.toArray(new String[args.size()])); + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/Foo.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/Foo.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,5 @@ +public class Foo { + private static void staticMethod() { } + public void method() { } + protected Foo() { } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/JarDirTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/JarDirTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test JarDirTest + * @bug 8012447 + * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src + * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarDirTest Foo Bar + * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar + * @run main JarDirTest prepare + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld jars/* + * @run main JarDirTest check ctw.log + * @summary testing of CompileTheWorld :: jars in directory + * @author igor.ignatyev@oracle.com + */ + +import java.io.File; +import java.nio.file.Files; +import java.nio.file.Paths; + +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class JarDirTest extends CtwTest { + private static final String[] SHOULD_CONTAIN + = {"# jar_in_dir: jars", + "# jar: jars" + File.separator +"foo.jar", + "# jar: jars" + File.separator +"bar.jar", + "Done (4 classes, 12 methods, "}; + + private JarDirTest() { + super(SHOULD_CONTAIN); + } + + public static void main(String[] args) throws Exception { + new JarDirTest().run(args); + } + + protected void prepare() throws Exception { + String path = "jars"; + Files.createDirectory(Paths.get(path)); + + ProcessBuilder pb = createJarProcessBuilder("cf", "jars/foo.jar", + "Foo.class", "Bar.class"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + dump(output, "ctw-foo.jar"); + output.shouldHaveExitValue(0); + + pb = createJarProcessBuilder("cf", "jars/bar.jar", "Foo.class", + "Bar.class"); + output = new OutputAnalyzer(pb.start()); + dump(output, "ctw-bar.jar"); + output.shouldHaveExitValue(0); + } + +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/JarsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/JarsTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test JarsTest + * @bug 8012447 + * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src + * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarsTest Foo Bar + * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar + * @run main JarsTest prepare + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld foo.jar bar.jar + * @run main JarsTest check ctw.log + * @summary testing of CompileTheWorld :: jars + * @author igor.ignatyev@oracle.com + */ + +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class JarsTest extends CtwTest { + private static final String[] SHOULD_CONTAIN + = {"# jar: foo.jar", "# jar: bar.jar", + "Done (4 classes, 12 methods, "}; + + private JarsTest() { + super(SHOULD_CONTAIN); + } + + public static void main(String[] args) throws Exception { + new JarsTest().run(args); + } + + protected void prepare() throws Exception { + ProcessBuilder pb = createJarProcessBuilder("cf", "foo.jar", + "Foo.class", "Bar.class"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + dump(output, "ctw-foo.jar"); + output.shouldHaveExitValue(0); + + pb = createJarProcessBuilder("cf", "bar.jar", "Foo.class", "Bar.class"); + output = new OutputAnalyzer(pb.start()); + dump(output, "ctw-bar.jar"); + output.shouldHaveExitValue(0); + } + +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/ctw/test/classes.lst --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/ctw/test/classes.lst Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,4 @@ +java.lang.String +java.lang.Object +Foo +Bar diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/whitebox/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/whitebox/Makefile Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,63 @@ +# +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +ifneq "x$(ALT_BOOTDIR)" "x" + BOOTDIR := $(ALT_BOOTDIR) +endif + +ifeq "x$(BOOTDIR)" "x" + JDK_HOME := $(shell dirname $(shell which java))/.. +else + JDK_HOME := $(BOOTDIR) +endif + +SRC_DIR = ./ +BUILD_DIR = build +OUTPUT_DIR = $(BUILD_DIR)/classes + +JAVAC = $(JDK_HOME)/bin/javac +JAR = $(JDK_HOME)/bin/jar + +SRC_FILES = $(shell find $(SRC_DIR) -name '*.java') + +.PHONY: filelist clean cleantmp + +all: wb.jar cleantmp + +wb.jar: filelist + @mkdir -p $(OUTPUT_DIR) + $(JAVAC) -sourcepath $(SRC_DIR) -d $(OUTPUT_DIR) -cp $(OUTPUT_DIR) @filelist + $(JAR) cf wb.jar -C $(OUTPUT_DIR) . 
+ @rm -rf $(OUTPUT_DIR) + +filelist: $(SRC_FILES) + @rm -f $@ + @echo $(SRC_FILES) > $@ + +clean: cleantmp + @rm -rf wb.jar + +cleantmp: + @rm -rf filelist + @rm -rf $(BUILD_DIR) diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary/whitebox/sun/hotspot/WhiteBox.java --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Oct 10 18:26:22 2013 +0200 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Fri Oct 11 10:38:03 2013 +0200 @@ -61,6 +61,8 @@ registerNatives(); } + // Get the maximum heap size supporting COOPs + public native long getCompressedOopsMaxHeapSize(); // Arguments public native void printHeapSizes(); @@ -90,26 +92,49 @@ public native void NMTUncommitMemory(long addr, long size); public native void NMTReleaseMemory(long addr, long size); public native boolean NMTWaitForDataMerge(); + public native boolean NMTIsDetailSupported(); // Compiler public native void deoptimizeAll(); - public native boolean isMethodCompiled(Executable method); - public boolean isMethodCompilable(Executable method) { - return isMethodCompilable(method, -1 /*any*/); + public boolean isMethodCompiled(Executable method) { + return isMethodCompiled(method, false /*not osr*/); } - public native boolean isMethodCompilable(Executable method, int compLevel); + public native boolean isMethodCompiled(Executable method, boolean isOsr); + public boolean isMethodCompilable(Executable method) { + return isMethodCompilable(method, -1 /*any*/); + } + public boolean isMethodCompilable(Executable method, int compLevel) { + return isMethodCompilable(method, compLevel, false /*not osr*/); + } + public native boolean isMethodCompilable(Executable method, int compLevel, boolean isOsr); public native boolean isMethodQueuedForCompilation(Executable method); - public native int deoptimizeMethod(Executable method); - public void makeMethodNotCompilable(Executable method) { - makeMethodNotCompilable(method, -1 /*any*/); + public int deoptimizeMethod(Executable method) { + return deoptimizeMethod(method, false /*not osr*/); + } + public native int deoptimizeMethod(Executable method, boolean isOsr); + public void makeMethodNotCompilable(Executable method) { + makeMethodNotCompilable(method, -1 /*any*/); + } + public void makeMethodNotCompilable(Executable method, int compLevel) { + makeMethodNotCompilable(method, compLevel, false /*not osr*/); + } + public native void makeMethodNotCompilable(Executable method, int compLevel, boolean isOsr); + public int getMethodCompilationLevel(Executable method) { + return getMethodCompilationLevel(method, false /*not osr*/); } - public native void makeMethodNotCompilable(Executable method, int compLevel); - public native int getMethodCompilationLevel(Executable method); + public native int getMethodCompilationLevel(Executable method, boolean isOsr); public native boolean testSetDontInlineMethod(Executable method, boolean value); - public native int getCompileQueuesSize(); + public int getCompileQueuesSize() { + return getCompileQueueSize(-1 /*any*/); + } + public native int getCompileQueueSize(int compLevel); public native boolean testSetForceInlineMethod(Executable method, boolean value); - public native boolean enqueueMethodForCompilation(Executable method, int compLevel); + public boolean enqueueMethodForCompilation(Executable method, int compLevel) { + return enqueueMethodForCompilation(method, compLevel, -1 /*InvocationEntryBci*/); + } + public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci); public native void
clearMethodState(Executable method); + public native int getMethodEntryBci(Executable method); // Interned strings public native boolean isInStringTable(String str); diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary_tests/AssertsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary_tests/AssertsTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import static com.oracle.java.testlibrary.Asserts.*; + +/* @test + * @summary Tests the different assertions in the Assert class + * @library /testlibrary + */ +public class AssertsTest { + private static class Foo implements Comparable<Foo> { + final int id; + public Foo(int id) { + this.id = id; + } + + public int compareTo(Foo f) { + return new Integer(id).compareTo(new Integer(f.id)); + } + } + + public static void main(String[] args) throws Exception { + testLessThan(); + testLessThanOrEqual(); + testEquals(); + testGreaterThanOrEqual(); + testGreaterThan(); + testNotEquals(); + testNull(); + testNotNull(); + testTrue(); + testFalse(); + } + + private static void testLessThan() throws Exception { + expectPass(Assertion.LT, 1, 2); + + expectFail(Assertion.LT, 2, 2); + expectFail(Assertion.LT, 2, 1); + expectFail(Assertion.LT, null, 2); + expectFail(Assertion.LT, 2, null); + } + + private static void testLessThanOrEqual() throws Exception { + expectPass(Assertion.LTE, 1, 2); + expectPass(Assertion.LTE, 2, 2); + + expectFail(Assertion.LTE, 3, 2); + expectFail(Assertion.LTE, null, 2); + expectFail(Assertion.LTE, 2, null); + } + + private static void testEquals() throws Exception { + expectPass(Assertion.EQ, 1, 1); + expectPass(Assertion.EQ, null, null); + + Foo f1 = new Foo(1); + expectPass(Assertion.EQ, f1, f1); + + Foo f2 = new Foo(1); + expectFail(Assertion.EQ, f1, f2); + expectFail(Assertion.EQ, null, 2); + expectFail(Assertion.EQ, 2, null); + } + + private static void testGreaterThanOrEqual() throws Exception { + expectPass(Assertion.GTE, 1, 1); + expectPass(Assertion.GTE, 2, 1); + + expectFail(Assertion.GTE, 1, 2); + expectFail(Assertion.GTE, null, 2); + expectFail(Assertion.GTE, 2, null); + } + + private static void testGreaterThan() throws Exception { + expectPass(Assertion.GT, 2, 1); + + expectFail(Assertion.GT, 1, 1); + expectFail(Assertion.GT, 1, 2); + expectFail(Assertion.GT, null, 2); + expectFail(Assertion.GT, 2, null); + } + + private static void testNotEquals() throws Exception { + expectPass(Assertion.NE, null, 1); + expectPass(Assertion.NE, 1,
null); + + Foo f1 = new Foo(1); + Foo f2 = new Foo(1); + expectPass(Assertion.NE, f1, f2); + + expectFail(Assertion.NE, null, null); + expectFail(Assertion.NE, f1, f1); + expectFail(Assertion.NE, 1, 1); + } + + private static void testNull() throws Exception { + expectPass(Assertion.NULL, null); + + expectFail(Assertion.NULL, 1); + } + + private static void testNotNull() throws Exception { + expectPass(Assertion.NOTNULL, 1); + + expectFail(Assertion.NOTNULL, null); + } + + private static void testTrue() throws Exception { + expectPass(Assertion.TRUE, true); + + expectFail(Assertion.TRUE, false); + } + + private static void testFalse() throws Exception { + expectPass(Assertion.FALSE, false); + + expectFail(Assertion.FALSE, true); + } + + private static <T extends Comparable<T>> void expectPass(Assertion assertion, T ... args) + throws Exception { + Assertion.run(assertion, args); + } + + private static <T extends Comparable<T>> void expectFail(Assertion assertion, T ... args) + throws Exception { + try { + Assertion.run(assertion, args); + } catch (RuntimeException e) { + return; + } + throw new Exception("Expected " + Assertion.format(assertion, (Object[]) args) + + " to throw a RuntimeException"); + } + +} + +enum Assertion { + LT, LTE, EQ, GTE, GT, NE, NULL, NOTNULL, FALSE, TRUE; + + public static <T extends Comparable<T>> void run(Assertion assertion, T ... args) { + String msg = "Expected " + format(assertion, args) + " to pass"; + switch (assertion) { + case LT: + assertLessThan(args[0], args[1], msg); + break; + case LTE: + assertLessThanOrEqual(args[0], args[1], msg); + break; + case EQ: + assertEquals(args[0], args[1], msg); + break; + case GTE: + assertGreaterThanOrEqual(args[0], args[1], msg); + break; + case GT: + assertGreaterThan(args[0], args[1], msg); + break; + case NE: + assertNotEquals(args[0], args[1], msg); + break; + case NULL: + assertNull(args == null ? args : args[0], msg); + break; + case NOTNULL: + assertNotNull(args == null ? args : args[0], msg); + break; + case FALSE: + assertFalse((Boolean) args[0], msg); + break; + case TRUE: + assertTrue((Boolean) args[0], msg); + break; + default: + // do nothing + } + } + + public static String format(Assertion assertion, Object ... args) { + switch (assertion) { + case LT: + return asString("assertLessThan", args); + case LTE: + return asString("assertLessThanOrEqual", args); + case EQ: + return asString("assertEquals", args); + case GTE: + return asString("assertGreaterThanOrEquals", args); + case GT: + return asString("assertGreaterThan", args); + case NE: + return asString("assertNotEquals", args); + case NULL: + return asString("assertNull", args); + case NOTNULL: + return asString("assertNotNull", args); + case FALSE: + return asString("assertFalse", args); + case TRUE: + return asString("assertTrue", args); + default: + return ""; + } + } + + private static String asString(String assertion, Object ... args) { + if (args == null) { + return String.format("%s(null)", assertion); + } + if (args.length == 1) { + return String.format("%s(%s)", assertion, args[0]); + } else { + return String.format("%s(%s, %s)", assertion, args[0], args[1]); + } + } +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary_tests/OutputAnalyzerReportingTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary_tests/OutputAnalyzerReportingTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +/* + * @test + * @summary Test the OutputAnalyzer reporting functionality, + * such as printing additional diagnostic info + * (exit code, stdout, stderr, command line, etc.) + * @library /testlibrary + */ + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; + +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; + + +public class OutputAnalyzerReportingTest { + + public static void main(String[] args) throws Exception { + // Create the output analyzer under test + String stdout = "aaaaaa"; + String stderr = "bbbbbb"; + OutputAnalyzer output = new OutputAnalyzer(stdout, stderr); + + // Expected summary values should be the same for all cases, + // since the outputAnalyzer object is the same + String expectedExitValue = "-1"; + String expectedSummary = + " stdout: [" + stdout + "];\n" + + " stderr: [" + stderr + "]\n" + + " exitValue = " + expectedExitValue + "\n"; + + + DiagnosticSummaryTestRunner testRunner = + new DiagnosticSummaryTestRunner(); + + // should have exit value + testRunner.init(expectedSummary); + int unexpectedExitValue = 2; + try { + output.shouldHaveExitValue(unexpectedExitValue); + } catch (RuntimeException e) { } + testRunner.closeAndCheckResults(); + + // should not contain + testRunner.init(expectedSummary); + try { + output.shouldNotContain(stdout); + } catch (RuntimeException e) { } + testRunner.closeAndCheckResults(); + + // should contain + testRunner.init(expectedSummary); + try { + output.shouldContain("unexpected-stuff"); + } catch (RuntimeException e) { } + testRunner.closeAndCheckResults(); + + // should not match + testRunner.init(expectedSummary); + try { + output.shouldNotMatch("[a]"); + } catch (RuntimeException e) { } + testRunner.closeAndCheckResults(); + + // should match + testRunner.init(expectedSummary); + try { + output.shouldMatch("[qwerty]"); + } catch (RuntimeException e) { } + testRunner.closeAndCheckResults(); + + } + + private static class DiagnosticSummaryTestRunner { + private ByteArrayOutputStream byteStream = + new ByteArrayOutputStream(10000); + + private String expectedSummary = ""; + private PrintStream errStream; + + + public void init(String expectedSummary) { + this.expectedSummary = expectedSummary; + byteStream.reset(); + errStream = new PrintStream(byteStream); + System.setErr(errStream); + } + + public void closeAndCheckResults() { + // check results + errStream.close(); + String stdErrStr = byteStream.toString(); + if (!stdErrStr.contains(expectedSummary)) { + throw new RuntimeException("The output does not contain " + + "the diagnostic message, or 
the message is incorrect"); + } + } + } + +} diff -r ccb4f2af2319 -r cefad50507d8 test/testlibrary_tests/OutputAnalyzerTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary_tests/OutputAnalyzerTest.java Fri Oct 11 10:38:03 2013 +0200 @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test the OutputAnalyzer utility class + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class OutputAnalyzerTest { + + public static void main(String args[]) throws Exception { + + String stdout = "aaaaaa"; + String stderr = "bbbbbb"; + + // Regexps used for testing pattern matching of the test input + String stdoutPattern = "[a]"; + String stderrPattern = "[b]"; + String nonExistingPattern = "[c]"; + + OutputAnalyzer output = new OutputAnalyzer(stdout, stderr); + + if (!stdout.equals(output.getStdout())) { + throw new Exception("getStdout() returned '" + output.getStdout() + "', expected '" + stdout + "'"); + } + + if (!stderr.equals(output.getStderr())) { + throw new Exception("getStderr() returned '" + output.getStderr() + "', expected '" + stderr + "'"); + } + + try { + output.shouldContain(stdout); + output.stdoutShouldContain(stdout); + output.shouldContain(stderr); + output.stderrShouldContain(stderr); + } catch (RuntimeException e) { + throw new Exception("shouldContain() failed", e); + } + + try { + output.shouldContain("cccc"); + throw new Exception("shouldContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stdoutShouldContain(stderr); + throw new Exception("stdoutShouldContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stderrShouldContain(stdout); + throw new Exception("stderrShouldContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.shouldNotContain("cccc"); + output.stdoutShouldNotContain("cccc"); + output.stderrShouldNotContain("cccc"); + } catch (RuntimeException e) { + throw new Exception("shouldNotContain() failed", e); + } + + try { + output.shouldNotContain(stdout); + throw new Exception("shouldNotContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stdoutShouldNotContain(stdout); + throw new Exception("stdoutShouldNotContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stderrShouldNotContain(stderr); + throw
new Exception("shouldContain() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + // Should match + try { + output.shouldMatch(stdoutPattern); + output.stdoutShouldMatch(stdoutPattern); + output.shouldMatch(stderrPattern); + output.stderrShouldMatch(stderrPattern); + } catch (RuntimeException e) { + throw new Exception("shouldMatch() failed", e); + } + + try { + output.shouldMatch(nonExistingPattern); + throw new Exception("shouldMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stdoutShouldMatch(stderrPattern); + throw new Exception( + "stdoutShouldMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stderrShouldMatch(stdoutPattern); + throw new Exception( + "stderrShouldMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + // Should not match + try { + output.shouldNotMatch(nonExistingPattern); + output.stdoutShouldNotMatch(nonExistingPattern); + output.stderrShouldNotMatch(nonExistingPattern); + } catch (RuntimeException e) { + throw new Exception("shouldNotMatch() failed", e); + } + + try { + output.shouldNotMatch(stdoutPattern); + throw new Exception("shouldNotMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stdoutShouldNotMatch(stdoutPattern); + throw new Exception("shouldNotMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + try { + output.stderrShouldNotMatch(stderrPattern); + throw new Exception("shouldNotMatch() failed to throw exception"); + } catch (RuntimeException e) { + // expected + } + + { + String aaaa = "aaaa"; + String result = output.firstMatch(aaaa); + if (!aaaa.equals(result)) { + throw new Exception("firstMatch(String) faild to match. Expected: " + aaaa + " got: " + result); + } + } + + { + String aa = "aa"; + String aa_grouped_aa = aa + "(" + aa + ")"; + String result = output.firstMatch(aa_grouped_aa, 1); + if (!aa.equals(result)) { + throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result); + } + } + } +}