# HG changeset patch
# User never
# Date 1329509932 28800
# Node ID 15085a6eb50c03a0eeb68d910e0f16b2c3fb6649
# Parent  72c425c46102a0204285c1d84657aa2e80776f5c
# Parent  d3384450b64967964468290c7c2534de5c8880d3
Merge

diff -r 72c425c46102 -r 15085a6eb50c .hgtags
--- a/.hgtags	Fri Feb 17 12:18:10 2012 -0800
+++ b/.hgtags	Fri Feb 17 12:18:52 2012 -0800
@@ -218,3 +218,5 @@
 a80fd4f45d7aaa154ed2f86a129f3c9c4035ec7a jdk8-b24
 b22de824749922986ce4d442bed029916b832807 hs23-b13
 64b46f975ab82948c1e021e17775ff4fab8bc40e hs23-b14
+9ad8feb5afbddec46d3cfe29fb5f73c2e99d5a43 jdk8-b25
+d71e662fe03741b6de498ca2077220148405a978 hs23-b15
diff -r 72c425c46102 -r 15085a6eb50c agent/src/os/linux/Makefile
--- a/agent/src/os/linux/Makefile	Fri Feb 17 12:18:10 2012 -0800
+++ b/agent/src/os/linux/Makefile	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@

 LIBS = -lthread_db

-CFLAGS = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES)
+CFLAGS = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES) -D_FILE_OFFSET_BITS=64

 LIBSA = $(ARCH)/libsaproc.so
diff -r 72c425c46102 -r 15085a6eb50c agent/src/os/linux/libproc_impl.c
--- a/agent/src/os/linux/libproc_impl.c	Fri Feb 17 12:18:10 2012 -0800
+++ b/agent/src/os/linux/libproc_impl.c	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,6 @@
   char alt_path[PATH_MAX + 1];

   init_alt_root();
-  fd = open(name, O_RDONLY);
-  if (fd >= 0) {
-    return fd;
-  }

   if (alt_root_len > 0) {
     strcpy(alt_path, alt_root);
@@ -73,6 +69,11 @@
         return fd;
       }
     }
+  } else {
+    fd = open(name, O_RDONLY);
+    if (fd >= 0) {
+      return fd;
+    }
   }

   return -1;
diff -r 72c425c46102 -r 15085a6eb50c make/hotspot_version
--- a/make/hotspot_version	Fri Feb 17 12:18:10 2012 -0800
+++ b/make/hotspot_version	Fri Feb 17 12:18:52 2012 -0800
@@ -35,7 +35,7 @@

 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=15
+HS_BUILD_NUMBER=16

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
diff -r 72c425c46102 -r 15085a6eb50c make/jprt.properties
--- a/make/jprt.properties	Fri Feb 17 12:18:10 2012 -0800
+++ b/make/jprt.properties	Fri Feb 17 12:18:52 2012 -0800
@@ -38,7 +38,9 @@

 # This tells jprt what default release we want to build

-jprt.tools.default.release=${jprt.submit.release}
+jprt.hotspot.default.release=jdk7
+
+jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}

 # Disable syncing the source after builds and tests are done.
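
The -D_FILE_OFFSET_BITS=64 additions (in the agent Makefile above, and again in make/linux/makefiles/saproc.make further down) turn on POSIX large-file support so the 32-bit serviceability agent can open core files larger than 2 GB. A minimal sketch of what the macro changes on a 32-bit Linux/glibc build — illustrative only, not part of the changeset:

    // sketch.cpp -- compile 32-bit, with and without -D_FILE_OFFSET_BITS=64
    #include <sys/types.h>
    #include <cstdio>

    int main() {
      // With -D_FILE_OFFSET_BITS=64 a 32-bit build prints 8: off_t is
      // widened and open()/lseek() transparently map to their 64-bit
      // variants. Without it the build prints 4, and seeking beyond
      // 2 GB in a large core file fails with EOVERFLOW.
      std::printf("sizeof(off_t) = %zu\n", sizeof(off_t));
      return 0;
    }
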
@@ -52,126 +54,46 @@

 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
-jprt.my.solaris.sparc.ejdk7=${jprt.my.solaris.sparc.jdk7}
-jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}

 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.ejdk7=${jprt.my.solaris.sparcv9.jdk7}
-jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}

 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
-jprt.my.solaris.i586.jdk6=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
-jprt.my.solaris.i586.ejdk7=${jprt.my.solaris.i586.jdk7}
-jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}

 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
-jprt.my.solaris.x64.ejdk7=${jprt.my.solaris.x64.jdk7}
-jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}

 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7b107=linux_i586_2.6
-jprt.my.linux.i586.jdk7temp=linux_i586_2.6
-jprt.my.linux.i586.jdk6=linux_i586_2.4
-jprt.my.linux.i586.jdk6perf=linux_i586_2.4
-jprt.my.linux.i586.jdk6u10=linux_i586_2.4
-jprt.my.linux.i586.jdk6u14=linux_i586_2.4
-jprt.my.linux.i586.jdk6u18=linux_i586_2.4
-jprt.my.linux.i586.jdk6u20=linux_i586_2.4
-jprt.my.linux.i586.ejdk7=linux_i586_2.6
-jprt.my.linux.i586.ejdk6=linux_i586_2.6
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}

 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7b107=linux_x64_2.6
-jprt.my.linux.x64.jdk7temp=linux_x64_2.6
-jprt.my.linux.x64.jdk6=linux_x64_2.4
-jprt.my.linux.x64.jdk6perf=linux_x64_2.4
-jprt.my.linux.x64.jdk6u10=linux_x64_2.4
-jprt.my.linux.x64.jdk6u14=linux_x64_2.4
-jprt.my.linux.x64.jdk6u18=linux_x64_2.4
-jprt.my.linux.x64.jdk6u20=linux_x64_2.4
-jprt.my.linux.x64.ejdk7=${jprt.my.linux.x64.jdk7}
-jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}

 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk6=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}

 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk6=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}

 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk6=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}

 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk6=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}

 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}

 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
@@ -180,30 +102,10 @@

 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7b107=windows_i586_5.0
-jprt.my.windows.i586.jdk7temp=windows_i586_5.0
-jprt.my.windows.i586.jdk6=windows_i586_5.0
-jprt.my.windows.i586.jdk6perf=windows_i586_5.0
-jprt.my.windows.i586.jdk6u10=windows_i586_5.0
-jprt.my.windows.i586.jdk6u14=windows_i586_5.0
-jprt.my.windows.i586.jdk6u18=windows_i586_5.0
-jprt.my.windows.i586.jdk6u20=windows_i586_5.0
-jprt.my.windows.i586.ejdk7=${jprt.my.windows.i586.jdk7}
-jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}

 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7b107=windows_x64_5.2
-jprt.my.windows.x64.jdk7temp=windows_x64_5.2
-jprt.my.windows.x64.jdk6=windows_x64_5.2
-jprt.my.windows.x64.jdk6perf=windows_x64_5.2
-jprt.my.windows.x64.jdk6u10=windows_x64_5.2
-jprt.my.windows.x64.jdk6u14=windows_x64_5.2
-jprt.my.windows.x64.jdk6u18=windows_x64_5.2
-jprt.my.windows.x64.jdk6u20=windows_x64_5.2
-jprt.my.windows.x64.ejdk7=${jprt.my.windows.x64.jdk7}
-jprt.my.windows.x64.ejdk6=${jprt.my.windows.x64.jdk6}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}

 # Standard list of jprt build targets for this source tree
@@ -539,16 +441,6 @@
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
-jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
-jprt.test.targets.jdk6=${jprt.test.targets.standard}
-jprt.test.targets.jdk6perf=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u10=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u14=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u18=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u20=${jprt.test.targets.standard}
-jprt.test.targets.ejdk6=${jprt.test.targets.embedded}
-jprt.test.targets.ejdk7=${jprt.test.targets.embedded}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}

 # The default test/Makefile targets that should be run
@@ -593,15 +485,5 @@

 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6perf=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u10=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u14=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u18=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u20=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.ejdk6=${jprt.make.rule.test.targets.embedded}
-jprt.make.rule.test.targets.ejdk7=${jprt.make.rule.test.targets.embedded}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
diff -r 72c425c46102 -r 15085a6eb50c make/linux/makefiles/saproc.make
--- a/make/linux/makefiles/saproc.make	Fri Feb 17 12:18:10 2012 -0800
+++ b/make/linux/makefiles/saproc.make	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
 	fi
 	@echo Making SA debugger back-end...
 	$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \
+	           -D_FILE_OFFSET_BITS=64 \
 	           $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
 	           -I$(SASRCDIR) \
 	           -I$(GENERATED) \
diff -r 72c425c46102 -r 15085a6eb50c src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -472,7 +472,7 @@

   __ load_klass(src_reg, tmp_reg);

   Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
-  __ ld(ref_type_adr, tmp_reg);
+  __ ldub(ref_type_adr, tmp_reg);

   // _reference_type field is of type ReferenceType (enum)
   assert(REF_NONE == 0, "check this code");
diff -r 72c425c46102 -r 15085a6eb50c src/cpu/sparc/vm/globals_sparc.hpp
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -56,14 +56,15 @@

 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize, 1024);
 define_pd_global(intx, VMThreadStackSize, 1024);
+define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
 #else
 define_pd_global(intx, ThreadStackSize, 512);
 define_pd_global(intx, VMThreadStackSize, 512);
+define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif

 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));

 define_pd_global(intx, PreInflateSpin, 40);  // Determined by running design center
diff -r 72c425c46102 -r 15085a6eb50c src/cpu/x86/vm/c1_CodeStubs_x86.cpp
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -520,7 +520,7 @@

   __ load_klass(tmp_reg, src_reg);

   Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
-  __ cmpl(ref_type_adr, REF_NONE);
+  __ cmpb(ref_type_adr, REF_NONE);
   __ jcc(Assembler::equal, _continuation);

   // Is marking active?
diff -r 72c425c46102 -r 15085a6eb50c src/cpu/x86/vm/globals_x86.hpp
--- a/src/cpu/x86/vm/globals_x86.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -60,7 +60,7 @@
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
+define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
 #else
 define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
 #endif   // AMD64
diff -r 72c425c46102 -r 15085a6eb50c src/cpu/x86/vm/vm_version_x86.hpp
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -249,13 +249,18 @@

   enum {
     // AMD
-    CPU_FAMILY_AMD_11H       = 17,
+    CPU_FAMILY_AMD_11H       = 0x11,
     // Intel
     CPU_FAMILY_INTEL_CORE    = 6,
-    CPU_MODEL_NEHALEM_EP     = 26,
-    CPU_MODEL_WESTMERE_EP    = 44,
-//  CPU_MODEL_IVYBRIDGE_EP   = ??, TODO - get real value
-    CPU_MODEL_SANDYBRIDGE_EP = 45
+    CPU_MODEL_NEHALEM        = 0x1e,
+    CPU_MODEL_NEHALEM_EP     = 0x1a,
+    CPU_MODEL_NEHALEM_EX     = 0x2e,
+    CPU_MODEL_WESTMERE       = 0x25,
+    CPU_MODEL_WESTMERE_EP    = 0x2c,
+    CPU_MODEL_WESTMERE_EX    = 0x2f,
+    CPU_MODEL_SANDYBRIDGE    = 0x2a,
+    CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
+    CPU_MODEL_IVYBRIDGE_EP   = 0x3a
   } cpuExtendedFamily;

   // cpuid information block.  All info derived from executing cpuid with
@@ -325,7 +330,7 @@
     uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
     uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;

-  // cpuid function 0x80000005 //AMD L1, Intel reserved
+  // cpuid function 0x80000005 // AMD L1, Intel reserved
   uint32_t ext_cpuid5_eax; // unused currently
   uint32_t ext_cpuid5_ebx; // reserved
   ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
@@ -547,15 +552,15 @@
   static bool is_intel_tsc_synched_at_init() {
     if (is_intel_family_core()) {
       uint32_t ext_model = extended_cpu_model();
-      if (ext_model == CPU_MODEL_NEHALEM_EP ||
-          ext_model == CPU_MODEL_WESTMERE_EP ||
-// TODO   ext_model == CPU_MODEL_IVYBRIDGE_EP ||
-          ext_model == CPU_MODEL_SANDYBRIDGE_EP) {
-        // 2-socket invtsc support. EX versions with 4 sockets are not
-        // guaranteed to synchronize tscs at initialization via a double
-        // handshake. The tscs can be explicitly set in software.  Code
-        // that uses tsc values must be prepared for them to arbitrarily
-        // jump backward or forward.
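
The ld -> ldub (SPARC) and cmpl -> cmpb (x86) changes above are byte-width fixes: instanceKlass::_reference_type holds a ReferenceType enum in a single byte, so a 4-byte load or compare also reads the three neighbouring bytes, and on big-endian SPARC the enum value would not even land in the low-order byte of the register. An illustrative C++ sketch with a hypothetical field layout — the struct and names below are stand-ins, not HotSpot's:

    #include <cstdint>
    #include <cstring>

    enum ReferenceType : uint8_t { REF_NONE = 0, REF_SOFT, REF_WEAK };

    struct KlassishLayout {      // hypothetical layout, for illustration only
      uint8_t reference_type;    // the 1-byte field the barrier stub inspects
      uint8_t neighbours[3];     // unrelated state a 4-byte load drags in
    };

    int main() {
      KlassishLayout k = { REF_NONE, { 0x12, 0x34, 0x56 } };
      uint32_t wide = 0;
      std::memcpy(&wide, &k.reference_type, sizeof(wide)); // what ld/cmpl did
      // wide is 0x56341200 on little-endian and 0x00123456 on big-endian --
      // either way it compares unequal to REF_NONE, so the REF_NONE fast
      // path is decided wrongly. A byte access (ldub/cmpb) reads exactly
      // reference_type.
      return (wide == k.reference_type) ? 0 : 1;   // returns 1: they differ
    }
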
+      if (ext_model == CPU_MODEL_NEHALEM_EP ||
+          ext_model == CPU_MODEL_WESTMERE_EP ||
+          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
+          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
+        // <= 2-socket invariant tsc support. EX versions are usually used
+        // in > 2-socket systems and likely don't synchronize tscs at
+        // initialization.
+        // Code that uses tsc values must be prepared for them to arbitrarily
+        // jump forward or backward.
         return true;
       }
     }
diff -r 72c425c46102 -r 15085a6eb50c src/os/bsd/vm/decoder_machO.hpp
--- a/src/os/bsd/vm/decoder_machO.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/os/bsd/vm/decoder_machO.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,9 @@

 #include "utilities/decoder.hpp"

-// Just a placehold for now
-class MachODecoder: public NullDecoder {
+// Just a placeholder for now, a real implementation should derive
+// from AbstractDecoder
+class MachODecoder : public NullDecoder {
 public:
   MachODecoder() { }
   ~MachODecoder() { }
diff -r 72c425c46102 -r 15085a6eb50c src/os/windows/vm/decoder_windows.hpp
--- a/src/os/windows/vm/decoder_windows.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/os/windows/vm/decoder_windows.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -36,7 +36,7 @@
 typedef BOOL (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
 typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);

-class WindowsDecoder: public NullDecoder {
+class WindowsDecoder : public AbstractDecoder {
 public:
   WindowsDecoder();
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/classfile/vmSymbols.hpp
--- a/src/share/vm/classfile/vmSymbols.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -284,6 +284,7 @@
   template(run_method_name, "run") \
   template(exit_method_name, "exit") \
   template(add_method_name, "add") \
+  template(remove_method_name, "remove") \
   template(parent_name, "parent") \
   template(threads_name, "threads") \
   template(groups_name, "groups") \
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -6092,7 +6092,11 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();

-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);

   // NOTE on abstract state transitions:
   // Mutators allocate-live and/or mark the mod-union table dirty
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
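
In the concurrentMarkSweepGeneration.cpp hunk above, os::javaTimeMillis() is wall-clock time and can be stepped backwards by NTP or an administrator, whereas os::javaTimeNanos() is monotonic, so dividing it by NANOSECS_PER_MILLISEC yields a millisecond stamp that cannot time-warp. The same idea in portable C++, as a self-contained sketch rather than HotSpot code:

    #include <chrono>
    #include <cstdint>

    // Monotonic milliseconds: suitable for "time of last GC" bookkeeping
    // because steady_clock, like javaTimeNanos(), never moves backward
    // even when the wall clock is adjusted.
    int64_t monotonic_millis() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(
                 steady_clock::now().time_since_epoch()).count();
    }
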
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,8 @@

 #ifndef PRODUCT
 bool CSetChooserCache::verify() {
+  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
+
   int index = _first;
   HeapRegion *prev = NULL;
   for (int i = 0; i < _occupancy; ++i) {
@@ -75,6 +77,8 @@
 #endif // PRODUCT

 void CSetChooserCache::insert(HeapRegion *hr) {
+  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
+
   assert(!is_full(), "cache should not be empty");
   hr->calc_gc_efficiency();
@@ -104,6 +108,9 @@
 }

 HeapRegion *CSetChooserCache::remove_first() {
+  guarantee(false, "CSetChooserCache::remove_first(): "
+            "don't call this any more");
+
   if (_occupancy > 0) {
     assert(_cache[_first] != NULL, "cache should have at least one region");
     HeapRegion *ret = _cache[_first];
@@ -118,16 +125,35 @@
   }
 }

-static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+// Even though we don't use the GC efficiency in our heuristics as
+// much as we used to, we still order according to GC efficiency. This
+// will cause regions with a lot of live objects and large RSets to
+// end up at the end of the array. Given that we might skip collecting
+// the last few old regions, if after a few mixed GCs the remaining
+// have reclaimable bytes under a certain threshold, the hope is that
+// the ones we'll skip are ones with both large RSets and a lot of
+// live objects, not the ones with just a lot of live objects if we
+// ordered according to the amount of reclaimable bytes per region.
+static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
-    if (hr2 == NULL) return 0;
-    else return 1;
+    if (hr2 == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
   } else if (hr2 == NULL) {
     return -1;
   }
-  if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-  else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-  else return 0;
+
+  double gc_eff1 = hr1->gc_efficiency();
+  double gc_eff2 = hr2->gc_efficiency();
+  if (gc_eff1 > gc_eff2) {
+    return -1;
+  } else if (gc_eff1 < gc_eff2) {
+    return 1;
+  } else {
+    return 0;
+  }
 }

 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
@@ -151,51 +177,61 @@
 //
   _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                                    ResourceObj::C_HEAP),
-                  100),
-                 true),
-  _curMarkedIndex(0),
-  _numMarkedRegions(0),
-  _unmarked_age_1_returned_as_new(false),
-  _first_par_unreserved_idx(0)
-{}
-
-
+                  100), true /* C_Heap */),
+  _curr_index(0), _length(0),
+  _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+  _first_par_unreserved_idx(0) {
+  _regionLiveThresholdBytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+}

 #ifndef PRODUCT
 bool CollectionSetChooser::verify() {
+  guarantee(_length >= 0, err_msg("_length: %d", _length));
+  guarantee(0 <= _curr_index && _curr_index <= _length,
+            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
   int index = 0;
-  guarantee(_curMarkedIndex <= _numMarkedRegions,
-            "_curMarkedIndex should be within bounds");
-  while (index < _curMarkedIndex) {
-    guarantee(_markedRegions.at(index++) == NULL,
-              "all entries before _curMarkedIndex should be NULL");
+  size_t sum_of_reclaimable_bytes = 0;
+  while (index < _curr_index) {
+    guarantee(_markedRegions.at(index) == NULL,
+              "all entries before _curr_index should be NULL");
+    index += 1;
   }
   HeapRegion *prev = NULL;
-  while (index < _numMarkedRegions) {
+  while (index < _length) {
     HeapRegion *curr = _markedRegions.at(index++);
     guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
     int si = curr->sort_index();
     guarantee(!curr->is_young(), "should not be young!");
+    guarantee(!curr->isHumongous(), "should not be humongous!");
     guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
+      guarantee(orderRegions(prev, curr) != 1,
+                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
+                        prev->gc_efficiency(), curr->gc_efficiency()));
     }
+    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  return _cache.verify();
+  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+            err_msg("reclaimable bytes inconsistent, "
+                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+  return true;
 }
 #endif

-void
-CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+void CollectionSetChooser::fillCache() {
+  guarantee(false, "fillCache: don't call this any more");
+
+  while (!_cache.is_full() && (_curr_index < _length)) {
+    HeapRegion* hr = _markedRegions.at(_curr_index);
     assert(hr != NULL,
            err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curMarkedIndex));
-    _curMarkedIndex += 1;
+                   _curr_index));
+    _curr_index += 1;
     assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
     _markedRegions.at_put(hr->sort_index(), NULL);
     _cache.insert(hr);
     assert(!_cache.is_empty(), "cache should not be empty");
@@ -203,9 +239,7 @@
   assert(verify(), "cache should be consistent");
 }

-void
-CollectionSetChooser::sortMarkedHeapRegions() {
-  guarantee(_cache.is_empty(), "cache should be empty");
+void CollectionSetChooser::sortMarkedHeapRegions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
     if (G1PrintParCleanupStats) {
@@ -217,43 +251,78 @@
     _markedRegions.trunc_to(_first_par_unreserved_idx);
   }
   _markedRegions.sort(orderRegions);
-  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-  assert(_numMarkedRegions == 0
-         || _markedRegions.at(_numMarkedRegions-1) != NULL,
-         "Testing _numMarkedRegions");
-  assert(_numMarkedRegions == _markedRegions.length()
-         || _markedRegions.at(_numMarkedRegions) == NULL,
-         "Testing _numMarkedRegions");
+  assert(_length <= _markedRegions.length(), "Requirement");
+  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+         "Testing _length");
+  assert(_length == _markedRegions.length() ||
+         _markedRegions.at(_length) == NULL, "Testing _length");

   if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
+    gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
   }
-  for (int i = 0; i < _numMarkedRegions; i++) {
+  for (int i = 0; i < _length; i++) {
     assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
     _markedRegions.at(i)->set_sort_index(i);
   }
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _numMarkedRegions; ++i) {
+    for (int i = 0; i < _length; ++i) {
       HeapRegion* r = _markedRegions.at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "should now be sorted");
+  assert(verify(), "CSet chooser verification");
 }

-void
-CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+size_t CollectionSetChooser::calcMinOldCSetLength() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _length;
+  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return result;
+}
+
+size_t CollectionSetChooser::calcMaxOldCSetLength() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return result;
+}
+
+void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.append(hr);
-  _numMarkedRegions++;
+  _length++;
+  _remainingReclaimableBytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }

-void
-CollectionSetChooser::
-prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                             size_t chunkSize) {
   _first_par_unreserved_idx = 0;
   int n_threads = ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
@@ -274,8 +343,7 @@
   _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
 }

-jint
-CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   // Don't do this assert because this can be called at a point
   // where the loop up stream will not execute again but might
   // try to claim more chunks (loop test has not been done yet).
@@ -287,83 +355,37 @@
   return res - n_regions;
 }

-void
-CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
   assert(_markedRegions.at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.at_put(index, hr);
   hr->calc_gc_efficiency();
 }

-void
-CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-  (void)Atomic::add(inc_by, &_numMarkedRegions);
-}
-
-void
-CollectionSetChooser::clearMarkedHeapRegions(){
-  for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r = _markedRegions.at(i);
-    if (r != NULL) r->set_sort_index(-1);
+void CollectionSetChooser::updateTotals(jint region_num,
+                                        size_t reclaimable_bytes) {
+  // Only take the lock if we actually need to update the totals.
+  if (region_num > 0) {
+    assert(reclaimable_bytes > 0, "invariant");
+    // We could have just used atomics instead of taking the
+    // lock. However, we currently don't have an atomic add for size_t.
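
calcMinOldCSetLength() and calcMaxOldCSetLength() above both round up with the same "result * divisor < dividend" test. Worked through: 10 candidate regions over 4 mixed GCs gives 10 / 4 = 2, and since 2 * 4 < 10 the result is bumped to 3, so the candidates are exhausted within the desired number of GCs. Here is the pattern in isolation as a hypothetical helper (not in the patch); unlike the usual (a + b - 1) / b form it cannot overflow, because result * b never exceeds a:

    #include <cstddef>

    // Integer ceil(a / b) without the wrap-around risk of (a + b - 1) / b.
    size_t ceil_div(size_t a, size_t b) {
      size_t result = a / b;
      if (result * b < a) {   // non-zero remainder: round up
        result += 1;
      }
      return result;
    }
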
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    _length += (int) region_num;
+    _remainingReclaimableBytes += reclaimable_bytes;
+  } else {
+    assert(reclaimable_bytes == 0, "invariant");
   }
-  _markedRegions.clear();
-  _curMarkedIndex = 0;
-  _numMarkedRegions = 0;
-  _cache.clear();
-};
-
-void
-CollectionSetChooser::updateAfterFullCollection() {
-  clearMarkedHeapRegions();
 }

-// if time_remaining < 0.0, then this method should try to return
-// a region, whether it fits within the remaining time or not
-HeapRegion*
-CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                          double avg_prediction) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  fillCache();
-  if (_cache.is_empty()) {
-    assert(_curMarkedIndex == _numMarkedRegions,
-           "if cache is empty, list should also be empty");
-    ergo_verbose0(ErgoCSetConstruction,
-                  "stop adding old regions to CSet",
-                  ergo_format_reason("cache is empty"));
-    return NULL;
-  }
-
-  HeapRegion *hr = _cache.get_first();
-  assert(hr != NULL, "if cache not empty, first entry should be non-null");
-  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-
-  if (g1p->adaptive_young_list_length()) {
-    if (time_remaining - predicted_time < 0.0) {
-      g1h->check_if_region_is_too_expensive(predicted_time);
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than remaining time")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("remaining time"),
-                    predicted_time, time_remaining);
-      return NULL;
-    }
-  } else {
-    double threshold = 2.0 * avg_prediction;
-    if (predicted_time > threshold) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than threshold")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("threshold"),
-                    predicted_time, threshold);
-      return NULL;
+void CollectionSetChooser::clearMarkedHeapRegions() {
+  for (int i = 0; i < _markedRegions.length(); i++) {
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      r->set_sort_index(-1);
     }
   }
-
-  HeapRegion *hr2 = _cache.remove_first();
-  assert(hr == hr2, "cache contents should not have changed");
-
-  return hr;
-}
+  _markedRegions.clear();
+  _curr_index = 0;
+  _length = 0;
+  _remainingReclaimableBytes = 0;
+};
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,28 +28,6 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"

-// We need to sort heap regions by collection desirability.
-// This sorting is currently done in two "stages". An initial sort is
-// done following a cleanup pause as soon as all of the marked but
-// non-empty regions have been identified and the completely empty
-// ones reclaimed.
-// This gives us a global sort on a GC efficiency metric
-// based on predictive data available at that time. However,
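
The comment in updateTotals() above explains the lock: 2012-era HotSpot had no atomic add for size_t. For contrast, a sketch of the lock-free alternative the comment alludes to, written with C++11 atomics that were not available to HotSpot at the time — the globals below are stand-ins for the two fields, not HotSpot names:

    #include <atomic>
    #include <cstddef>

    std::atomic<int>         g_length{0};
    std::atomic<std::size_t> g_remaining_reclaimable_bytes{0};

    void update_totals(int region_num, std::size_t reclaimable_bytes) {
      if (region_num > 0) {
        // Two independent atomic adds; fine when the two counters do not
        // have to appear updated as a single unit.
        g_length.fetch_add(region_num, std::memory_order_relaxed);
        g_remaining_reclaimable_bytes.fetch_add(reclaimable_bytes,
                                                std::memory_order_relaxed);
      }
    }
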
-// any of these regions that are collected will only be collected
-// during a future GC pause, by which time it is possible that newer
-// data might allow us to revise and/or refine the earlier
-// pause predictions, leading to changes in expected gc efficiency
-// order. To somewhat mitigate this obsolescence, more so in the
-// case of regions towards the end of the list, which will be
-// picked later, these pre-sorted regions from the _markedRegions
-// array are not used as is, but a small prefix thereof is
-// insertion-sorted again into a small cache, based on more
-// recent remembered set information. Regions are then drawn
-// from this cache to construct the collection set at each
-// incremental GC.
-// This scheme and/or its implementation may be subject to
-// revision in the future.
-
 class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
   enum {
@@ -103,24 +81,82 @@

 class CollectionSetChooser: public CHeapObj {

   GrowableArray<HeapRegion*> _markedRegions;
-  int _curMarkedIndex;
-  int _numMarkedRegions;
-  CSetChooserCache _cache;
+
+  // The index of the next candidate old region to be considered for
+  // addition to the CSet.
+  int _curr_index;
+
+  // The number of candidate old regions added to the CSet chooser.
+  int _length;

-  // True iff last collection pause ran of out new "age 0" regions, and
-  // returned an "age 1" region.
-  bool _unmarked_age_1_returned_as_new;
+  CSetChooserCache _cache;
+  jint _first_par_unreserved_idx;

-  jint _first_par_unreserved_idx;
+  // If a region has more live bytes than this threshold, it will not
+  // be added to the CSet chooser and will not be a candidate for
+  // collection.
+  size_t _regionLiveThresholdBytes;
+
+  // The sum of reclaimable bytes over all the regions in the CSet chooser.
+  size_t _remainingReclaimableBytes;

 public:
-  HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction);
+
+  // Return the current candidate region to be considered for
+  // collection without removing it from the CSet chooser.
+  HeapRegion* peek() {
+    HeapRegion* res = NULL;
+    if (_curr_index < _length) {
+      res = _markedRegions.at(_curr_index);
+      assert(res != NULL,
+             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                     _curr_index));
+    }
+    return res;
+  }
+
+  // Remove the given region from the CSet chooser and move to the
+  // next one. The given region should be the current candidate region
+  // in the CSet chooser.
+  void remove_and_move_to_next(HeapRegion* hr) {
+    assert(hr != NULL, "pre-condition");
+    assert(_curr_index < _length, "pre-condition");
+    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
+    hr->set_sort_index(-1);
+    _markedRegions.at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+           err_msg("remaining reclaimable bytes inconsistent "
+                   "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
+                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
+    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+    _curr_index += 1;
+  }

   CollectionSetChooser();

   void sortMarkedHeapRegions();
   void fillCache();
+
+  // Determine whether to add the given region to the CSet chooser or
+  // not. Currently, we skip humongous regions (we never add them to
+  // the CSet, we only reclaim them during cleanup) and regions whose
+  // live bytes are over the threshold.
+  bool shouldAdd(HeapRegion* hr) {
+    assert(hr->is_marked(), "pre-condition");
+    assert(!hr->is_young(), "should never consider young regions");
+    return !hr->isHumongous() &&
+           hr->live_bytes() < _regionLiveThresholdBytes;
+  }
+
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMinOldCSetLength();
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMaxOldCSetLength();
+
+  // Serial version.
   void addMarkedHeapRegion(HeapRegion *hr);

   // Must be called before calls to getParMarkedHeapRegionChunk.
@@ -133,14 +169,21 @@
   // Set the marked array entry at index to hr.  Careful to claim the index
   // first if in parallel.
   void setMarkedHeapRegion(jint index, HeapRegion* hr);
-  // Atomically increment the number of claimed regions by "inc_by".
-  void incNumMarkedHeapRegions(jint inc_by);
+  // Atomically increment the number of added regions by region_num
+  // and the amount of reclaimable bytes by reclaimable_bytes.
+  void updateTotals(jint region_num, size_t reclaimable_bytes);

   void clearMarkedHeapRegions();

-  void updateAfterFullCollection();
+  // Return the number of candidate regions that remain to be collected.
+  size_t remainingRegions() { return _length - _curr_index; }

-  bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
+  // Determine whether the CSet chooser has more candidate regions or not.
+  bool isEmpty() { return remainingRegions() == 0; }
+
+  // Return the reclaimable bytes that remain to be collected on
+  // all the candidate regions in the CSet chooser.
+  size_t remainingReclaimableBytes() { return _remainingReclaimableBytes; }

   // Returns true if the used portion of "_markedRegions" is properly
   // sorted, otherwise asserts false.
@@ -148,9 +191,17 @@
   bool verify(void);
   bool regionProperlyOrdered(HeapRegion* r) {
     int si = r->sort_index();
-    return (si == -1) ||
-           (si > -1 && _markedRegions.at(si) == r) ||
-           (si < -1 && _cache.region_in_cache(r));
+    if (si > -1) {
+      guarantee(_curr_index <= si && si < _length,
+                err_msg("curr: %d sort index: %d: length: %d",
+                        _curr_index, si, _length));
+      guarantee(_markedRegions.at(si) == r,
+                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
+                        si, _markedRegions.at(si), r));
+    } else {
+      guarantee(si == -1, err_msg("sort index: %d", si));
+    }
+    return true;
   }
 #endif
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -958,7 +958,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -976,7 +976,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -1031,7 +1031,8 @@
   // the check before we do the actual allocation. The reason for doing it
   // before the allocation is that we avoid having to keep track of the newly
   // allocated memory while we do a GC.
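
peek() and remove_and_move_to_next() above turn the chooser into a cursor over the efficiency-sorted candidate array: inspect the current candidate, decide, then either stop or consume and advance. A compilable sketch of that consumer pattern under a simplified budget cutoff — the real caller is G1CollectorPolicy::finalize_cset(), shown further below, and the stubs and predicate here are hypothetical:

    #include <cstddef>

    struct HeapRegion;                          // opaque stand-in

    struct Chooser {                            // stand-in for the chooser
      HeapRegion* peek();
      void remove_and_move_to_next(HeapRegion* hr);
    };

    bool fits_in_pause_budget(HeapRegion* hr);  // hypothetical predicate
    void add_old_region_to_cset(HeapRegion* hr);

    void drain_candidates(Chooser* chooser) {
      for (HeapRegion* hr = chooser->peek(); hr != NULL; hr = chooser->peek()) {
        if (!fits_in_pause_budget(hr)) {
          break;  // simplified: the real loop also honours min/max bounds
        }
        add_old_region_to_cset(hr);             // consume...
        chooser->remove_and_move_to_next(hr);   // ...then advance the cursor
      }
    }
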
-  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
     collect(GCCause::_g1_humongous_allocation);
   }

@@ -1059,7 +1060,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -1081,7 +1082,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -2311,10 +2312,12 @@
 }

 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-      cause == GCCause::_g1_humongous_allocation);
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }

 #ifndef PRODUCT
@@ -2408,47 +2411,66 @@
 }

 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+  assert_heap_not_locked();

   unsigned int gc_count_before;
   unsigned int full_gc_count_before;
-  {
-    MutexLocker ml(Heap_lock);
-
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-  }
-
-  if (should_do_concurrent_full_gc(cause)) {
-    // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle. We're setting word_size to 0 which means that
-    // we are not requesting a post-GC allocation.
-    VM_G1IncCollectionPause op(gc_count_before,
-                               0,     /* word_size */
-                               true,  /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               cause);
-    VMThread::execute(&op);
-  } else {
-    if (cause == GCCause::_gc_locker
-        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-      // Schedule a standard evacuation pause. We're setting word_size
-      // to 0 which means that we are not requesting a post-GC allocation.
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
+    {
+      MutexLocker ml(Heap_lock);
+
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
+    }
+
+    if (should_do_concurrent_full_gc(cause)) {
+      // Schedule an initial-mark evacuation pause that will start a
+      // concurrent cycle. We're setting word_size to 0 which means that
+      // we are not requesting a post-GC allocation.
       VM_G1IncCollectionPause op(gc_count_before,
                                  0,     /* word_size */
-                                 false, /* should_initiate_conc_mark */
+                                 true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
       VMThread::execute(&op);
+      if (!op.pause_succeeded()) {
+        // Another GC got scheduled and prevented us from scheduling
+        // the initial-mark GC. It's unlikely that the GC that
+        // pre-empted us was also an initial-mark GC. So, we'll retry
+        // the initial-mark GC.
+
+        if (full_gc_count_before == total_full_collections()) {
+          retry_gc = true;
+        } else {
+          // A Full GC happened while we were trying to schedule the
+          // initial-mark GC. No point in starting a new cycle given
+          // that the whole heap was collected anyway.
+        }
+      }
     } else {
-      // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-      VMThread::execute(&op);
+      if (cause == GCCause::_gc_locker
+          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+        // Schedule a standard evacuation pause. We're setting word_size
+        // to 0 which means that we are not requesting a post-GC allocation.
+        VM_G1IncCollectionPause op(gc_count_before,
+                                   0,     /* word_size */
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   cause);
+        VMThread::execute(&op);
+      } else {
+        // Schedule a Full GC.
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VMThread::execute(&op);
+      }
     }
-  }
+  } while (retry_gc);
 }

 bool G1CollectedHeap::is_in(const void* p) const {
@@ -3149,12 +3171,12 @@

   // We apply the relevant closures to all the oops in the
   // system dictionary, the string table and the code cache.
-  const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  const int so = SO_AllClasses | SO_Strings | SO_CodeCache;

   process_strong_roots(true,      // activate StrongRootsScope
                        true,      // we set "collecting perm gen" to true,
                                   // so we don't reset the dirty cards in the perm gen.
-                       SharedHeap::ScanningOption(so),  // roots scanning options
+                       ScanningOption(so),  // roots scanning options
                        &rootsCl,
                        &blobsCl,
                        &rootsCl);
@@ -3425,16 +3447,6 @@
   }
 }

-double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
-                                                       bool young) {
-  return _g1_policy->predict_region_elapsed_time_ms(hr, young);
-}
-
-void G1CollectedHeap::check_if_region_is_too_expensive(double
-                                                       predicted_time_ms) {
-  _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
-}
-
 size_t G1CollectedHeap::pending_card_num() {
   size_t extra_cards = 0;
   JavaThread *curr = Threads::first();
@@ -3706,12 +3718,12 @@
     g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE

-    g1_policy()->choose_collection_set(target_pause_time_ms);
+    g1_policy()->finalize_cset(target_pause_time_ms);

     _cm->note_start_of_gc();
     // We should not verify the per-thread SATB buffers given that
     // we have not filtered them yet (we'll do so during the
-    // GC). We also call this after choose_collection_set() to
+    // GC). We also call this after finalize_cset() to
     // ensure that the CSet has been finalized.
     _cm->verify_no_cset_oops(true  /* verify_stacks */,
                              true  /* verify_enqueued_buffers */,
@@ -4734,7 +4746,7 @@
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Feb 17 12:18:52 2012 -0800
@@ -770,7 +770,7 @@
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
   void g1_process_strong_roots(bool collecting_perm_gen,
-                               SharedHeap::ScanningOption so,
+                               ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,
@@ -1182,6 +1182,12 @@
   bool free_regions_coming() { return _free_regions_coming; }
   void wait_while_free_regions_coming();

+  // Determine whether the given region is one that we are using as an
+  // old GC alloc region.
+  bool is_old_gc_alloc_region(HeapRegion* hr) {
+    return hr == _retained_old_gc_alloc_region;
+  }
+
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -1662,8 +1668,6 @@
 public:
   void stop_conc_gc_threads();

-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
-  void check_if_region_is_too_expensive(double predicted_time_ms);
   size_t pending_card_num();
   size_t max_pending_card_num();
   size_t cards_scanned();
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Feb 17 12:18:10 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Feb 17 12:18:52 2012 -0800
@@ -206,7 +206,6 @@

   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
-  _should_revert_to_young_gcs(false),
   _last_young_gc(false),
   _last_gc_was_young(false),

@@ -295,9 +294,6 @@
   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
   _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

-  // start conservatively
-  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
-
   int index;
   if (ParallelGCThreads == 0)
     index = 0;
@@ -629,16 +625,9 @@
       // possible to maximize how many old regions we can add to it.
     }
   } else {
-    if (gcs_are_young()) {
-      young_list_target_length = _young_list_fixed_length;
-    } else {
-      // A bit arbitrary: during mixed GCs we allocate half
-      // the young regions to try to add old regions to the CSet.
-      young_list_target_length = _young_list_fixed_length / 2;
-      // We choose to accept that we might go under the desired min
-      // length given that we intentionally ask for a smaller young gen.
-      desired_min_length = absolute_min_length;
-    }
+    // The user asked for a fixed young gen so we'll fix the young gen
+    // whether the next GC is young or mixed.
+    young_list_target_length = _young_list_fixed_length;
   }

   // Make sure we don't go over the desired max length, nor under the
@@ -872,7 +861,6 @@
   // transitions and make sure we start with young GCs after the Full GC.
   set_gcs_are_young(true);
   _last_young_gc = false;
-  _should_revert_to_young_gcs = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
@@ -889,7 +877,7 @@
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->updateAfterFullCollection();
+  _collectionSetChooser->clearMarkedHeapRegions();
 }

 void G1CollectorPolicy::record_stop_world_start() {
@@ -1000,7 +988,6 @@
 }

 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _should_revert_to_young_gcs = false;
   _last_young_gc = true;
   _in_marking_window = false;
 }
@@ -1205,9 +1192,7 @@
   last_pause_included_initial_mark = during_initial_mark_pause();
   if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-  }
-
-  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
     // pause we decided to start a cycle but at the beginning of
     // this pause we decided to postpone it. That's OK.
@@ -1492,12 +1477,14 @@
   }

   if (_last_young_gc) {
+    // This is supposed to be the "last young GC" before we start
+    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
+
     if (!last_pause_included_initial_mark) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "start mixed GCs",
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(false);
+      if (next_gc_should_be_mixed("start mixed GCs",
+                                  "do not start mixed GCs")) {
+        set_gcs_are_young(false);
+      }
     } else {
       ergo_verbose0(ErgoMixedGCs,
                     "do not start mixed GCs",
@@ -1507,39 +1494,14 @@
   }

   if (!_last_gc_was_young) {
-    if (_should_revert_to_young_gcs) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("mixed GCs end requested")
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(true);
-    } else if (_known_garbage_ratio < 0.05) {
-      ergo_verbose3(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("known garbage percent lower than threshold")
-                    ergo_format_byte_perc("known garbage")
-                    ergo_format_perc("threshold"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0,
-                    0.05 * 100.0);
-      set_gcs_are_young(true);
-    } else if (adaptive_young_list_length() &&
-               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
-      ergo_verbose5(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("current GC efficiency lower than "
-                                       "predicted young GC efficiency")
-                    ergo_format_double("GC efficiency factor")
-                    ergo_format_double("current GC efficiency")
-                    ergo_format_double("predicted young GC efficiency")
-                    ergo_format_byte_perc("known garbage"),
-                    get_gc_eff_factor(), cur_efficiency,
-                    predict_young_gc_eff(),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+    // This is a mixed GC. Here we decide whether to continue doing
+    // mixed GCs or not.
+
+    if (!next_gc_should_be_mixed("continue mixed GCs",
+                                 "do not continue mixed GCs")) {
       set_gcs_are_young(true);
     }
   }
-  _should_revert_to_young_gcs = false;

   if (_last_gc_was_young && !_during_marking) {
     _young_gc_eff_seq->add(cur_efficiency);
@@ -1648,15 +1610,6 @@

     _pending_cards_seq->add((double) _pending_cards);
     _rs_lengths_seq->add((double) _max_rs_lengths);
-
-    double expensive_region_limit_ms =
-      (double) MaxGCPauseMillis - predict_constant_other_time_ms();
-    if (expensive_region_limit_ms < 0.0) {
-      // this means that the other time was predicted to be longer than
-      // than the max pause time
-      expensive_region_limit_ms = (double) MaxGCPauseMillis;
-    }
-    _expensive_region_limit_ms = expensive_region_limit_ms;
   }

   _in_marking_window = new_in_marking_window;
@@ -1838,13 +1791,11 @@
   if (hr->is_marked())
     bytes_to_copy = hr->max_live_bytes();
   else {
-    guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
-               "invariant" );
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
     int age = hr->age_in_surv_rate_group();
     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
   }
-
   return bytes_to_copy;
 }

@@ -1860,22 +1811,6 @@
   _recorded_rs_lengths = rs_lengths;
 }

-void G1CollectorPolicy::check_if_region_is_too_expensive(double
-                                                         predicted_time_ms) {
-  // I don't think we need to do this when in young GC mode since
-  // marking will be initiated next time we hit the soft limit anyway...
-  if (predicted_time_ms > _expensive_region_limit_ms) {
-    ergo_verbose2(ErgoMixedGCs,
-                  "request mixed GCs end",
-                  ergo_format_reason("predicted region time higher than threshold")
-                  ergo_format_ms("predicted region time")
-                  ergo_format_ms("threshold"),
-                  predicted_time_ms, _expensive_region_limit_ms);
-    // no point in doing another mixed GC
-    _should_revert_to_young_gcs = true;
-  }
-}
-
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -2274,12 +2209,12 @@
 }

 class KnownGarbageClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;

 public:
   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
-    _hrSorted(hrSorted)
-  {}
+    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }

   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2288,11 +2223,10 @@

     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle. We also don't include young regions because we *must*
-      // include them in the next collection pause.
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _hrSorted->addMarkedHeapRegion(r);
       }
     }
     return false;
   }
 };

 class ParKnownGarbageHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
   jint _marked_regions_added;
+  size_t _reclaimable_bytes_added;
   jint _chunk_size;
   jint _cur_chunk_idx;
   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
@@ -2320,6 +2256,7 @@
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
     _marked_regions_added++;
+    _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
   }

@@ -2327,10 +2264,10 @@
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                            jint chunk_size,
                            int worker) :
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
-    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
-    _invokes(0)
-  {}
+    _g1h(G1CollectedHeap::heap()),
+    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+    _marked_regions_added(0), _reclaimable_bytes_added(0),
+    _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }

   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2340,17 +2277,17 @@

     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle.
-      // We also do not include young regions in collection sets
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         add_region(r);
       }
     }
     return false;
   }
   jint marked_regions_added() { return _marked_regions_added; }
+  size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
   int invokes() { return _invokes; }
 };

@@ -2362,8 +2299,7 @@
   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
-    _g1(G1CollectedHeap::heap())
-  {}
+    _g1(G1CollectedHeap::heap()) { }

   void work(uint worker_id) {
     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
@@ -2374,7 +2310,9 @@
                                             _g1->workers()->active_workers(),
                                             HeapRegion::InitialClaimValue);
     jint regions_added = parKnownGarbageCl.marked_regions_added();
-    _hrSorted->incNumMarkedHeapRegions(regions_added);
+    size_t reclaimable_bytes_added =
+      parKnownGarbageCl.reclaimable_bytes_added();
+    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
                              worker_id, parKnownGarbageCl.invokes(), regions_added);
@@ -2658,7 +2596,43 @@
 }
 #endif // !PRODUCT

-void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
+bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
+                                                const char* false_action_str) {
+  CollectionSetChooser* cset_chooser = _collectionSetChooser;
+  if (cset_chooser->isEmpty()) {
+    ergo_verbose0(ErgoMixedGCs,
+                  false_action_str,
+                  ergo_format_reason("candidate old regions not available"));
+    return false;
+  }
+  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+  size_t capacity_bytes = _g1->capacity();
+  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+  double threshold = (double) G1OldReclaimableThresholdPercent;
+ if (perc < threshold) { + ergo_verbose4(ErgoMixedGCs, + false_action_str, + ergo_format_reason("reclaimable percentage lower than threshold") + ergo_format_region("candidate old regions") + ergo_format_byte_perc("reclaimable") + ergo_format_perc("threshold"), + cset_chooser->remainingRegions(), + reclaimable_bytes, perc, threshold); + return false; + } + + ergo_verbose4(ErgoMixedGCs, + true_action_str, + ergo_format_reason("candidate old regions available") + ergo_format_region("candidate old regions") + ergo_format_byte_perc("reclaimable") + ergo_format_perc("threshold"), + cset_chooser->remainingRegions(), + reclaimable_bytes, perc, threshold); + return true; +} + +void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { // Set this here - in case we're not doing young collections. double non_young_start_time_sec = os::elapsedTime(); @@ -2672,7 +2646,6 @@ double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); double predicted_pause_time_ms = base_time_ms; - double time_remaining_ms = target_pause_time_ms - base_time_ms; ergo_verbose3(ErgoCSetConstruction | ErgoHigh, @@ -2682,22 +2655,6 @@ ergo_format_ms("target pause time"), base_time_ms, time_remaining_ms, target_pause_time_ms); - // the 10% and 50% values are arbitrary... - double threshold = 0.10 * target_pause_time_ms; - if (time_remaining_ms < threshold) { - double prev_time_remaining_ms = time_remaining_ms; - time_remaining_ms = 0.50 * target_pause_time_ms; - ergo_verbose3(ErgoCSetConstruction, - "adjust remaining time", - ergo_format_reason("remaining time lower than threshold") - ergo_format_ms("remaining time") - ergo_format_ms("threshold") - ergo_format_ms("adjusted remaining time"), - prev_time_remaining_ms, threshold, time_remaining_ms); - } - - size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes; - HeapRegion* hr; double young_start_time_sec = os::elapsedTime(); @@ -2752,78 +2709,97 @@ non_young_start_time_sec = young_end_time_sec; if (!gcs_are_young()) { - bool should_continue = true; - NumberSeq seq; - double avg_prediction = 100000000000000000.0; // something very large - - double prev_predicted_pause_time_ms = predicted_pause_time_ms; - do { - // Note that add_old_region_to_cset() increments the - // _old_cset_region_length field and cset_region_length() returns the - // sum of _eden_cset_region_length, _survivor_cset_region_length, and - // _old_cset_region_length. So, as old regions are added to the - // CSet, _old_cset_region_length will be incremented and - // cset_region_length(), which is used below, will always reflect - // the the total number of regions added up to this point to the CSet. 
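The remainder of this hunk swaps that do/while loop -- which pulled candidates through getNextMarkedRegion() against a running average prediction -- for a bounded walk over the sorted chooser. A self-contained model of the new control flow (min_old/max_old model calcMinOldCSetLength()/calcMaxOldCSetLength(); the cost vector stands in for peek()/remove_and_move_to_next()):

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  static std::size_t select_old_regions(const std::vector<double>& predicted_cost_ms,
                                        std::size_t min_old, std::size_t max_old,
                                        bool check_time_remaining,
                                        double time_remaining_ms) {
    std::size_t added = 0, expensive = 0;
    for (std::size_t i = 0; i < predicted_cost_ms.size(); ++i) {  // peek()
      if (added >= max_old) break;                 // hard cap on old regions
      double cost = predicted_cost_ms[i];
      if (check_time_remaining) {                  // adaptive young list length
        if (cost > time_remaining_ms) {
          if (added >= min_old) break;             // over budget, minimum met
          expensive += 1;                          // under the minimum: add anyway
        }
      } else if (added >= min_old) {
        break;                                     // fixed mode: stop at the minimum
      }
      time_remaining_ms -= cost;                   // remove_and_move_to_next()
      added += 1;
    }
    std::printf("added %zu regions (%zu over budget)\n", added, expensive);
    return added;
  }

  int main() {
    std::vector<double> costs(20, 8.0);            // 20 candidates, ~8 ms each
    select_old_regions(costs, 3, 10, true, 40.0);  // prints "added 5 regions (0 over budget)"
    return 0;
  }

The minimum keeps some old regions in every mixed pause even when predictions run hot; the maximum bounds the pause no matter how cheap the candidates look.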
- - hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms, - avg_prediction); - if (hr != NULL) { - _g1->old_set_remove(hr); - double predicted_time_ms = predict_region_elapsed_time_ms(hr, false); - time_remaining_ms -= predicted_time_ms; - predicted_pause_time_ms += predicted_time_ms; - add_old_region_to_cset(hr); - seq.add(predicted_time_ms); - avg_prediction = seq.avg() + seq.sd(); + CollectionSetChooser* cset_chooser = _collectionSetChooser; + assert(cset_chooser->verify(), "CSet Chooser verification - pre"); + const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength(); + const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); + + size_t expensive_region_num = 0; + bool check_time_remaining = adaptive_young_list_length(); + HeapRegion* hr = cset_chooser->peek(); + while (hr != NULL) { + if (old_cset_region_length() >= max_old_cset_length) { + // Added maximum number of old regions to the CSet. + ergo_verbose2(ErgoCSetConstruction, + "finish adding old regions to CSet", + ergo_format_reason("old CSet region num reached max") + ergo_format_region("old") + ergo_format_region("max"), + old_cset_region_length(), max_old_cset_length); + break; } - should_continue = true; - if (hr == NULL) { - // No need for an ergo verbose message here, - // getNextMarkRegion() does this when it returns NULL. - should_continue = false; + double predicted_time_ms = predict_region_elapsed_time_ms(hr, false); + if (check_time_remaining) { + if (predicted_time_ms > time_remaining_ms) { + // Too expensive for the current CSet. + + if (old_cset_region_length() >= min_old_cset_length) { + // We have added the minimum number of old regions to the CSet, + // we are done with this CSet. + ergo_verbose4(ErgoCSetConstruction, + "finish adding old regions to CSet", + ergo_format_reason("predicted time is too high") + ergo_format_ms("predicted time") + ergo_format_ms("remaining time") + ergo_format_region("old") + ergo_format_region("min"), + predicted_time_ms, time_remaining_ms, + old_cset_region_length(), min_old_cset_length); + break; + } + + // We'll add it anyway given that we haven't reached the + // minimum number of old regions. + expensive_region_num += 1; + } } else { - if (adaptive_young_list_length()) { - if (time_remaining_ms < 0.0) { - ergo_verbose1(ErgoCSetConstruction, - "stop adding old regions to CSet", - ergo_format_reason("remaining time is lower than 0") - ergo_format_ms("remaining time"), - time_remaining_ms); - should_continue = false; - } - } else { - if (cset_region_length() >= _young_list_fixed_length) { - ergo_verbose2(ErgoCSetConstruction, - "stop adding old regions to CSet", - ergo_format_reason("CSet length reached target") - ergo_format_region("CSet") - ergo_format_region("young target"), - cset_region_length(), _young_list_fixed_length); - should_continue = false; - } + if (old_cset_region_length() >= min_old_cset_length) { + // In the non-auto-tuning case, we'll finish adding regions + // to the CSet if we reach the minimum. 
+ ergo_verbose2(ErgoCSetConstruction, + "finish adding old regions to CSet", + ergo_format_reason("old CSet region num reached min") + ergo_format_region("old") + ergo_format_region("min"), + old_cset_region_length(), min_old_cset_length); + break; } } - } while (should_continue); - - if (!adaptive_young_list_length() && - cset_region_length() < _young_list_fixed_length) { - ergo_verbose2(ErgoCSetConstruction, - "request mixed GCs end", - ergo_format_reason("CSet length lower than target") - ergo_format_region("CSet") - ergo_format_region("young target"), - cset_region_length(), _young_list_fixed_length); - _should_revert_to_young_gcs = true; + + // We will add this region to the CSet. + time_remaining_ms -= predicted_time_ms; + predicted_pause_time_ms += predicted_time_ms; + cset_chooser->remove_and_move_to_next(hr); + _g1->old_set_remove(hr); + add_old_region_to_cset(hr); + + hr = cset_chooser->peek(); + } + if (hr == NULL) { + ergo_verbose0(ErgoCSetConstruction, + "finish adding old regions to CSet", + ergo_format_reason("candidate old regions not available")); } - ergo_verbose2(ErgoCSetConstruction | ErgoHigh, - "add old regions to CSet", - ergo_format_region("old") - ergo_format_ms("predicted old region time"), - old_cset_region_length(), - predicted_pause_time_ms - prev_predicted_pause_time_ms); + if (expensive_region_num > 0) { + // We print the information once here at the end, predicated on + // whether we added any apparently expensive regions or not, to + // avoid generating output per region. + ergo_verbose4(ErgoCSetConstruction, + "added expensive regions to CSet", + ergo_format_reason("old CSet region num not reached min") + ergo_format_region("old") + ergo_format_region("expensive") + ergo_format_region("min") + ergo_format_ms("remaining time"), + old_cset_region_length(), + expensive_region_num, + min_old_cset_length, + time_remaining_ms); + } + + assert(cset_chooser->verify(), "CSet Chooser verification - post"); } stop_incremental_cset_building(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -312,16 +312,13 @@ double _recorded_non_young_free_cset_time_ms; double _sigma; - double _expensive_region_limit_ms; size_t _rs_lengths_prediction; size_t _known_garbage_bytes; double _known_garbage_ratio; - double sigma() { - return _sigma; - } + double sigma() { return _sigma; } // A function that prevents us putting too much stock in small sample // sets. Returns a number between 2.0 and 1.0, depending on the number @@ -491,8 +488,6 @@ get_new_prediction(_non_young_other_cost_per_region_ms_seq); } - void check_if_region_is_too_expensive(double predicted_time_ms); - double predict_young_collection_elapsed_time_ms(size_t adjustment); double predict_base_elapsed_time_ms(size_t pending_cards); double predict_base_elapsed_time_ms(size_t pending_cards, @@ -707,7 +702,6 @@ // initial-mark work. volatile bool _during_initial_mark_pause; - bool _should_revert_to_young_gcs; bool _last_young_gc; // This set of variables tracks the collector efficiency, in order to @@ -946,10 +940,17 @@ return _bytes_copied_during_gc; } + // Determine whether the next GC should be mixed. Called to determine + // whether to start mixed GCs or whether to carry on doing mixed + // GCs. The two action strings are used in the ergo output when the + // method returns true or false. 
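The paired action strings dovetail with the g1ErgoVerbose.hpp hunk below: ergo_format() now leaves a %s placeholder and ergo_verbose_common() supplies _action_ as a runtime argument, so a single predicate can label its decision differently per call site. A hypothetical call (the strings here are illustrative, not taken from this changeset):

  // Hypothetical call site for the predicate declared just below:
  if (next_gc_should_be_mixed("start mixed GCs",
                              "do not start mixed GCs")) {
    // arrange for the next pause to take old regions as well
  }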
+ bool next_gc_should_be_mixed(const char* true_action_str, + const char* false_action_str); + // Choose a new collection set. Marks the chosen regions as being // "in_collection_set", and links them together. The head and number of // the collection set are available via access methods. - void choose_collection_set(double target_pause_time_ms); + void finalize_cset(double target_pause_time_ms); // The head of the list (via "next_in_collection_set()") representing the // current collection set. diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp --- a/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -131,8 +131,8 @@ ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)" // Generates the format string -#define ergo_format(_action_, _extra_format_) \ - " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]" +#define ergo_format(_extra_format_) \ + " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]" // Conditionally, prints an ergonomic decision record. _extra_format_ // is the format string for the optional items we'd like to print @@ -145,20 +145,21 @@ // them to the print method. For convenience, we have wrapper macros // below which take a specific number of arguments and set the rest to // a default value. -#define ergo_verbose_common(_tag_, _action_, _extra_format_, \ +#define ergo_verbose_common(_tag_, _action_, _extra_format_, \ _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \ - do { \ - if (G1ErgoVerbose::enabled((_tag_))) { \ - gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_), \ - os::elapsedTime(), \ - G1ErgoVerbose::to_string((_tag_)), \ - (_arg0_), (_arg1_), (_arg2_), \ - (_arg3_), (_arg4_), (_arg5_)); \ - } \ + do { \ + if (G1ErgoVerbose::enabled((_tag_))) { \ + gclog_or_tty->print_cr(ergo_format(_extra_format_), \ + os::elapsedTime(), \ + G1ErgoVerbose::to_string((_tag_)), \ + (_action_), \ + (_arg0_), (_arg1_), (_arg2_), \ + (_arg3_), (_arg4_), (_arg5_)); \ + } \ } while (0) -#define ergo_verbose(_tag_, _action_) \ +#define ergo_verbose(_tag_, _action_) \ ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0) #define ergo_verbose0(_tag_, _action_, _extra_format_) \ diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/g1_globals.hpp --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -297,7 +297,23 @@ \ develop(uintx, G1DefaultMaxNewGenPercent, 80, \ "Percentage (0-100) of the heap size to use as maximum " \ - "young gen size.") + "young gen size.") \ + \ + develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95, \ + "Threshold for regions to be added to the collection set. " \ + "Regions with more live bytes than this will not be collected.") \ + \ + develop(uintx, G1OldReclaimableThresholdPercent, 1, \ + "Threshold for the remaining old reclaimable bytes, expressed " \ + "as a percentage of the heap size. If the old reclaimable bytes " \
+ "are under this we will not collect them with more mixed GCs.") \ + \ + develop(uintx, G1MaxMixedGCNum, 4, \ + "The maximum desired number of mixed GCs after a marking cycle.") \ + \ + develop(uintx, G1OldCSetRegionThresholdPercent, 10, \ + "An upper bound for the number of old CSet regions expressed " \ + "as a percentage of the heap size.") G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -387,13 +387,12 @@ ct_bs->clear(MemRegion(bottom(), end())); } -// void HeapRegion::calc_gc_efficiency() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - _gc_efficiency = (double) garbage_bytes() / - g1h->predict_region_elapsed_time_ms(this, false); + G1CollectorPolicy* g1p = g1h->g1_policy(); + _gc_efficiency = (double) reclaimable_bytes() / + g1p->predict_region_elapsed_time_ms(this, false); } -// void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { assert(!isHumongous(), "sanity / pre-condition"); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/g1/heapRegion.hpp --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -415,6 +415,16 @@ return used_at_mark_start_bytes - marked_bytes(); } + // Return the number of bytes we'll reclaim if we collect this + // region. This includes not only the known garbage bytes in the + // region but also any unallocated space in it, i.e., [top, end), + // since it will also be reclaimed if we collect the region. + size_t reclaimable_bytes() { + size_t known_live_bytes = live_bytes(); + assert(known_live_bytes <= capacity(), "sanity"); + return capacity() - known_live_bytes; + } + // An upper bound on the number of live bytes in the region. size_t max_live_bytes() { return used() - garbage_bytes(); } @@ -648,10 +658,8 @@ init_top_at_mark_start(); } - // void calc_gc_efficiency(void); double gc_efficiency() { return _gc_efficiency;} - // bool is_young() const { return _young_type != NotYoung; } bool is_survivor() const { return _young_type == Survivor; } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1042,7 +1042,11 @@ size_policy->avg_survived()->sample(from()->used()); } - update_time_of_last_gc(os::javaTimeMillis()); + // We need to use a monotonically non-decreasing time in ms + // or we will see time-warp warnings and os::javaTimeMillis() + // does not guarantee monotonicity.
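The conversion on the next line recurs verbatim in the defNewGeneration.cpp and genMarkSweep.cpp hunks later in this changeset. A standalone restatement of the idea (the constant matches NANOSECS_PER_MILLISEC; os::javaTimeNanos() is the VM's monotonic clock, while os::javaTimeMillis() follows the adjustable wall clock):

  #include <stdint.h>

  // Deriving the millisecond stamp from the monotonic nanosecond clock
  // yields a non-decreasing "time of last GC", so freshness checks can
  // never observe time running backwards after a wall-clock adjustment.
  static const int64_t kNanosPerMilli = 1000000;  // NANOSECS_PER_MILLISEC

  static int64_t monotonic_millis(int64_t java_time_nanos) {
    return java_time_nanos / kNanosPerMilli;  // truncating divide is fine here
  }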
+ jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; + update_time_of_last_gc(now); SpecializationStats::print(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -418,25 +418,17 @@ gc_count = Universe::heap()->total_collections(); result = young_gen()->allocate(size); - - // (1) If the requested object is too large to easily fit in the - // young_gen, or - // (2) If GC is locked out via GCLocker, young gen is full and - // the need for a GC already signalled to GCLocker (done - // at a safepoint), - // ... then, rather than force a safepoint and (a potentially futile) - // collection (attempt) for each allocation, try allocation directly - // in old_gen. For case (2) above, we may in the future allow - // TLAB allocation directly in the old gen. if (result != NULL) { return result; } - if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) { - result = old_gen()->allocate(size); - if (result != NULL) { - return result; - } + + // If certain conditions hold, try allocating from the old gen. + result = mem_allocate_old_gen(size); + if (result != NULL) { + return result; } + + // Failed to allocate without a gc. if (GC_locker::is_active_and_needs_gc()) { // If this thread is not in a jni critical section, we stall // the requestor until the critical section has cleared and @@ -460,7 +452,6 @@ } if (result == NULL) { - // Generate a VM operation VM_ParallelGCFailedAllocation op(size, gc_count); VMThread::execute(&op); @@ -523,6 +514,42 @@ return result; } +// A "death march" is a series of ultra-slow allocations in which a full gc is +// done before each allocation, and after the full gc the allocation still +// cannot be satisfied from the young gen. This routine detects that condition; +// it should be called after a full gc has been done and the allocation +// attempted from the young gen. The parameter 'addr' should be the result of +// that young gen allocation attempt. +void +ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) { + if (addr != NULL) { + _death_march_count = 0; // death march has ended + } else if (_death_march_count == 0) { + if (should_alloc_in_eden(size)) { + _death_march_count = 1; // death march has started + } + } +} + +HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) { + if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) { + // Size is too big for eden, or gc is locked out. + return old_gen()->allocate(size); + } + + // If a "death march" is in progress, allocate from the old gen a limited + // number of times before doing a GC. + if (_death_march_count > 0) { + if (_death_march_count < 64) { + ++_death_march_count; + return old_gen()->allocate(size); + } else { + _death_march_count = 0; + } + } + return NULL; +} + // Failed allocation policy. Must be called from the VM thread, and // only at a safepoint! 
Note that this method has policy for allocation // flow, and NOT collection policy. So we do not check for gc collection @@ -535,27 +562,22 @@ assert(!Universe::heap()->is_gc_active(), "not reentrant"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); - size_t mark_sweep_invocation_count = total_invocations(); - - // We assume (and assert!) that an allocation at this point will fail - // unless we collect. + // We assume that allocation in eden will fail unless we collect. // First level allocation failure, scavenge and allocate in young gen. GCCauseSetter gccs(this, GCCause::_allocation_failure); - PSScavenge::invoke(); + const bool invoked_full_gc = PSScavenge::invoke(); HeapWord* result = young_gen()->allocate(size); // Second level allocation failure. // Mark sweep and allocate in young generation. - if (result == NULL) { - // There is some chance the scavenge method decided to invoke mark_sweep. - // Don't mark sweep twice if so. - if (mark_sweep_invocation_count == total_invocations()) { - invoke_full_gc(false); - result = young_gen()->allocate(size); - } + if (result == NULL && !invoked_full_gc) { + invoke_full_gc(false); + result = young_gen()->allocate(size); } + death_march_check(result, size); + // Third level allocation failure. // After mark sweep and young generation allocation failure, // allocate in old generation. diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,7 @@ // Collection of generations that are adjacent in the // space reserved for the heap. AdjoiningGenerations* _gens; + unsigned int _death_march_count; static GCTaskManager* _gc_task_manager; // The task manager. @@ -71,8 +72,13 @@ static inline size_t total_invocations(); HeapWord* allocate_new_tlab(size_t size); + inline bool should_alloc_in_eden(size_t size) const; + inline void death_march_check(HeapWord* const result, size_t size); + HeapWord* mem_allocate_old_gen(size_t size); + public: ParallelScavengeHeap() : CollectedHeap() { + _death_march_count = 0; set_alignment(_perm_gen_alignment, intra_heap_alignment()); set_alignment(_young_gen_alignment, intra_heap_alignment()); set_alignment(_old_gen_alignment, intra_heap_alignment()); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,12 @@ PSMarkSweep::total_invocations(); } +inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const +{ + const size_t eden_size = young_gen()->eden_space()->capacity_in_words(); + return size < eden_size / 2; +} + inline void ParallelScavengeHeap::invoke_scavenge() { PSScavenge::invoke(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -100,12 +100,12 @@ // This method contains no policy. You should probably // be calling invoke() instead. -void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { +bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::check_active_before_gc()) { - return; + return false; } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -382,6 +382,8 @@ #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif + + return true; } bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy, diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,7 +78,7 @@ public: static void invoke(bool clear_all_softrefs); - static void invoke_no_policy(bool clear_all_softrefs); + static bool invoke_no_policy(bool clear_all_softrefs); static void initialize(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1993,12 +1993,12 @@ // This method contains no policy. You should probably // be calling invoke() instead. 
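As with PSMarkSweep above, invoke_no_policy() now reports whether the collection actually ran, since GC_locker can veto it at the last moment. That is what lets PSScavenge::invoke() further down drop its old invocation-counter comparison; condensed from the psScavenge.cpp hunk in this same changeset:

  // Shape of the new caller logic in PSScavenge::invoke():
  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;
  if (need_full_gc) {
    full_gc_done = UseParallelOldGC
                 ? PSParallelCompact::invoke_no_policy(clear_all_softrefs)
                 : PSMarkSweep::invoke_no_policy(clear_all_softrefs);
  }
  return full_gc_done;

One returned flag replaces the fragile pattern of sampling total_invocations() before and after the scavenge to detect an embedded full GC.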
-void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { +bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(ref_processor() != NULL, "Sanity"); if (GC_locker::check_active_before_gc()) { - return; + return false; } TimeStamp marking_start; @@ -2248,6 +2248,8 @@ #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif + + return true; } bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy, diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1057,7 +1057,7 @@ } static void invoke(bool maximum_heap_compaction); - static void invoke_no_policy(bool maximum_heap_compaction); + static bool invoke_no_policy(bool maximum_heap_compaction); static void post_initialize(); // Perform initialization for PSParallelCompact that requires diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -247,167 +247,6 @@ } } -// -// This method is pretty bulky. It would be nice to split it up -// into smaller submethods, but we need to be careful not to hurt -// performance. -// - -oop PSPromotionManager::copy_to_survivor_space(oop o) { - assert(PSScavenge::should_scavenge(&o), "Sanity"); - - oop new_obj = NULL; - - // NOTE! We must be very careful with any methods that access the mark - // in o. There may be multiple threads racing on it, and it may be forwarded - // at any time. Do not use oop methods for accessing the mark! - markOop test_mark = o->mark(); - - // The same test as "o->is_forwarded()" - if (!test_mark->is_marked()) { - bool new_obj_is_tenured = false; - size_t new_obj_size = o->size(); - - // Find the objects age, MT safe. - int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ? - test_mark->displaced_mark_helper()->age() : test_mark->age(); - - // Try allocating obj in to-space (unless too old) - if (age < PSScavenge::tenuring_threshold()) { - new_obj = (oop) _young_lab.allocate(new_obj_size); - if (new_obj == NULL && !_young_gen_is_full) { - // Do we allocate directly, or flush and refill? 
- if (new_obj_size > (YoungPLABSize / 2)) { - // Allocate this object directly - new_obj = (oop)young_space()->cas_allocate(new_obj_size); - } else { - // Flush and fill - _young_lab.flush(); - - HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize); - if (lab_base != NULL) { - _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); - // Try the young lab allocation again. - new_obj = (oop) _young_lab.allocate(new_obj_size); - } else { - _young_gen_is_full = true; - } - } - } - } - - // Otherwise try allocating obj tenured - if (new_obj == NULL) { -#ifndef PRODUCT - if (Universe::heap()->promotion_should_fail()) { - return oop_promotion_failed(o, test_mark); - } -#endif // #ifndef PRODUCT - - new_obj = (oop) _old_lab.allocate(new_obj_size); - new_obj_is_tenured = true; - - if (new_obj == NULL) { - if (!_old_gen_is_full) { - // Do we allocate directly, or flush and refill? - if (new_obj_size > (OldPLABSize / 2)) { - // Allocate this object directly - new_obj = (oop)old_gen()->cas_allocate(new_obj_size); - } else { - // Flush and fill - _old_lab.flush(); - - HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize); - if(lab_base != NULL) { - _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); - // Try the old lab allocation again. - new_obj = (oop) _old_lab.allocate(new_obj_size); - } - } - } - - // This is the promotion failed test, and code handling. - // The code belongs here for two reasons. It is slightly - // different thatn the code below, and cannot share the - // CAS testing code. Keeping the code here also minimizes - // the impact on the common case fast path code. - - if (new_obj == NULL) { - _old_gen_is_full = true; - return oop_promotion_failed(o, test_mark); - } - } - } - - assert(new_obj != NULL, "allocation should have succeeded"); - - // Copy obj - Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size); - - // Now we have to CAS in the header. - if (o->cas_forward_to(new_obj, test_mark)) { - // We won any races, we "own" this object. - assert(new_obj == o->forwardee(), "Sanity"); - - // Increment age if obj still in new generation. Now that - // we're dealing with a markOop that cannot change, it is - // okay to use the non mt safe oop methods. - if (!new_obj_is_tenured) { - new_obj->incr_age(); - assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); - } - - // Do the size comparison first with new_obj_size, which we - // already have. Hopefully, only a few objects are larger than - // _min_array_size_for_chunking, and most of them will be arrays. - // So, the is->objArray() test would be very infrequent. - if (new_obj_size > _min_array_size_for_chunking && - new_obj->is_objArray() && - PSChunkLargeArrays) { - // we'll chunk it - oop* const masked_o = mask_chunked_array_oop(o); - push_depth(masked_o); - TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); - } else { - // we'll just push its contents - new_obj->push_contents(this); - } - } else { - // We lost, someone else "owns" this object - guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); - - // Try to deallocate the space. If it was directly allocated we cannot - // deallocate it, so we have to test. If the deallocation fails, - // overwrite with a filler object. 
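The roughly 160 lines being removed from this file are not lost: they reappear almost verbatim as a template in the psPromotionManager.inline.hpp hunk later in this changeset, where the new flag shows up as if (!promote_immediately) around the to-space attempt. Its new shape (the template parameter list is reconstructed from that use; the body is otherwise unchanged):

  template <bool promote_immediately>
  oop PSPromotionManager::copy_to_survivor_space(oop o);

Read the deletion here and the large addition below as a move; the only functional change is the option to skip the young-generation allocation attempt entirely.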
- if (new_obj_is_tenured) { - if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { - CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); - } - } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { - CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); - } - - // don't update this before the unallocation! - new_obj = o->forwardee(); - } - } else { - assert(o->is_forwarded(), "Sanity"); - new_obj = o->forwardee(); - } - -#ifdef DEBUG - // This code must come after the CAS test, or it will print incorrect - // information. - if (TraceScavenge) { - gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}", - PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring", - new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size()); - } -#endif - - return new_obj; -} - template void PSPromotionManager::process_array_chunk_work( oop obj, int start, int end) { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -171,7 +171,7 @@ void set_old_gen_is_full(bool state) { _old_gen_is_full = state; } // Promotion methods - oop copy_to_survivor_space(oop o); + template oop copy_to_survivor_space(oop o); oop oop_promotion_failed(oop obj, markOop obj_mark); void reset(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,170 @@ claim_or_forward_internal_depth(p); } +// +// This method is pretty bulky. It would be nice to split it up +// into smaller submethods, but we need to be careful not to hurt +// performance. +// +template +oop PSPromotionManager::copy_to_survivor_space(oop o) { + assert(PSScavenge::should_scavenge(&o), "Sanity"); + + oop new_obj = NULL; + + // NOTE! We must be very careful with any methods that access the mark + // in o. There may be multiple threads racing on it, and it may be forwarded + // at any time. Do not use oop methods for accessing the mark! + markOop test_mark = o->mark(); + + // The same test as "o->is_forwarded()" + if (!test_mark->is_marked()) { + bool new_obj_is_tenured = false; + size_t new_obj_size = o->size(); + + if (!promote_immediately) { + // Find the objects age, MT safe. + int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ? 
+ test_mark->displaced_mark_helper()->age() : test_mark->age(); + + // Try allocating obj in to-space (unless too old) + if (age < PSScavenge::tenuring_threshold()) { + new_obj = (oop) _young_lab.allocate(new_obj_size); + if (new_obj == NULL && !_young_gen_is_full) { + // Do we allocate directly, or flush and refill? + if (new_obj_size > (YoungPLABSize / 2)) { + // Allocate this object directly + new_obj = (oop)young_space()->cas_allocate(new_obj_size); + } else { + // Flush and fill + _young_lab.flush(); + + HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize); + if (lab_base != NULL) { + _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); + // Try the young lab allocation again. + new_obj = (oop) _young_lab.allocate(new_obj_size); + } else { + _young_gen_is_full = true; + } + } + } + } + } + + // Otherwise try allocating obj tenured + if (new_obj == NULL) { +#ifndef PRODUCT + if (Universe::heap()->promotion_should_fail()) { + return oop_promotion_failed(o, test_mark); + } +#endif // #ifndef PRODUCT + + new_obj = (oop) _old_lab.allocate(new_obj_size); + new_obj_is_tenured = true; + + if (new_obj == NULL) { + if (!_old_gen_is_full) { + // Do we allocate directly, or flush and refill? + if (new_obj_size > (OldPLABSize / 2)) { + // Allocate this object directly + new_obj = (oop)old_gen()->cas_allocate(new_obj_size); + } else { + // Flush and fill + _old_lab.flush(); + + HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize); + if(lab_base != NULL) { + _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); + // Try the old lab allocation again. + new_obj = (oop) _old_lab.allocate(new_obj_size); + } + } + } + + // This is the promotion failed test, and code handling. + // The code belongs here for two reasons. It is slightly + // different than the code below, and cannot share the + // CAS testing code. Keeping the code here also minimizes + // the impact on the common case fast path code. + + if (new_obj == NULL) { + _old_gen_is_full = true; + return oop_promotion_failed(o, test_mark); + } + } + } + + assert(new_obj != NULL, "allocation should have succeeded"); + + // Copy obj + Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size); + + // Now we have to CAS in the header. + if (o->cas_forward_to(new_obj, test_mark)) { + // We won any races, we "own" this object. + assert(new_obj == o->forwardee(), "Sanity"); + + // Increment age if obj still in new generation. Now that + // we're dealing with a markOop that cannot change, it is + // okay to use the non mt safe oop methods. + if (!new_obj_is_tenured) { + new_obj->incr_age(); + assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); + } + + // Do the size comparison first with new_obj_size, which we + // already have. Hopefully, only a few objects are larger than + // _min_array_size_for_chunking, and most of them will be arrays. + // So, the is->objArray() test would be very infrequent. + if (new_obj_size > _min_array_size_for_chunking && + new_obj->is_objArray() && + PSChunkLargeArrays) { + // we'll chunk it + oop* const masked_o = mask_chunked_array_oop(o); + push_depth(masked_o); + TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); + } else { + // we'll just push its contents + new_obj->push_contents(this); + } + } else { + // We lost, someone else "owns" this object + guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); + + // Try to deallocate the space. If it was directly allocated we cannot + // deallocate it, so we have to test.
If the deallocation fails, + // overwrite with a filler object. + if (new_obj_is_tenured) { + if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); + } + } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); + } + + // don't update this before the unallocation! + new_obj = o->forwardee(); + } + } else { + assert(o->is_forwarded(), "Sanity"); + new_obj = o->forwardee(); + } + +#ifdef DEBUG + // This code must come after the CAS test, or it will print incorrect + // information. + if (TraceScavenge) { + gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}", + PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring", + new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size()); + } +#endif + + return new_obj; +} + + inline void PSPromotionManager::process_popped_location_depth(StarTask p) { if (is_oop_masked(p)) { assert(PSChunkLargeArrays, "invariant"); @@ -69,9 +233,9 @@ } else { if (p.is_narrow()) { assert(UseCompressedOops, "Error"); - PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p); + PSScavenge::copy_and_push_safe_barrier(this, p); } else { - PSScavenge::copy_and_push_safe_barrier(this, (oop*)p); + PSScavenge::copy_and_push_safe_barrier(this, p); } } } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" +#include "code/codeCache.hpp" #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" #include "gc_implementation/parallelScavenge/generationSizer.hpp" @@ -100,7 +101,7 @@ // Weak refs may be visited more than once. if (PSScavenge::should_scavenge(p, _to_space)) { - PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); + PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); } } virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); } @@ -214,36 +215,41 @@ // // Note that this method should only be called from the vm_thread while // at a safepoint! 
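Just above, PSKeepAliveClosure's barrier call -- like the root closures in psScavenge.inline.hpp further down -- is now routed through a compile-time promote_immediately flag rather than a runtime test. A self-contained model of the pattern (all names here are stand-ins; in the patch the flag gates the to-space attempt inside copy_to_survivor_space):

  #include <cstdio>

  // The flag is a template parameter: each instantiation compiles into
  // a separate function with the branch folded away, so the common
  // scavenge path pays no per-object runtime check.
  template <bool promote_immediately>
  static void copy_object_model(int age, int tenuring_threshold) {
    if (!promote_immediately && age < tenuring_threshold) {
      std::printf("try to-space first\n");
    } else {
      std::printf("allocate directly in the old gen\n");
    }
  }

  int main() {
    copy_object_model<false>(2, 15);  // ordinary scavenge roots
    copy_object_model<true>(2, 15);   // scavengable nmethod roots (see psTasks.cpp)
    return 0;
  }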
-void PSScavenge::invoke() { +bool PSScavenge::invoke() { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(!Universe::heap()->is_gc_active(), "not reentrant"); - ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); + ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSAdaptiveSizePolicy* policy = heap->size_policy(); IsGCActiveMark mark; - bool scavenge_was_done = PSScavenge::invoke_no_policy(); + const bool scavenge_done = PSScavenge::invoke_no_policy(); + const bool need_full_gc = !scavenge_done || + policy->should_full_GC(heap->old_gen()->free_in_bytes()); + bool full_gc_done = false; - PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); - if (UsePerfData) - counters->update_full_follows_scavenge(0); - if (!scavenge_was_done || - policy->should_full_GC(heap->old_gen()->free_in_bytes())) { - if (UsePerfData) - counters->update_full_follows_scavenge(full_follows_scavenge); + if (UsePerfData) { + PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters(); + const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped; + counters->update_full_follows_scavenge(ffs_val); + } + + if (need_full_gc) { GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); CollectorPolicy* cp = heap->collector_policy(); const bool clear_all_softrefs = cp->should_clear_all_soft_refs(); if (UseParallelOldGC) { - PSParallelCompact::invoke_no_policy(clear_all_softrefs); + full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs); } else { - PSMarkSweep::invoke_no_policy(clear_all_softrefs); + full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs); } } + + return full_gc_done; } // This method contains no policy. You should probably @@ -602,6 +608,8 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); + CodeCache::prune_scavenge_root_nmethods(); + // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,10 +117,9 @@ // Called by parallelScavengeHeap to init the tenuring threshold static void initialize(); - // Scavenge entry point - static void invoke(); - // Return true is a collection was done. Return - // false if the collection was skipped. + // Scavenge entry point. This may invoke a full gc; return true if so. + static bool invoke(); + // Return true if a collection was done; false otherwise. 
static bool invoke_no_policy(); // If an attempt to promote fails, this method is invoked @@ -135,7 +134,8 @@ template static inline bool should_scavenge(T* p, MutableSpace* to_space); template static inline bool should_scavenge(T* p, bool check_to_space); - template inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p); + template + inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p); // Is an object in the young generation // This assumes that the HeapWord argument is in the heap, diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" inline void PSScavenge::save_to_space_top_before_gc() { @@ -65,7 +66,7 @@ // Attempt to "claim" oop at p via CAS, push the new obj if successful // This version tests the oop* to make sure it is within the heap before // attempting marking. -template +template inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm, T* p) { assert(should_scavenge(p, true), "revisiting object?"); @@ -73,7 +74,7 @@ oop o = oopDesc::load_decode_heap_oop_not_null(p); oop new_obj = o->is_forwarded() ? o->forwardee() - : pm->copy_to_survivor_space(o); + : pm->copy_to_survivor_space(o); oopDesc::encode_store_heap_oop_not_null(p, new_obj); // We cannot mark without test, as some code passes us pointers @@ -86,7 +87,8 @@ } } -class PSScavengeRootsClosure: public OopClosure { +template +class PSRootsClosure: public OopClosure { private: PSPromotionManager* _promotion_manager; @@ -94,13 +96,16 @@ template void do_oop_work(T *p) { if (PSScavenge::should_scavenge(p)) { // We never card mark roots, maybe call a func without test? 
- PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); + PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); } } public: - PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } - void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } - void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } + PSRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } + void do_oop(oop* p) { PSRootsClosure::do_oop_work(p); } + void do_oop(narrowOop* p) { PSRootsClosure::do_oop_work(p); } }; +typedef PSRootsClosure PSScavengeRootsClosure; +typedef PSRootsClosure PSPromoteRootsClosure; + #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,6 +51,7 @@ PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which); PSScavengeRootsClosure roots_closure(pm); + PSPromoteRootsClosure roots_to_old_closure(pm); switch (_root_type) { case universe: @@ -91,7 +92,7 @@ case code_cache: { - CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true); + CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true); CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob); } break; diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/memory/compactingPermGenGen.cpp --- a/src/share/vm/memory/compactingPermGenGen.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/memory/compactingPermGenGen.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -240,9 +240,6 @@ if (_ro_space == NULL || _rw_space == NULL) vm_exit_during_initialization("Could not allocate a shared space"); - // Cover both shared spaces entirely with cards. - _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end)); - if (UseSharedSpaces) { // Map in the regions in the shared file. @@ -279,10 +276,14 @@ delete _rw_space; _rw_space = NULL; shared_end = (HeapWord*)(rs.base() + rs.size()); - _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom)); } } + if (spec()->enable_shared_spaces()) { + // Cover both shared spaces entirely with cards. + _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end)); + } + // Reserved region includes shared spaces for oop.is_in_reserved(). _reserved.set_end(shared_end); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/memory/defNewGeneration.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -655,7 +655,12 @@ from()->set_concurrent_iteration_safe_limit(from()->top()); to()->set_concurrent_iteration_safe_limit(to()->top()); SpecializationStats::print(); - update_time_of_last_gc(os::javaTimeMillis()); + + // We need to use a monotonically non-decreasing time in ms + // or we will see time-warp warnings and os::javaTimeMillis() + // does not guarantee monotonicity. + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; + update_time_of_last_gc(now); } class RemoveForwardPointerClosure: public ObjectClosure { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/memory/genMarkSweep.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -176,7 +176,11 @@ // Update time of last gc for all generations we collected // (which curently is all the generations in the heap). - gch->update_time_of_last_gc(os::javaTimeMillis()); + // We need to use a monotonically non-decreasing time in ms + // or we will see time-warp warnings and os::javaTimeMillis() + // does not guarantee monotonicity. + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; + gch->update_time_of_last_gc(now); } void GenMarkSweep::allocate_stacks() { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/oops/instanceKlass.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -240,7 +240,6 @@ Thread* _init_thread; // Pointer to current thread doing initialization (to handle recusive initialization) int _vtable_len; // length of Java vtable (in words) int _itable_len; // length of Java itable (in words) - ReferenceType _reference_type; // reference type OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily) JNIid* _jni_ids; // First JNI identifier for static fields in this class jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none @@ -265,6 +264,8 @@ // _idnum_allocated_count. u1 _init_state; // state of class + u1 _reference_type; // reference type + // embedded Java vtable follows here // embedded Java itables follows here // embedded static fields follows here @@ -407,8 +408,11 @@ void eager_initialize(Thread *thread); // reference type - ReferenceType reference_type() const { return _reference_type; } - void set_reference_type(ReferenceType t) { _reference_type = t; } + ReferenceType reference_type() const { return (ReferenceType)_reference_type; } + void set_reference_type(ReferenceType t) { + assert(t == (u1)t, "overflow"); + _reference_type = (u1)t; + } static ByteSize reference_type_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _reference_type)); } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/opto/library_call.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -2153,7 +2153,7 @@ // // if (offset == java_lang_ref_Reference::_reference_offset) { // if (base != null) { - // if (klass(base)->reference_type() != REF_NONE)) { + // if (instance_of(base, java.lang.ref.Reference)) { // pre_barrier(_, pre_val, ...); // } // } @@ -2165,8 +2165,6 @@ IdealKit ideal(this); #define __ ideal.
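Two hunks around here cooperate. instanceKlass.hpp above repacks _reference_type from a full ReferenceType field into a spare u1 byte next to _init_state (vmStructs.cpp below is updated to match), and library_call.cpp here stops loading that field altogether, testing instance-of java.lang.ref.Reference instead. A minimal model of the byte-packed accessor pair (the enum and all names are stand-ins, not HotSpot's own):

  #include <cassert>

  enum RefTypeModel { kRefNone = 0, kRefSoft, kRefWeak, kRefFinal, kRefPhantom };

  struct KlassModel {
    unsigned char _reference_type;  // u1: every enum value fits in one byte

    RefTypeModel reference_type() const {
      return (RefTypeModel) _reference_type;   // widen on read
    }
    void set_reference_type(RefTypeModel t) {
      assert(t == (unsigned char) t);          // mirrors the patch's overflow assert
      _reference_type = (unsigned char) t;
    }
  };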
- const int reference_type_offset = in_bytes(instanceKlass::reference_type_offset()); - Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset); __ if_then(offset, BoolTest::eq, referent_off, unlikely); { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/prims/jvmtiTagMap.cpp --- a/src/share/vm/prims/jvmtiTagMap.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2999,7 +2999,8 @@ char type = field->field_type(); if (!is_primitive_field_type(type)) { oop fld_o = o->obj_field(field->field_offset()); - if (fld_o != NULL) { + // ignore any objects that aren't visible to profiler + if (fld_o != NULL && ServiceUtil::visible_oop(fld_o)) { // reflection code may have a reference to a klassOop. // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe if (fld_o->is_klass()) { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/runtime/arguments.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1368,12 +1368,9 @@ // by ergonomics. if (MaxHeapSize <= max_heap_for_compressed_oops()) { #if !defined(COMPILER1) || defined(TIERED) -// disable UseCompressedOops by default on MacOS X until 7118647 is fixed -#ifndef __APPLE__ if (FLAG_IS_DEFAULT(UseCompressedOops)) { FLAG_SET_ERGO(bool, UseCompressedOops, true); } -#endif // !__APPLE__ #endif #ifdef _WIN64 if (UseLargePages && UseCompressedOops) { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/runtime/dtraceJSDT.cpp --- a/src/share/vm/runtime/dtraceJSDT.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/runtime/dtraceJSDT.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -82,7 +82,7 @@ int handle = pd_activate((void*)probes, module_name, providers_count, providers); - if (handle <= 0) { + if (handle < 0) { delete probes; THROW_MSG_0(vmSymbols::java_lang_RuntimeException(), "Unable to register DTrace probes (internal error)."); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/runtime/thread.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -3220,11 +3220,6 @@ return status; } - // Must be run after init_ft which initializes ft_enabled - if (TRACE_INITIALIZE() != JNI_OK) { - vm_exit_during_initialization("Failed to initialize tracing backend"); - } - // Should be done after the heap is fully created main_thread->cache_global_variables(); @@ -3366,6 +3361,7 @@ initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0); initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0); initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0); + initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0); } else { warning("java.lang.OutOfMemoryError has not been initialized"); warning("java.lang.NullPointerException has not been initialized"); @@ -3373,6 +3369,7 @@ warning("java.lang.ArrayStoreException has not been initialized"); warning("java.lang.ArithmeticException has not been initialized"); warning("java.lang.StackOverflowError has not been initialized"); + warning("java.lang.IllegalArgumentException has not been 
initialized"); } } @@ -3402,6 +3399,11 @@ quicken_jni_functions(); + // Must be run after init_ft which initializes ft_enabled + if (TRACE_INITIALIZE() != JNI_OK) { + vm_exit_during_initialization("Failed to initialize tracing backend"); + } + // Set flag that basic initialization has completed. Used by exceptions and various // debug stuff, that does not work until all basic classes have been initialized. set_init_completed(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/runtime/virtualspace.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -455,7 +455,7 @@ void ReservedSpace::protect_noaccess_prefix(const size_t size) { assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL && - (size_t(_base + _size) > OopEncodingHeapMax) && + (Universe::narrow_oop_base() != NULL) && Universe::narrow_oop_use_implicit_null_checks()), "noaccess_prefix should be used only with non zero based compressed oops"); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/runtime/vmStructs.cpp --- a/src/share/vm/runtime/vmStructs.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/runtime/vmStructs.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -314,7 +314,7 @@ nonstatic_field(instanceKlass, _init_thread, Thread*) \ nonstatic_field(instanceKlass, _vtable_len, int) \ nonstatic_field(instanceKlass, _itable_len, int) \ - nonstatic_field(instanceKlass, _reference_type, ReferenceType) \ + nonstatic_field(instanceKlass, _reference_type, u1) \ volatile_nonstatic_field(instanceKlass, _oop_map_cache, OopMapCache*) \ nonstatic_field(instanceKlass, _jni_ids, JNIid*) \ nonstatic_field(instanceKlass, _osr_nmethods_head, nmethod*) \ diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/diagnosticArgument.cpp --- a/src/share/vm/services/diagnosticArgument.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/diagnosticArgument.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -28,9 +28,16 @@ #include "services/diagnosticArgument.hpp" void GenDCmdArgument::read_value(const char* str, size_t len, TRAPS) { - if (is_set()) { + /* NOTE:Some argument types doesn't require a value, + * for instance boolean arguments: "enableFeatureX". is + * equivalent to "enableFeatureX=true". In these cases, + * str will be null. This is perfectly valid. + * All argument types must perform null checks on str. 
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/diagnosticArgument.cpp --- a/src/share/vm/services/diagnosticArgument.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/diagnosticArgument.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -28,9 +28,16 @@ #include "services/diagnosticArgument.hpp" void GenDCmdArgument::read_value(const char* str, size_t len, TRAPS) { - if (is_set()) { + /* NOTE: Some argument types don't require a value, * for instance boolean arguments: "enableFeatureX" is * equivalent to "enableFeatureX=true". In these cases, * str will be null. This is perfectly valid. * All argument types must perform null checks on str. + */ + + if (is_set() && !allow_multiple()) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Duplicates in diagnostic command arguments"); + "Duplicates in diagnostic command arguments\n"); } parse_value(str, len, CHECK); set_is_set(true); @@ -38,9 +45,9 @@ template <> void DCmdArgument<jlong>::parse_value(const char* str, size_t len, TRAPS) { - if (sscanf(str, INT64_FORMAT, &_value) != 1) { + if (str == NULL || sscanf(str, INT64_FORMAT, &_value) != 1) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Integer parsing error in diagnostic command arguments"); + "Integer parsing error in diagnostic command arguments\n"); } } @@ -89,16 +96,20 @@ template <> void DCmdArgument<char*>::parse_value(const char* str, size_t len, TRAPS) { - _value = NEW_C_HEAP_ARRAY(char, len+1); - strncpy(_value, str, len); - _value[len] = 0; + if (str == NULL) { + _value = NULL; + } else { + _value = NEW_C_HEAP_ARRAY(char, len+1); + strncpy(_value, str, len); + _value[len] = 0; + } } template <> void DCmdArgument<char*>::init_value(TRAPS) { - if (has_default()) { + if (has_default() && _default_string != NULL) { this->parse_value(_default_string, strlen(_default_string), THREAD); if (HAS_PENDING_EXCEPTION) { - fatal("Default string must be parsable"); + fatal("Default string must be parsable"); } } else { set_value(NULL); @@ -111,3 +122,153 @@ set_value(NULL); } }
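The additions that follow parse a duration argument as a decimal count plus a unit suffix of one or two characters ("ns" through "d"), requiring a unit unless the value is zero. A simplified, self-contained version of the same scheme, returning false where the real code throws IllegalArgumentException:

#include <cstdio>
#include <cstring>

// Parse "<number><unit>" (for example "250ms" or "2h") into nanoseconds.
bool parse_duration_nanos(const char* str, long long* out) {
  if (str == NULL) return false;
  long long value;
  char unit[3] = {0, 0, 0};  // at most two unit chars plus the terminator
  int n = std::sscanf(str, "%lld%2s", &value, unit);
  if (n < 1) return false;   // no number at all
  if (n == 1) {              // missing unit: only a zero value is legal
    if (value != 0) return false;
    *out = 0;
    return true;
  }
  long long scale;
  if      (std::strcmp(unit, "ns") == 0) scale = 1LL;
  else if (std::strcmp(unit, "us") == 0) scale = 1000LL;
  else if (std::strcmp(unit, "ms") == 0) scale = 1000LL * 1000;
  else if (std::strcmp(unit, "s")  == 0) scale = 1000LL * 1000 * 1000;
  else if (std::strcmp(unit, "m")  == 0) scale = 60LL * 1000 * 1000 * 1000;
  else if (std::strcmp(unit, "h")  == 0) scale = 60LL * 60 * 1000 * 1000 * 1000;
  else if (std::strcmp(unit, "d")  == 0) scale = 24LL * 60 * 60 * 1000 * 1000 * 1000;
  else return false;         // illegal unit
  *out = value * scale;
  return true;
}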
+ +template <> void DCmdArgument<NanoTimeArgument>::parse_value(const char* str, + size_t len, TRAPS) { + if (str == NULL) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error nanotime value: syntax error"); + } + + int argc = sscanf(str, INT64_FORMAT , &_value._time); + if (argc != 1) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error nanotime value: syntax error"); + } + size_t idx = 0; + while(idx < len && isdigit(str[idx])) { + idx++; + } + if (idx == len) { + // only accept missing unit if the value is 0 + if (_value._time != 0) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error nanotime value: unit required"); + } else { + _value._nanotime = 0; + strcpy(_value._unit, "ns"); + return; + } + } else if(len - idx > 2) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error nanotime value: illegal unit"); + } else { + strncpy(_value._unit, &str[idx], len - idx); + /* Write an extra null termination. This is safe because _value._unit + * is declared as char[3], and length is checked to be not larger than + * two above. Also, this is necessary, since length might be 1, and the + * default value already in the string is ns, which is two chars. + */ + _value._unit[len-idx] = '\0'; + } + + if (strcmp(_value._unit, "ns") == 0) { + _value._nanotime = _value._time; + } else if (strcmp(_value._unit, "us") == 0) { + _value._nanotime = _value._time * 1000; + } else if (strcmp(_value._unit, "ms") == 0) { + _value._nanotime = _value._time * 1000 * 1000; + } else if (strcmp(_value._unit, "s") == 0) { + _value._nanotime = _value._time * 1000 * 1000 * 1000; + } else if (strcmp(_value._unit, "m") == 0) { + _value._nanotime = _value._time * 60 * 1000 * 1000 * 1000; + } else if (strcmp(_value._unit, "h") == 0) { + _value._nanotime = _value._time * 60 * 60 * 1000 * 1000 * 1000; + } else if (strcmp(_value._unit, "d") == 0) { + _value._nanotime = _value._time * 24 * 60 * 60 * 1000 * 1000 * 1000; + } else { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error nanotime value: illegal unit"); + } +} + +template <> void DCmdArgument<NanoTimeArgument>::init_value(TRAPS) { + if (has_default()) { + this->parse_value(_default_string, strlen(_default_string), THREAD); + if (HAS_PENDING_EXCEPTION) { + fatal("Default string must be parsable"); + } + } else { + _value._time = 0; + _value._nanotime = 0; + strcpy(_value._unit, "ns"); + } +} + +template <> void DCmdArgument<NanoTimeArgument>::destroy_value() { } + +// WARNING StringArrayArgument can only be used as an option; it cannot be +// used as an argument with the DCmdParser + +template <> void DCmdArgument<StringArrayArgument*>::parse_value(const char* str, + size_t len, TRAPS) { + _value->add(str,len); +} + +template <> void DCmdArgument<StringArrayArgument*>::init_value(TRAPS) { + _value = new StringArrayArgument(); + _allow_multiple = true; + if (has_default()) { + fatal("StringArrayArgument cannot have default value"); + } +} + +template <> void DCmdArgument<StringArrayArgument*>::destroy_value() { + if (_value != NULL) { + delete _value; + set_value(NULL); + } +} + +template <> void DCmdArgument<MemorySizeArgument>::parse_value(const char* str, + size_t len, TRAPS) { + if (str == NULL) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Parsing error memory size value: syntax error"); + } + + if (*str == '-') { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Parsing error memory size value: negative values not allowed"); + } int res = sscanf(str, UINT64_FORMAT "%c", &_value._val, &_value._multiplier); + if (res == 2) { + switch (_value._multiplier) { + case 'k': case 'K': + _value._size = _value._val * 1024; + break; + case 'm': case 'M': + _value._size = _value._val * 1024 * 1024; + break; + case 'g': case 'G': + _value._size = _value._val * 1024 * 1024 * 1024; + break; + default: + _value._size = _value._val; + _value._multiplier = ' '; + // default case should be to break with no error, since the user + // can write the size in bytes, or might have a delimiter and next arg + break; + } + } else if (res == 1) { + _value._size = _value._val; + } else { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "Parsing error memory size value: invalid value"); + } +} + +template <> void DCmdArgument<MemorySizeArgument>::init_value(TRAPS) { + if (has_default()) { + this->parse_value(_default_string, strlen(_default_string), THREAD); + if (HAS_PENDING_EXCEPTION) { + fatal("Default string must be parsable"); + } + } else { + _value._size = 0; + _value._val = 0; + _value._multiplier = ' '; + } +} + +template <> void DCmdArgument<MemorySizeArgument>::destroy_value() { }
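The DCmdArgument<MemorySizeArgument> specialization above reads an unsigned count plus an optional K/M/G multiplier, rejecting a leading minus before calling sscanf because the unsigned conversion would otherwise wrap a negative value silently. The same logic as a standalone sketch:

#include <cstdio>

// Parse "<number>[kKmMgG]" into a byte count. Any other trailing
// character is treated as a delimiter and the number as plain bytes.
bool parse_memory_size(const char* str, unsigned long long* out) {
  if (str == NULL || *str == '-') return false;  // negatives not allowed
  unsigned long long val;
  char mult;
  int n = std::sscanf(str, "%llu%c", &val, &mult);
  if (n < 1) return false;                       // not a number
  if (n == 2) {
    switch (mult) {
      case 'k': case 'K': val *= 1024ULL; break;
      case 'm': case 'M': val *= 1024ULL * 1024; break;
      case 'g': case 'G': val *= 1024ULL * 1024 * 1024; break;
      default: break;                            // delimiter: keep plain bytes
    }
  }
  *out = val;
  return true;
}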
diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/diagnosticArgument.hpp --- a/src/share/vm/services/diagnosticArgument.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/diagnosticArgument.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,49 @@ #include "runtime/thread.hpp" #include "utilities/exceptions.hpp" +class StringArrayArgument : public CHeapObj { +private: + GrowableArray<char*>* _array; +public: + StringArrayArgument() { + _array = new(ResourceObj::C_HEAP)GrowableArray<char*>(32, true); + assert(_array != NULL, "Sanity check"); + } + void add(const char* str, size_t len) { + if (str != NULL) { + char* ptr = NEW_C_HEAP_ARRAY(char, len+1); + strncpy(ptr, str, len); + ptr[len] = 0; + _array->append(ptr); + } + } + GrowableArray<char*>* array() { + return _array; + } + ~StringArrayArgument() { + for (int i=0; i<_array->length(); i++) { + if(_array->at(i) != NULL) { // Safety check + FREE_C_HEAP_ARRAY(char, _array->at(i)); + } + } + delete _array; + } +}; + +class NanoTimeArgument { +public: + jlong _nanotime; + jlong _time; + char _unit[3]; +}; + +class MemorySizeArgument { +public: + u8 _size; + u8 _val; + char _multiplier; +}; + class GenDCmdArgument : public ResourceObj { protected: GenDCmdArgument* _next; @@ -40,6 +83,7 @@ const char* _default_string; bool _is_set; bool _is_mandatory; + bool _allow_multiple; GenDCmdArgument(const char* name, const char* description, const char* type, const char* default_string, bool mandatory) { _name = name; @@ -48,6 +92,7 @@ _default_string = default_string; _is_mandatory = mandatory; _is_set = false; + _allow_multiple = false; }; public: const char* name() { return _name; } @@ -56,6 +101,7 @@ const char* default_string() { return _default_string; } bool is_set() { return _is_set; } void set_is_set(bool b) { _is_set = b; } + bool allow_multiple() { return _allow_multiple; } bool is_mandatory() { return _is_mandatory; } bool has_value() { return _is_set || _default_string != NULL; } bool has_default() { return _default_string != NULL; } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/diagnosticFramework.cpp --- a/src/share/vm/services/diagnosticFramework.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/diagnosticFramework.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ bool DCmdArgIter::next(TRAPS) { if (_len == 0) return false; // skipping spaces - while (_cursor < _len - 1 && isspace(_buffer[_cursor])) { + while (_cursor < _len - 1 && _buffer[_cursor] == _delim) { _cursor++; } // handling end of command line diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/diagnosticFramework.hpp --- a/src/share/vm/services/diagnosticFramework.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/diagnosticFramework.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -195,6 +195,7 @@ DCmdParser() { _options = NULL; _arguments_list = NULL; + _delim = ' '; } void add_dcmd_option(GenDCmdArgument* arg); void add_dcmd_argument(GenDCmdArgument* arg); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/gcNotifier.cpp --- a/src/share/vm/services/gcNotifier.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/gcNotifier.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -180,17 +180,43 @@ } void GCNotifier::sendNotification(TRAPS) { + GCNotifier::sendNotificationInternal(THREAD); + // Clearing pending exception to avoid premature termination of + // the service thread + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } +} + +class NotificationMark : public StackObj { + // This class is used in GCNotifier::sendNotificationInternal to ensure that + // the GCNotificationRequest object is properly cleaned up, whatever path + // is used to exit the method. + GCNotificationRequest* _request; +public: + NotificationMark(GCNotificationRequest* r) { + _request = r; + } + ~NotificationMark() { + assert(_request != NULL, "Sanity check"); + delete _request; + } +}; + +void GCNotifier::sendNotificationInternal(TRAPS) { ResourceMark rm(THREAD); + HandleMark hm(THREAD); GCNotificationRequest *request = getRequest(); - if(request != NULL) { - Handle objGcInfo = createGcInfo(request->gcManager,request->gcStatInfo,THREAD); + if (request != NULL) { + NotificationMark nm(request); + Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD); Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK); Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK); Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK); klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK); - instanceKlassHandle gc_mbean_klass (THREAD, k); + instanceKlassHandle gc_mbean_klass(THREAD, k); instanceOop gc_mbean = request->gcManager->get_memory_manager_instance(THREAD); instanceHandle gc_mbean_h(THREAD, gc_mbean); @@ -213,11 +239,6 @@ vmSymbols::createGCNotification_signature(), &args, CHECK); - if (HAS_PENDING_EXCEPTION) { - CLEAR_PENDING_EXCEPTION; - } - - delete request; } } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/services/gcNotifier.hpp --- a/src/share/vm/services/gcNotifier.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/services/gcNotifier.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,6 +60,7 @@ static GCNotificationRequest *last_request; static void addRequest(GCNotificationRequest *request); static GCNotificationRequest *getRequest(); + static void sendNotificationInternal(TRAPS); public: static void pushNotification(GCMemoryManager *manager, const char *action, const char *cause); static bool has_event(); diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/decoder.cpp --- a/src/share/vm/utilities/decoder.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/decoder.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -25,7 +25,9 @@ #include "precompiled.hpp" #include "prims/jvm.h" #include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" #include "utilities/decoder.hpp" +#include "utilities/vmError.hpp" #if defined(_WINDOWS) #include "decoder_windows.hpp" @@ -35,74 +37,94 @@ #include "decoder_elf.hpp" #endif -NullDecoder* Decoder::_decoder = NULL; -NullDecoder Decoder::_do_nothing_decoder; -Mutex* Decoder::_decoder_lock = new Mutex(Mutex::safepoint, - "DecoderLock"); +AbstractDecoder* Decoder::_shared_decoder = NULL; +AbstractDecoder* Decoder::_error_handler_decoder = NULL; +NullDecoder Decoder::_do_nothing_decoder; +Mutex* Decoder::_shared_decoder_lock = new Mutex(Mutex::native, + "SharedDecoderLock"); -// _decoder_lock should already acquired before enter this method -NullDecoder* Decoder::get_decoder() { - assert(_decoder_lock != NULL && _decoder_lock->owned_by_self(), +AbstractDecoder* Decoder::get_shared_instance() { + assert(_shared_decoder_lock != NULL && _shared_decoder_lock->owned_by_self(), "Require DecoderLock to enter"); - if (_decoder != NULL) { - return _decoder; + if (_shared_decoder == NULL) { + _shared_decoder = create_decoder(); } + return _shared_decoder; +} - // Decoder is a secondary service. Although, it is good to have, - // but we can live without it. +AbstractDecoder* Decoder::get_error_handler_instance() { + if (_error_handler_decoder == NULL) { + _error_handler_decoder = create_decoder(); + } + return _error_handler_decoder; +} + + +AbstractDecoder* Decoder::create_decoder() { + AbstractDecoder* decoder; #if defined(_WINDOWS) - _decoder = new (std::nothrow) WindowsDecoder(); + decoder = new (std::nothrow) WindowsDecoder(); #elif defined (__APPLE__) - _decoder = new (std::nothrow)MachODecoder(); + decoder = new (std::nothrow)MachODecoder(); #else - _decoder = new (std::nothrow)ElfDecoder(); + decoder = new (std::nothrow)ElfDecoder(); #endif - if (_decoder == NULL || _decoder->has_error()) { - if (_decoder != NULL) { - delete _decoder; + if (decoder == NULL || decoder->has_error()) { + if (decoder != NULL) { + delete decoder; } - _decoder = &_do_nothing_decoder; + decoder = &_do_nothing_decoder; } - return _decoder; + return decoder; } bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) { - assert(_decoder_lock != NULL, "Just check"); - MutexLockerEx locker(_decoder_lock, true); - NullDecoder* decoder = get_decoder(); + assert(_shared_decoder_lock != NULL, "Just check"); + bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true); + AbstractDecoder* decoder = error_handling_thread ? 
+ get_error_handler_instance(): get_shared_instance(); assert(decoder != NULL, "null decoder"); return decoder->decode(addr, buf, buflen, offset, modulepath); } bool Decoder::demangle(const char* symbol, char* buf, int buflen) { - assert(_decoder_lock != NULL, "Just check"); - MutexLockerEx locker(_decoder_lock, true); - NullDecoder* decoder = get_decoder(); + assert(_shared_decoder_lock != NULL, "Just check"); + bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true); + AbstractDecoder* decoder = error_handling_thread ? + get_error_handler_instance(): get_shared_instance(); assert(decoder != NULL, "null decoder"); return decoder->demangle(symbol, buf, buflen); } bool Decoder::can_decode_C_frame_in_vm() { - assert(_decoder_lock != NULL, "Just check"); - MutexLockerEx locker(_decoder_lock, true); - NullDecoder* decoder = get_decoder(); + assert(_shared_decoder_lock != NULL, "Just check"); + bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true); + AbstractDecoder* decoder = error_handling_thread ? + get_error_handler_instance(): get_shared_instance(); assert(decoder != NULL, "null decoder"); return decoder->can_decode_C_frame_in_vm(); } -// shutdown real decoder and replace it with -// _do_nothing_decoder +/* + * Shutdown shared decoder and replace it with + * _do_nothing_decoder. Do nothing with error handler + * instance, since the JVM is going down. + */ void Decoder::shutdown() { - assert(_decoder_lock != NULL, "Just check"); - MutexLockerEx locker(_decoder_lock, true); + assert(_shared_decoder_lock != NULL, "Just check"); + MutexLockerEx locker(_shared_decoder_lock, true); - if (_decoder != NULL && _decoder != &_do_nothing_decoder) { - delete _decoder; + if (_shared_decoder != NULL && + _shared_decoder != &_do_nothing_decoder) { + delete _shared_decoder; } - _decoder = &_do_nothing_decoder; + _shared_decoder = &_do_nothing_decoder; } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/decoder.hpp --- a/src/share/vm/utilities/decoder.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/decoder.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "memory/allocation.hpp" #include "runtime/mutex.hpp" -class NullDecoder: public CHeapObj { +class AbstractDecoder : public CHeapObj { public: // status code for decoding native C frame enum decoder_status { @@ -43,6 +43,34 @@ helper_init_error // SymInitialize failed (Windows only) }; + // decode a pc address to the corresponding function name and an offset from the beginning of + // the function + virtual bool decode(address pc, char* buf, int buflen, int* offset, + const char* modulepath = NULL) = 0; + // demangle a C++ symbol + virtual bool demangle(const char* symbol, char* buf, int buflen) = 0; + // whether the decoder can decode symbols in the vm + virtual bool can_decode_C_frame_in_vm() const = 0; + + virtual decoder_status status() const { + return _decoder_status; + } + + virtual bool has_error() const { + return is_error(_decoder_status); + } + + static bool is_error(decoder_status status) { + return (status > 0); + } + +protected: + decoder_status _decoder_status; +}; + +// Do nothing decoder +class NullDecoder : public AbstractDecoder { +public: NullDecoder() { _decoder_status = not_available; } @@ -61,40 +89,34 @@ virtual bool can_decode_C_frame_in_vm() const { return false; } - - virtual decoder_status status() const { - return _decoder_status; - } - - virtual bool has_error() const { - return is_error(_decoder_status); - } - - static bool is_error(decoder_status status) { - return (status > 0); - } - -protected: - decoder_status _decoder_status; }; -class Decoder: AllStatic { +class Decoder : AllStatic { public: static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL); static bool demangle(const char* symbol, char* buf, int buflen); static bool can_decode_C_frame_in_vm(); + // shutdown shared instance static void shutdown(); protected: - static NullDecoder* get_decoder(); + // shared decoder instance, _shared_decoder_lock is needed + static AbstractDecoder* get_shared_instance(); + // a private instance for the error handler. The error handler can be + // triggered almost anywhere, including from a signal handler, where + // no lock can be taken. So the shared decoder cannot be used + // in that scenario. + static AbstractDecoder* get_error_handler_instance(); + static AbstractDecoder* create_decoder(); private: - static NullDecoder* _decoder; - static NullDecoder _do_nothing_decoder; + static AbstractDecoder* _shared_decoder; + static AbstractDecoder* _error_handler_decoder; + static NullDecoder _do_nothing_decoder; protected: - static Mutex* _decoder_lock; + static Mutex* _shared_decoder_lock; }; #endif // SHARE_VM_UTILITIES_DECODER_HPP diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/decoder_elf.hpp --- a/src/share/vm/utilities/decoder_elf.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/decoder_elf.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "utilities/decoder.hpp" #include "utilities/elfFile.hpp" -class ElfDecoder: public NullDecoder { +class ElfDecoder : public AbstractDecoder { public: ElfDecoder() { diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/preserveException.cpp --- a/src/share/vm/utilities/preserveException.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/preserveException.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -32,9 +32,9 @@ thread = Thread::current(); _thread = thread; _preserved_exception_oop = Handle(thread, _thread->pending_exception()); - _thread->clear_pending_exception(); // Needed to avoid infinite recursion _preserved_exception_line = _thread->exception_line(); _preserved_exception_file = _thread->exception_file(); + _thread->clear_pending_exception(); // Needed to avoid infinite recursion } diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/vmError.hpp --- a/src/share/vm/utilities/vmError.hpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/vmError.hpp Fri Feb 17 12:18:52 2012 -0800 @@ -27,11 +27,12 @@ #include "utilities/globalDefinitions.hpp" - +class Decoder; class VM_ReportJavaOutOfMemory; class VMError : public StackObj { friend class VM_ReportJavaOutOfMemory; + friend class Decoder; enum ErrorType { internal_error = 0xe0000000, diff -r 72c425c46102 -r 15085a6eb50c src/share/vm/utilities/xmlstream.cpp --- a/src/share/vm/utilities/xmlstream.cpp Fri Feb 17 12:18:10 2012 -0800 +++ b/src/share/vm/utilities/xmlstream.cpp Fri Feb 17 12:18:52 2012 -0800 @@ -192,8 +192,11 @@ _element_close_stack_ptr = cur_tag + strlen(cur_tag) + 1; _element_depth -= 1; } - if (bad_tag && !VMThread::should_terminate() && !is_error_reported()) + if (bad_tag && !VMThread::should_terminate() && !VM_Exit::vm_exited() && + !is_error_reported()) + { assert(false, "bad tag in log"); + } } #endif
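A closing note on the decoder rework in this changeset: the old single locked decoder became a shared instance plus a private instance reserved for the error-reporting thread, because error reporting can run inside a signal handler, where blocking on the decoder mutex could deadlock the crash dump. A compact model of that split is sketched below; the types and the thread-id bookkeeping are illustrative, and unlike this sketch the real code holds the lock across the whole decode call rather than only around creation:

#include <mutex>
#include <thread>

struct DecoderState { /* symbol tables, demangling buffers, ... */ };

static std::mutex      shared_lock;
static DecoderState*   shared_instance = nullptr;
static DecoderState*   error_instance  = nullptr;  // crash path only
static std::thread::id error_tid;                  // set when the first error is reported

DecoderState* decoder_for_current_thread() {
  if (std::this_thread::get_id() == error_tid) {
    // Crash-reporting path: never touch the mutex. The instance is
    // private to this one thread, so unsynchronized access is safe.
    if (error_instance == nullptr) error_instance = new DecoderState();
    return error_instance;
  }
  std::lock_guard<std::mutex> guard(shared_lock);   // all other threads serialize
  if (shared_instance == nullptr) shared_instance = new DecoderState();
  return shared_instance;
}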