# HG changeset patch
# User iveresov
# Date 1304546924 25200
# Node ID 567c87d484a05649c250fc4891a1a4d3007b00d9
# Parent  a1d5f532838de85ac78120444bb94cc9cb8e8898
7041501: NUMA: Expand the old gen more aggressively
Summary: Expand the old gen in bigger increments
Reviewed-by: jmasa

diff -r a1d5f532838d -r 567c87d484a0 src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Apr 29 09:11:03 2011 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed May 04 15:08:44 2011 -0700
@@ -224,6 +224,12 @@
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes = align_size_up(bytes, alignment);
   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+
+  if (UseNUMA) {
+    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
+    // providing a page per lgroup. Alignment is larger or equal to the page size.
+    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
+  }
   if (aligned_bytes == 0){
     // The alignment caused the number of bytes to wrap. An expand_by(0) will
     // return true with the implication that and expansion was done when it
diff -r a1d5f532838d -r 567c87d484a0 src/share/vm/runtime/arguments.cpp
--- a/src/share/vm/runtime/arguments.cpp	Fri Apr 29 09:11:03 2011 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Wed May 04 15:08:44 2011 -0700
@@ -1423,6 +1423,11 @@
       }
     }
   }
+  if (UseNUMA) {
+    if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
+      FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+    }
+  }
 }
 
 void Arguments::set_g1_gc_flags() {