comparison src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 642:660978a2a31a

6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero based compressed oops if java heap is below 32gb and unscaled compressed oops if java heap is below 4gb.
Reviewed-by: never, twisti, jcoomes, coleenp
author kvn
date Thu, 12 Mar 2009 10:37:46 -0700
parents a4b729f5b611
children c89f86385056
comparison: 641:6af0a709d52b vs 642:660978a2a31a
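The changeset summary describes a placement heuristic: use unscaled compressed oops when the whole java heap can sit below the 4 GB mark, zero-based compressed oops when it can sit below 32 GB, and a regular (nonzero) heap base otherwise. A minimal sketch of that selection follows; pick_narrow_oop_mode and its constants are hypothetical names for illustration, not the actual Universe::preferred_heap_base() code.

// Sketch only: the thresholds from the changeset summary, not HotSpot's
// actual Universe::preferred_heap_base() logic.  pick_narrow_oop_mode and
// the limit constants are hypothetical names introduced for illustration.
#include <cstddef>
#include <cstdint>

enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

NarrowOopMode pick_narrow_oop_mode(size_t total_reserved) {
  const uint64_t kUnscaledLimit  = UINT64_C(4)  << 30;  // heap must end below 4 GB
  const uint64_t kZeroBasedLimit = UINT64_C(32) << 30;  // heap must end below 32 GB

  if (total_reserved <= kUnscaledLimit) {
    // Narrow oop is the raw address: no base to add, no shift on decode.
    return UnscaledNarrowOop;
  }
  if (total_reserved <= kZeroBasedLimit) {
    // Base is zero, decode is only a shift by the object-alignment log
    // (3 for 8-byte alignment), which is why the limit is 4 GB * 8 = 32 GB.
    return ZeroBasedNarrowOop;
  }
  // Otherwise decoding needs the full base + shifted-offset form.
  return HeapBasedNarrowOop;
}

Unscaled decoding uses the 32-bit value as an address directly, zero-based decoding only shifts, and heap-based decoding needs both base and shift, so trying the modes in this order is the point of the patch. The hunk below wires that preference into the ParallelScavenge heap reservation.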
   trace_gen_sizes("ps heap rnd",
                   pg_min_size, pg_max_size,
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
 
+  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
+  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+
   // The main part of the heap (old gen + young gen) can often use a larger page
   // size than is needed or wanted for the perm gen. Use the "compound
   // alignment" ReservedSpace ctor to avoid having to use the same page size for
   // all gens.
+
   ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
-                            og_align);
+                            og_align, addr);
+
+  if (UseCompressedOops) {
+    if (addr != NULL && !heap_rs.is_reserved()) {
+      // Failed to reserve at specified address - the requested memory
+      // region is taken already, for example, by 'java' launcher.
+      // Try again to reserve heap higher.
+      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
+                                 og_align, addr);
+      if (addr != NULL && !heap_rs0.is_reserved()) {
+        // Failed to reserve at specified address again - give up.
+        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        assert(addr == NULL, "");
+        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
+                                   og_align, addr);
+        heap_rs = heap_rs1;
+      } else {
+        heap_rs = heap_rs0;
+      }
+    }
+  }
+
   os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                        heap_rs.base(), pg_max_size);
   os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                        og_max_size + yg_max_size, og_page_sz,
                        heap_rs.base() + pg_max_size,
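For context, the nested retries in the hunk boil down to a three-step cascade: try the preferred address for unscaled oops, fall back to a zero-based placement if that range is already taken, and finally reserve without an address constraint (the HeapBasedNarrowOop case, where the preferred base is asserted to be NULL). A rough sketch of that control flow; reserve_with_fallback and its callback parameters are hypothetical stand-ins, not the ReservedHeapSpace or Universe API.

// Sketch of the fallback order in the hunk above; everything here is a
// stand-in for illustration, not a HotSpot API.
#include <cstddef>
#include <functional>

enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

struct Reservation {
  char* base = nullptr;
  bool  reserved = false;
};

// preferred_base plays the role of Universe::preferred_heap_base(): NULL means
// "no address constraint".  reserve_at plays the role of the ReservedHeapSpace
// constructor: it attempts to reserve `size` bytes at the requested base.
Reservation reserve_with_fallback(
    size_t size,
    const std::function<char*(size_t, NarrowOopMode)>& preferred_base,
    const std::function<Reservation(size_t, char*)>& reserve_at) {
  const NarrowOopMode modes[] = { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };
  Reservation rs;
  for (NarrowOopMode mode : modes) {
    char* addr = preferred_base(size, mode);
    rs = reserve_at(size, addr);
    // A successful reservation ends the cascade; so does an unconstrained
    // (NULL) request, since there is no more specific placement left to try.
    if (rs.reserved || addr == nullptr) {
      break;
    }
  }
  return rs;
}

Folding the retries into a loop like this is only an illustration; the patch keeps the attempts explicit because each one constructs a separate ReservedHeapSpace (heap_rs, heap_rs0, heap_rs1) and copies the winner back into heap_rs.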