comparison src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 6725:da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author | coleenp |
---|---|
date | Sat, 01 Sep 2012 13:25:18 -0400 |
parents | d2a62e0f25eb |
children | 82657b6a8cc0 |
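This changeset (JDK-6964458) removes the permanent generation from the ParallelScavenge heap: class metadata leaves the GC-managed Java heap and moves into native-memory metaspaces owned by class loaders. In the comparison below, the parent revision 6724:36d1d483d5d6 is the left column and 6725:da91efe96a93 is the right. A minimal sketch of the layout change, using hypothetical types rather than HotSpot code:

```cpp
#include <cstddef>

// Hypothetical illustration only; ReservedRange stands in for HotSpot's
// ReservedSpace machinery.
struct ReservedRange { char* base; size_t size; };

// Before: one contiguous reservation split perm | old | young, all GC-managed.
struct PSHeapBefore {
  ReservedRange perm_gen;   // class metadata, collected as a generation
  ReservedRange old_gen;
  ReservedRange young_gen;
};

// After: the Java heap holds only old | young. Class metadata lives in
// per-class-loader metaspaces backed by native memory and is reclaimed when
// the owning class loader is unloaded, not by heap GC.
struct PSHeapAfter {
  ReservedRange old_gen;
  ReservedRange young_gen;
};
```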
6724:36d1d483d5d6 | 6725:da91efe96a93 |
---|---|
43 #include "services/memTracker.hpp" | 43 #include "services/memTracker.hpp" |
44 #include "utilities/vmError.hpp" | 44 #include "utilities/vmError.hpp" |
45 | 45 |
46 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL; | 46 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL; |
47 PSOldGen* ParallelScavengeHeap::_old_gen = NULL; | 47 PSOldGen* ParallelScavengeHeap::_old_gen = NULL; |
48 PSPermGen* ParallelScavengeHeap::_perm_gen = NULL; | |
49 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL; | 48 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL; |
50 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL; | 49 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL; |
51 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; | 50 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; |
52 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; | 51 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; |
53 | 52 |
54 static void trace_gen_sizes(const char* const str, | 53 static void trace_gen_sizes(const char* const str, |
55 size_t pg_min, size_t pg_max, | |
56 size_t og_min, size_t og_max, | 54 size_t og_min, size_t og_max, |
57 size_t yg_min, size_t yg_max) | 55 size_t yg_min, size_t yg_max) |
58 { | 56 { |
59 if (TracePageSizes) { | 57 if (TracePageSizes) { |
60 tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " | 58 tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " |
61 SIZE_FORMAT "," SIZE_FORMAT " " | 59 SIZE_FORMAT "," SIZE_FORMAT " " |
62 SIZE_FORMAT "," SIZE_FORMAT " " | |
63 SIZE_FORMAT, | 60 SIZE_FORMAT, |
64 str, pg_min / K, pg_max / K, | 61 str, |
65 og_min / K, og_max / K, | 62 og_min / K, og_max / K, |
66 yg_min / K, yg_max / K, | 63 yg_min / K, yg_max / K, |
67 (pg_max + og_max + yg_max) / K); | 64 (og_max + yg_max) / K); |
68 } | 65 } |
69 } | 66 } |
70 | 67 |
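With the perm-gen columns dropped, trace_gen_sizes prints one min,max pair per remaining generation plus the combined maximum. A standalone illustration of the resulting line, with made-up sizes (this program is not part of the changeset):

```cpp
#include <cstdio>
#include <cstddef>

int main() {
  const size_t K = 1024, M = K * K;
  // Hypothetical generation sizes, min/max in bytes.
  size_t og_min = 2 * M, og_max = 8 * M, yg_min = 1 * M, yg_max = 4 * M;
  // Mirrors the new format: "%s: og_min,og_max yg_min,yg_max total", in KB.
  std::printf("%s: %zu,%zu %zu,%zu %zu\n", "ps heap rnd",
              og_min / K, og_max / K, yg_min / K, yg_max / K,
              (og_max + yg_max) / K);
  // Prints: ps heap rnd: 2048,8192 1024,4096 12288
  return 0;
}
```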
71 jint ParallelScavengeHeap::initialize() { | 68 jint ParallelScavengeHeap::initialize() { |
72 CollectedHeap::pre_initialize(); | 69 CollectedHeap::pre_initialize(); |
77 | 74 |
78 size_t yg_min_size = _collector_policy->min_young_gen_size(); | 75 size_t yg_min_size = _collector_policy->min_young_gen_size(); |
79 size_t yg_max_size = _collector_policy->max_young_gen_size(); | 76 size_t yg_max_size = _collector_policy->max_young_gen_size(); |
80 size_t og_min_size = _collector_policy->min_old_gen_size(); | 77 size_t og_min_size = _collector_policy->min_old_gen_size(); |
81 size_t og_max_size = _collector_policy->max_old_gen_size(); | 78 size_t og_max_size = _collector_policy->max_old_gen_size(); |
82 // Why isn't there a min_perm_gen_size()? | |
83 size_t pg_min_size = _collector_policy->perm_gen_size(); | |
84 size_t pg_max_size = _collector_policy->max_perm_gen_size(); | |
85 | 79 |
86 trace_gen_sizes("ps heap raw", | 80 trace_gen_sizes("ps heap raw", |
87 pg_min_size, pg_max_size, | |
88 og_min_size, og_max_size, | 81 og_min_size, og_max_size, |
89 yg_min_size, yg_max_size); | 82 yg_min_size, yg_max_size); |
90 | 83 |
91 // The ReservedSpace ctor used below requires that the page size for the perm | |
92 // gen is <= the page size for the rest of the heap (young + old gens). | |
93 const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size, | 84 const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size, |
94 yg_max_size + og_max_size, | 85 yg_max_size + og_max_size, |
95 8); | 86 8); |
96 const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size, | 87 |
97 pg_max_size, 16), | |
98 og_page_sz); | |
99 | |
100 const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz); | |
101 const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz); | 88 const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz); |
102 const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz); | 89 const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz); |
103 | 90 |
104 // Update sizes to reflect the selected page size(s). | 91 // Update sizes to reflect the selected page size(s). |
105 // | 92 // |
119 og_max_size = MAX2(og_max_size, og_min_size); | 106 og_max_size = MAX2(og_max_size, og_min_size); |
120 size_t og_cur_size = | 107 size_t og_cur_size = |
121 align_size_down(_collector_policy->old_gen_size(), og_align); | 108 align_size_down(_collector_policy->old_gen_size(), og_align); |
122 og_cur_size = MAX2(og_cur_size, og_min_size); | 109 og_cur_size = MAX2(og_cur_size, og_min_size); |
123 | 110 |
124 pg_min_size = align_size_up(pg_min_size, pg_align); | |
125 pg_max_size = align_size_up(pg_max_size, pg_align); | |
126 size_t pg_cur_size = pg_min_size; | |
127 | |
128 trace_gen_sizes("ps heap rnd", | 111 trace_gen_sizes("ps heap rnd", |
129 pg_min_size, pg_max_size, | |
130 og_min_size, og_max_size, | 112 og_min_size, og_max_size, |
131 yg_min_size, yg_max_size); | 113 yg_min_size, yg_max_size); |
132 | 114 |
133 const size_t total_reserved = pg_max_size + og_max_size + yg_max_size; | 115 const size_t heap_size = og_max_size + yg_max_size; |
134 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); | 116 |
135 | 117 ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align); |
136 // The main part of the heap (old gen + young gen) can often use a larger page | |
137 // size than is needed or wanted for the perm gen. Use the "compound | |
138 // alignment" ReservedSpace ctor to avoid having to use the same page size for | |
139 // all gens. | |
140 | |
141 ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size, | |
142 og_align, addr); | |
143 | |
144 if (UseCompressedOops) { | |
145 if (addr != NULL && !heap_rs.is_reserved()) { | |
146 // Failed to reserve at specified address - the requested memory | |
147 // region is taken already, for example, by 'java' launcher. | |
148 // Try again to reserve heap higher. |
149 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); | |
150 ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size, | |
151 og_align, addr); | |
152 if (addr != NULL && !heap_rs0.is_reserved()) { | |
153 // Failed to reserve at specified address again - give up. | |
154 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); | |
155 assert(addr == NULL, ""); | |
156 ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size, | |
157 og_align, addr); | |
158 heap_rs = heap_rs1; | |
159 } else { | |
160 heap_rs = heap_rs0; | |
161 } | |
162 } | |
163 } | |
164 | 118 |
165 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); | 119 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); |
166 | 120 |
167 os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz, | |
168 heap_rs.base(), pg_max_size); | |
169 os::trace_page_sizes("ps main", og_min_size + yg_min_size, | 121 os::trace_page_sizes("ps main", og_min_size + yg_min_size, |
170 og_max_size + yg_max_size, og_page_sz, | 122 og_max_size + yg_max_size, og_page_sz, |
171 heap_rs.base() + pg_max_size, | 123 heap_rs.base(), |
172 heap_rs.size() - pg_max_size); | 124 heap_rs.size()); |
173 if (!heap_rs.is_reserved()) { | 125 if (!heap_rs.is_reserved()) { |
174 vm_shutdown_during_initialization( | 126 vm_shutdown_during_initialization( |
175 "Could not reserve enough space for object heap"); | 127 "Could not reserve enough space for object heap"); |
176 return JNI_ENOMEM; | 128 return JNI_ENOMEM; |
177 } | 129 } |
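The deleted block above reserved the perm gen and the main heap as a compound space and open-coded the compressed-oops placement retry (unscaled, then zero-based, then heap-based); the replacement delegates all of that to Universe::reserve_heap(). A simplified sketch of the ladder the old code implemented, with hypothetical helpers preferred_base() and reserve_at() standing in for HotSpot internals:

```cpp
// Sketch only, against HotSpot-internal types; helpers are hypothetical.
ReservedSpace reserve_with_retry(size_t size, size_t align) {
  // 1. Unscaled: oop == narrowOop, requires the heap to end below 4 GB.
  char* addr = preferred_base(size, UnscaledNarrowOop);
  ReservedSpace rs = reserve_at(addr, size, align);
  if (addr != NULL && !rs.is_reserved()) {
    // 2. Zero-based: oop == narrowOop << shift, heap below 32 GB.
    addr = preferred_base(size, ZeroBasedNarrowOop);
    rs = reserve_at(addr, size, align);
    if (addr != NULL && !rs.is_reserved()) {
      // 3. Heap-based: oop == base + (narrowOop << shift), works anywhere.
      rs = reserve_at(NULL, size, align);  // let the OS pick the address
    }
  }
  return rs;
}
```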
191 // Initial young gen size is 4 Mb | 143 // Initial young gen size is 4 Mb |
192 // | 144 // |
193 // XXX - what about flag_parser.young_gen_size()? | 145 // XXX - what about flag_parser.young_gen_size()? |
194 const size_t init_young_size = align_size_up(4 * M, yg_align); | 146 const size_t init_young_size = align_size_up(4 * M, yg_align); |
195 yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size); | 147 yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size); |
196 | |
197 // Split the reserved space into perm gen and the main heap (everything else). | |
198 // The main heap uses a different alignment. | |
199 ReservedSpace perm_rs = heap_rs.first_part(pg_max_size); | |
200 ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align); | |
201 | 148 |
202 // Make up the generations | 149 // Make up the generations |
203 // Calculate the maximum size that a generation can grow. This | 150 // Calculate the maximum size that a generation can grow. This |
204 // includes growth into the other generation. Note that the | 151 // includes growth into the other generation. Note that the |
205 // parameter _max_gen_size is kept as the maximum | 152 // parameter _max_gen_size is kept as the maximum |
206 // size of the generation as the boundaries currently stand. | 153 // size of the generation as the boundaries currently stand. |
207 // _max_gen_size is still used as that value. | 154 // _max_gen_size is still used as that value. |
208 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; | 155 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; |
209 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; | 156 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; |
210 | 157 |
211 _gens = new AdjoiningGenerations(main_rs, | 158 _gens = new AdjoiningGenerations(heap_rs, |
212 og_cur_size, | 159 og_cur_size, |
213 og_min_size, | 160 og_min_size, |
214 og_max_size, | 161 og_max_size, |
215 yg_cur_size, | 162 yg_cur_size, |
216 yg_min_size, | 163 yg_min_size, |
231 max_gc_pause_sec, | 178 max_gc_pause_sec, |
232 max_gc_minor_pause_sec, | 179 max_gc_minor_pause_sec, |
233 GCTimeRatio | 180 GCTimeRatio |
234 ); | 181 ); |
235 | 182 |
236 _perm_gen = new PSPermGen(perm_rs, | |
237 pg_align, | |
238 pg_cur_size, | |
239 pg_cur_size, | |
240 pg_max_size, | |
241 "perm", 2); | |
242 | |
243 assert(!UseAdaptiveGCBoundary || | 183 assert(!UseAdaptiveGCBoundary || |
244 (old_gen()->virtual_space()->high_boundary() == | 184 (old_gen()->virtual_space()->high_boundary() == |
245 young_gen()->virtual_space()->low_boundary()), | 185 young_gen()->virtual_space()->low_boundary()), |
246 "Boundaries must meet"); | 186 "Boundaries must meet"); |
247 // initialize the policy counters - 2 collectors, 3 generations | 187 // initialize the policy counters - 2 collectors, 3 generations |
271 } | 211 } |
272 | 212 |
273 void ParallelScavengeHeap::update_counters() { | 213 void ParallelScavengeHeap::update_counters() { |
274 young_gen()->update_counters(); | 214 young_gen()->update_counters(); |
275 old_gen()->update_counters(); | 215 old_gen()->update_counters(); |
276 perm_gen()->update_counters(); | 216 MetaspaceCounters::update_performance_counters(); |
277 } | 217 } |
278 | 218 |
279 size_t ParallelScavengeHeap::capacity() const { | 219 size_t ParallelScavengeHeap::capacity() const { |
280 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes(); | 220 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes(); |
281 return value; | 221 return value; |
289 bool ParallelScavengeHeap::is_maximal_no_gc() const { | 229 bool ParallelScavengeHeap::is_maximal_no_gc() const { |
290 return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc(); | 230 return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc(); |
291 } | 231 } |
292 | 232 |
293 | 233 |
294 size_t ParallelScavengeHeap::permanent_capacity() const { | |
295 return perm_gen()->capacity_in_bytes(); | |
296 } | |
297 | |
298 size_t ParallelScavengeHeap::permanent_used() const { | |
299 return perm_gen()->used_in_bytes(); | |
300 } | |
301 | |
302 size_t ParallelScavengeHeap::max_capacity() const { | 234 size_t ParallelScavengeHeap::max_capacity() const { |
303 size_t estimated = reserved_region().byte_size(); | 235 size_t estimated = reserved_region().byte_size(); |
304 estimated -= perm_gen()->reserved().byte_size(); | |
305 if (UseAdaptiveSizePolicy) { | 236 if (UseAdaptiveSizePolicy) { |
306 estimated -= _size_policy->max_survivor_size(young_gen()->max_size()); | 237 estimated -= _size_policy->max_survivor_size(young_gen()->max_size()); |
307 } else { | 238 } else { |
308 estimated -= young_gen()->to_space()->capacity_in_bytes(); | 239 estimated -= young_gen()->to_space()->capacity_in_bytes(); |
309 } | 240 } |
317 | 248 |
318 if (old_gen()->is_in(p)) { | 249 if (old_gen()->is_in(p)) { |
319 return true; | 250 return true; |
320 } | 251 } |
321 | 252 |
322 if (perm_gen()->is_in(p)) { | |
323 return true; | |
324 } | |
325 | |
326 return false; | 253 return false; |
327 } | 254 } |
328 | 255 |
329 bool ParallelScavengeHeap::is_in_reserved(const void* p) const { | 256 bool ParallelScavengeHeap::is_in_reserved(const void* p) const { |
330 if (young_gen()->is_in_reserved(p)) { | 257 if (young_gen()->is_in_reserved(p)) { |
331 return true; | 258 return true; |
332 } | 259 } |
333 | 260 |
334 if (old_gen()->is_in_reserved(p)) { | 261 if (old_gen()->is_in_reserved(p)) { |
335 return true; | |
336 } | |
337 | |
338 if (perm_gen()->is_in_reserved(p)) { | |
339 return true; | 262 return true; |
340 } | 263 } |
341 | 264 |
342 return false; | 265 return false; |
343 } | 266 } |
350 // Don't implement this by using is_in_young(). This method is used | 273 // Don't implement this by using is_in_young(). This method is used |
351 // in some cases to check that is_in_young() is correct. | 274 // in some cases to check that is_in_young() is correct. |
352 bool ParallelScavengeHeap::is_in_partial_collection(const void *p) { | 275 bool ParallelScavengeHeap::is_in_partial_collection(const void *p) { |
353 assert(is_in_reserved(p) || p == NULL, | 276 assert(is_in_reserved(p) || p == NULL, |
354 "Does not work if address is non-null and outside of the heap"); | 277 "Does not work if address is non-null and outside of the heap"); |
355 // The order of the generations is perm (low addr), old, young (high addr) | 278 // The order of the generations is old (low addr), young (high addr) |
356 return p >= old_gen()->reserved().end(); | 279 return p >= old_gen()->reserved().end(); |
357 } | 280 } |
358 #endif | 281 #endif |
359 | 282 |
360 // There are two levels of allocation policy here. | 283 // There are two levels of allocation policy here. |
551 } | 474 } |
552 } | 475 } |
553 return NULL; | 476 return NULL; |
554 } | 477 } |
555 | 478 |
479 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) { | |
480 if (UseParallelOldGC) { | |
481 // The do_full_collection() parameter clear_all_soft_refs | |
482 // is interpreted here as maximum_compaction which will | |
483 // cause SoftRefs to be cleared. | |
484 bool maximum_compaction = clear_all_soft_refs; | |
485 PSParallelCompact::invoke(maximum_compaction); | |
486 } else { | |
487 PSMarkSweep::invoke(clear_all_soft_refs); | |
488 } | |
489 } | |
490 | |
556 // Failed allocation policy. Must be called from the VM thread, and | 491 // Failed allocation policy. Must be called from the VM thread, and |
557 // only at a safepoint! Note that this method has policy for allocation | 492 // only at a safepoint! Note that this method has policy for allocation |
558 // flow, and NOT collection policy. So we do not check for gc collection | 493 // flow, and NOT collection policy. So we do not check for gc collection |
559 // time over limit here, that is the responsibility of the heap specific | 494 // time over limit here, that is the responsibility of the heap specific |
560 // collection methods. This method decides where to attempt allocations, | 495 // collection methods. This method decides where to attempt allocations, |
573 HeapWord* result = young_gen()->allocate(size); | 508 HeapWord* result = young_gen()->allocate(size); |
574 | 509 |
575 // Second level allocation failure. | 510 // Second level allocation failure. |
576 // Mark sweep and allocate in young generation. | 511 // Mark sweep and allocate in young generation. |
577 if (result == NULL && !invoked_full_gc) { | 512 if (result == NULL && !invoked_full_gc) { |
578 invoke_full_gc(false); | 513 do_full_collection(false); |
579 result = young_gen()->allocate(size); | 514 result = young_gen()->allocate(size); |
580 } | 515 } |
581 | 516 |
582 death_march_check(result, size); | 517 death_march_check(result, size); |
583 | 518 |
589 } | 524 } |
590 | 525 |
591 // Fourth level allocation failure. We're running out of memory. | 526 // Fourth level allocation failure. We're running out of memory. |
592 // More complete mark sweep and allocate in young generation. | 527 // More complete mark sweep and allocate in young generation. |
593 if (result == NULL) { | 528 if (result == NULL) { |
594 invoke_full_gc(true); | 529 do_full_collection(true); |
595 result = young_gen()->allocate(size); | 530 result = young_gen()->allocate(size); |
596 } | 531 } |
597 | 532 |
598 // Fifth level allocation failure. | 533 // Fifth level allocation failure. |
599 // After more complete mark sweep, allocate in old generation. | 534 // After more complete mark sweep, allocate in old generation. |
600 if (result == NULL) { | 535 if (result == NULL) { |
601 result = old_gen()->allocate(size); | 536 result = old_gen()->allocate(size); |
602 } | |
603 | |
604 return result; | |
605 } | |
606 | |
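failed_mem_allocate() above escalates through allocation attempts interleaved with increasingly aggressive full collections; the third-level attempt falls in a hunk elided from this view and is reconstructed here from context. A condensed sketch of the ladder (HotSpot-internal types, not verbatim):

```cpp
HeapWord* failed_allocate_sketch(ParallelScavengeHeap* heap, size_t size) {
  HeapWord* r = heap->young_gen()->allocate(size);   // level 1: young gen
  if (r == NULL) {
    heap->do_full_collection(false);                 // level 2: full GC
    r = heap->young_gen()->allocate(size);
  }
  if (r == NULL) {
    r = heap->old_gen()->allocate(size);             // level 3: old gen (elided hunk)
  }
  if (r == NULL) {
    heap->do_full_collection(true);                  // level 4: maximal compaction,
    r = heap->young_gen()->allocate(size);           //          soft refs cleared
  }
  if (r == NULL) {
    r = heap->old_gen()->allocate(size);             // level 5: old gen, last resort
  }
  return r;
}
```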
607 // | |
608 // This is the policy loop for allocating in the permanent generation. | |
609 // If the initial allocation fails, we create a vm operation which will | |
610 // cause a collection. | |
611 HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) { | |
612 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); | |
613 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); | |
614 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
615 | |
616 HeapWord* result; | |
617 | |
618 uint loop_count = 0; | |
619 uint gc_count = 0; | |
620 uint full_gc_count = 0; | |
621 | |
622 do { | |
623 // We don't want to have multiple collections for a single filled generation. | |
624 // To prevent this, each thread tracks the total_collections() value, and if | |
625 // the count has changed, does not do a new collection. | |
626 // | |
627 // The collection count must be read only while holding the heap lock. VM | |
628 // operations also hold the heap lock during collections. There is a lock | |
629 // contention case where thread A blocks waiting on the Heap_lock, while | |
630 // thread B is holding it doing a collection. When thread A gets the lock, | |
631 // the collection count has already changed. To prevent duplicate collections, | |
632 // the policy MUST attempt allocations during the same period it reads the |
633 // total_collections() value! | |
634 { | |
635 MutexLocker ml(Heap_lock); | |
636 gc_count = Universe::heap()->total_collections(); | |
637 full_gc_count = Universe::heap()->total_full_collections(); | |
638 | |
639 result = perm_gen()->allocate_permanent(size); | |
640 | |
641 if (result != NULL) { | |
642 return result; | |
643 } | |
644 | |
645 if (GC_locker::is_active_and_needs_gc()) { | |
646 // If this thread is not in a jni critical section, we stall | |
647 // the requestor until the critical section has cleared and | |
648 // GC allowed. When the critical section clears, a GC is | |
649 // initiated by the last thread exiting the critical section; so | |
650 // we retry the allocation sequence from the beginning of the loop, | |
651 // rather than causing more, now probably unnecessary, GC attempts. | |
652 JavaThread* jthr = JavaThread::current(); | |
653 if (!jthr->in_critical()) { | |
654 MutexUnlocker mul(Heap_lock); | |
655 GC_locker::stall_until_clear(); | |
656 continue; | |
657 } else { | |
658 if (CheckJNICalls) { | |
659 fatal("Possible deadlock due to allocating while" | |
660 " in jni critical section"); | |
661 } | |
662 return NULL; | |
663 } | |
664 } | |
665 } | |
666 | |
667 if (result == NULL) { | |
668 | |
669 // Exit the loop if the gc time limit has been exceeded. | |
670 // The allocation must have failed above (result must be NULL), | |
671 // and the most recent collection must have exceeded the | |
672 // gc time limit. Exit the loop so that an out-of-memory | |
673 // will be thrown (returning a NULL will do that), but | |
674 // clear gc_overhead_limit_exceeded so that the next collection | |
675 // will succeed if the application decides to handle the |
676 // out-of-memory and tries to go on. | |
677 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); | |
678 if (limit_exceeded) { | |
679 size_policy()->set_gc_overhead_limit_exceeded(false); | |
680 if (PrintGCDetails && Verbose) { | |
681 gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:" | |
682 " return NULL because gc_overhead_limit_exceeded is set"); | |
683 } | |
684 assert(result == NULL, "Allocation did not fail"); | |
685 return NULL; | |
686 } | |
687 | |
688 // Generate a VM operation | |
689 VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count); | |
690 VMThread::execute(&op); | |
691 | |
692 // Did the VM operation execute? If so, return the result directly. | |
693 // This prevents us from looping until time out on requests that can | |
694 // not be satisfied. | |
695 if (op.prologue_succeeded()) { | |
696 assert(Universe::heap()->is_in_permanent_or_null(op.result()), | |
697 "result not in heap"); | |
698 // If GC was locked out during VM operation then retry allocation | |
699 // and/or stall as necessary. | |
700 if (op.gc_locked()) { | |
701 assert(op.result() == NULL, "must be NULL if gc_locked() is true"); | |
702 continue; // retry and/or stall as necessary | |
703 } | |
704 // If a NULL result is being returned, an out-of-memory |
705 // will be thrown now. Clear the gc_overhead_limit_exceeded | |
706 // flag to avoid the following situation. | |
707 // gc_overhead_limit_exceeded is set during a collection | |
708 // the collection fails to return enough space and an OOM is thrown | |
709 // a subsequent GC prematurely throws an out-of-memory because | |
710 // the gc_overhead_limit_exceeded counts did not start | |
711 // again from 0. | |
712 if (op.result() == NULL) { | |
713 size_policy()->reset_gc_overhead_limit_count(); | |
714 } | |
715 return op.result(); | |
716 } | |
717 } | |
718 | |
719 // The policy object will prevent us from looping forever. If the | |
720 // time spent in gc crosses a threshold, we will bail out. | |
721 loop_count++; | |
722 if ((QueuedAllocationWarningCount > 0) && | |
723 (loop_count % QueuedAllocationWarningCount == 0)) { | |
724 warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t" | |
725 " size=%d", loop_count, size); | |
726 } | |
727 } while (result == NULL); | |
728 | |
729 return result; | |
730 } | |
731 | |
732 // | |
733 // This is the policy code for permanent allocations which have failed | |
734 // and require a collection. Note that just as in failed_mem_allocate, | |
735 // we do not set collection policy, only where & when to allocate and | |
736 // collect. | |
737 HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) { | |
738 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
739 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); | |
740 assert(!Universe::heap()->is_gc_active(), "not reentrant"); | |
741 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
742 assert(size > perm_gen()->free_in_words(), "Allocation should fail"); | |
743 | |
744 // We assume (and assert!) that an allocation at this point will fail | |
745 // unless we collect. | |
746 | |
747 // First level allocation failure. Mark-sweep and allocate in perm gen. | |
748 GCCauseSetter gccs(this, GCCause::_allocation_failure); | |
749 invoke_full_gc(false); | |
750 HeapWord* result = perm_gen()->allocate_permanent(size); | |
751 | |
752 // Second level allocation failure. We're running out of memory. | |
753 if (result == NULL) { | |
754 invoke_full_gc(true); | |
755 result = perm_gen()->allocate_permanent(size); | |
756 } | 537 } |
757 | 538 |
758 return result; | 539 return result; |
759 } | 540 } |
760 | 541 |
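The deleted permanent_mem_allocate() was the mutator-side policy loop: read the collection counts and try the fast path under Heap_lock, stall while GC_locker is held, and otherwise hand the failure to a VM operation, retrying if that operation was locked out. mem_allocate() in this file keeps the same shape for ordinary objects. A condensed sketch against HotSpot-internal types, with the VM-operation name made hypothetical and the GC-overhead-limit bailout omitted:

```cpp
HeapWord* policy_allocate_sketch(ParallelScavengeHeap* heap, size_t size) {
  uint loop_count = 0;
  while (true) {
    uint gc_count, full_gc_count;
    {
      MutexLocker ml(Heap_lock);
      // Read the counts and attempt allocation under one lock hold, so a
      // collection that completed while we waited is never repeated.
      gc_count      = heap->total_collections();
      full_gc_count = heap->total_full_collections();
      HeapWord* result = try_fast_allocate(heap, size);  // hypothetical helper
      if (result != NULL) return result;
      if (GC_locker::is_active_and_needs_gc()) {
        if (!JavaThread::current()->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();  // last thread leaving triggers the GC
          continue;
        }
        return NULL;  // allocating inside a JNI critical section: give up
      }
    }
    VM_FailedAllocationOp op(size, gc_count, full_gc_count);  // hypothetical op
    VMThread::execute(&op);
    // If the operation ran and GC was not locked out, its result is final.
    if (op.prologue_succeeded() && !op.gc_locked()) return op.result();
    loop_count++;
    if (QueuedAllocationWarningCount > 0 &&
        loop_count % QueuedAllocationWarningCount == 0) {
      warning("allocation retried %u times", loop_count);
    }
  }
}
```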
810 | 591 |
811 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); | 592 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); |
812 VMThread::execute(&op); | 593 VMThread::execute(&op); |
813 } | 594 } |
814 | 595 |
815 // This interface assumes that it's being called by the | 596 void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) { |
816 // vm thread. It collects the heap assuming that the | |
817 // heap lock is already held and that we are executing in | |
818 // the context of the vm thread. | |
819 void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
820 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
821 assert(Heap_lock->is_locked(), "Precondition#2"); | |
822 GCCauseSetter gcs(this, cause); | |
823 switch (cause) { | |
824 case GCCause::_heap_inspection: | |
825 case GCCause::_heap_dump: { | |
826 HandleMark hm; | |
827 invoke_full_gc(false); | |
828 break; | |
829 } | |
830 default: // XXX FIX ME | |
831 ShouldNotReachHere(); | |
832 } | |
833 } | |
834 | |
835 | |
836 void ParallelScavengeHeap::oop_iterate(OopClosure* cl) { | |
837 Unimplemented(); | 597 Unimplemented(); |
838 } | 598 } |
839 | 599 |
840 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { | 600 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { |
841 young_gen()->object_iterate(cl); | 601 young_gen()->object_iterate(cl); |
842 old_gen()->object_iterate(cl); | 602 old_gen()->object_iterate(cl); |
843 perm_gen()->object_iterate(cl); | 603 } |
844 } | 604 |
845 | |
846 void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) { | |
847 Unimplemented(); | |
848 } | |
849 | |
850 void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) { | |
851 perm_gen()->object_iterate(cl); | |
852 } | |
853 | 605 |
854 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { | 606 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { |
855 if (young_gen()->is_in_reserved(addr)) { | 607 if (young_gen()->is_in_reserved(addr)) { |
856 assert(young_gen()->is_in(addr), | 608 assert(young_gen()->is_in(addr), |
857 "addr should be in allocated part of young gen"); | 609 "addr should be in allocated part of young gen"); |
860 Unimplemented(); | 612 Unimplemented(); |
861 } else if (old_gen()->is_in_reserved(addr)) { | 613 } else if (old_gen()->is_in_reserved(addr)) { |
862 assert(old_gen()->is_in(addr), | 614 assert(old_gen()->is_in(addr), |
863 "addr should be in allocated part of old gen"); | 615 "addr should be in allocated part of old gen"); |
864 return old_gen()->start_array()->object_start((HeapWord*)addr); | 616 return old_gen()->start_array()->object_start((HeapWord*)addr); |
865 } else if (perm_gen()->is_in_reserved(addr)) { | |
866 assert(perm_gen()->is_in(addr), | |
867 "addr should be in allocated part of perm gen"); | |
868 return perm_gen()->start_array()->object_start((HeapWord*)addr); | |
869 } | 617 } |
870 return 0; | 618 return 0; |
871 } | 619 } |
872 | 620 |
873 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const { | 621 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const { |
889 } | 637 } |
890 | 638 |
891 void ParallelScavengeHeap::print_on(outputStream* st) const { | 639 void ParallelScavengeHeap::print_on(outputStream* st) const { |
892 young_gen()->print_on(st); | 640 young_gen()->print_on(st); |
893 old_gen()->print_on(st); | 641 old_gen()->print_on(st); |
894 perm_gen()->print_on(st); | 642 MetaspaceAux::print_on(st); |
895 } | 643 } |
896 | 644 |
897 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const { | 645 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const { |
898 PSScavenge::gc_task_manager()->threads_do(tc); | 646 PSScavenge::gc_task_manager()->threads_do(tc); |
899 } | 647 } |
915 | 663 |
916 | 664 |
917 void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) { | 665 void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) { |
918 // Why do we need the total_collections()-filter below? | 666 // Why do we need the total_collections()-filter below? |
919 if (total_collections() > 0) { | 667 if (total_collections() > 0) { |
920 if (!silent) { | |
921 gclog_or_tty->print("permanent "); | |
922 } | |
923 perm_gen()->verify(); | |
924 | |
925 if (!silent) { | 668 if (!silent) { |
926 gclog_or_tty->print("tenured "); | 669 gclog_or_tty->print("tenured "); |
927 } | 670 } |
928 old_gen()->verify(); | 671 old_gen()->verify(); |
929 | 672 |
998 #ifndef PRODUCT | 741 #ifndef PRODUCT |
999 void ParallelScavengeHeap::record_gen_tops_before_GC() { | 742 void ParallelScavengeHeap::record_gen_tops_before_GC() { |
1000 if (ZapUnusedHeapArea) { | 743 if (ZapUnusedHeapArea) { |
1001 young_gen()->record_spaces_top(); | 744 young_gen()->record_spaces_top(); |
1002 old_gen()->record_spaces_top(); | 745 old_gen()->record_spaces_top(); |
1003 perm_gen()->record_spaces_top(); | |
1004 } | 746 } |
1005 } | 747 } |
1006 | 748 |
1007 void ParallelScavengeHeap::gen_mangle_unused_area() { | 749 void ParallelScavengeHeap::gen_mangle_unused_area() { |
1008 if (ZapUnusedHeapArea) { | 750 if (ZapUnusedHeapArea) { |
1009 young_gen()->eden_space()->mangle_unused_area(); | 751 young_gen()->eden_space()->mangle_unused_area(); |
1010 young_gen()->to_space()->mangle_unused_area(); | 752 young_gen()->to_space()->mangle_unused_area(); |
1011 young_gen()->from_space()->mangle_unused_area(); | 753 young_gen()->from_space()->mangle_unused_area(); |
1012 old_gen()->object_space()->mangle_unused_area(); | 754 old_gen()->object_space()->mangle_unused_area(); |
1013 perm_gen()->object_space()->mangle_unused_area(); | |
1014 } | 755 } |
1015 } | 756 } |
1016 #endif | 757 #endif |