annotate src/share/vm/memory/genCollectedHeap.cpp @ 453:c96030fff130

6684579: SoftReference processing can be made more efficient
Summary: For the current soft-ref clearing policies, we can decide at marking time if a soft reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference-processing phase. This is especially beneficial for concurrent collectors, where marking is usually concurrent but reference processing usually is not.
Reviewed-by: jmasa

author:   ysr
date:     Thu, 20 Nov 2008 16:56:09 -0800
parents:  850fdf70db2b
children: 27a80744a83b

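The fix lands in do_collection() below, where the reference processor now latches the soft-ref clearing policy before marking (rp->snap_policy(clear_all_soft_refs)). What follows is a minimal sketch, not part of the file, of the discovery/processing protocol that function drives; it reuses the ReferenceProcessor calls that appear in the source, and mark_from_roots() is a hypothetical stand-in for the generation's marking work:

// Hedged sketch only -- not part of genCollectedHeap.cpp.
void example_reference_protocol(ReferenceProcessor* rp,
                                bool clear_all_soft_refs) {
  rp->verify_no_references_recorded();   // discovered lists start out empty
  rp->enable_discovery();                // begin discovering soft/weak refs
  rp->snap_policy(clear_all_soft_refs);  // 6684579: latch the soft-ref
                                         // clearing policy at marking time
  mark_from_roots();                     // hypothetical marking phase
  rp->enqueue_discovered_references();   // final reference processing
}
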
/*
 * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address.  This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at level and lower,
// and, if perm is true, for the perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}

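// Hedged illustration, not part of the original file: since both updaters
// above notify FullGCCount_lock, a thread that must observe the completion
// of full collection number N could wait along the following lines (the
// full_collections_completed() accessor is an assumption of this sketch):
//
//   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (gch->full_collections_completed() < N) {
//     ml.wait(Mutex::_no_safepoint_check_flag);
//   }
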
#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}

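// Illustrative note, an assumption rather than part of this changeset: with
// -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent, an explicit
// System.gc() satisfies the predicate above, so collect() below routes it
// to collect_mostly_concurrent() instead of a stop-the-world full GC.
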
void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->snap_policy(clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* older_gens,
                         OopsInGenClosure* not_older_gens) {
  // General strong roots.
  SharedHeap::process_strong_roots(collecting_perm_gen, so,
                                   not_older_gens, older_gens);

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,         // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                /* full */,
                clear_all_soft_refs /* clear_all_soft_refs */,
                0                   /* size */,
                false               /* is_tlab */,
                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
                  n_gens() - 1        /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            VerifyAfterGC, "too expensive");
#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the
// youngest generation.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size   /* size */,
                                  false  /* is_large_noref */,
                                  true   /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}

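// Worked example (hypothetical sizes, not from the original file): given
// blocks with num_words {4, 9, 2}, removeSmallestScratch() unlinks the
// smallest remaining block on each pass (2, then 4, then 9), and
// sort_scratch_list() pushes each onto the front of the sorted list,
// yielding the decreasing order {9, 4, 2}.
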
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void GenCollectedHeap::print() const { print_on(tty); }
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// New method to print perm gen info with PrintGCDetails flag
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLABs and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_prologue(full);
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
  // Remember if a partial collection of the heap failed, and
  // we did a complete collection.
  if (full && incremental_collection_will_fail()) {
    set_last_incremental_collection_failed();
  } else {
    clear_last_incremental_collection_failed();
  }
  // Clear the flag, if set; the generation gc_epilogues will set the
  // flag again if the condition persists despite the collection.
  clear_incremental_collection_will_fail();

#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeMillis();
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
  // XXX Despite the assert above, since javaTimeMillis()
  // does not guarantee monotonically increasing return
  // values (note, I didn't say "strictly monotonic"),
  // we need to guard against getting back a time
  // later than now. This should be fixed by basing
  // on something like gethrtime() which guarantees
  // monotonicity. Note that cond_wait() is susceptible
  // to a similar problem, because its interface is
  // based on absolute time in the form of the
  // system time's notion of UTC. See also 4506635
  // for yet another problem of similar nature. XXX
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}