annotate src/share/vm/memory/genCollectedHeap.cpp @ 301:387a62b4be60
6728478: Assertion at parallel promotion from young to old generation
Summary: The fix avoids a call to address_for_index() in this particular situation where it is not known if the passed index is in bounds.
Reviewed-by: tonyp
author | jmasa |
date | Wed, 20 Aug 2008 23:05:04 -0700 |
parents | 850fdf70db2b |
children | c96030fff130 |
rev | line source |
0 | 1 /* |
196 | 2 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_genCollectedHeap.cpp.incl" | |
27 | |
28 GenCollectedHeap* GenCollectedHeap::_gch; | |
29 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) | |
30 | |
31 // The set of potentially parallel tasks in strong root scanning. | |
32 enum GCH_process_strong_roots_tasks { | |
33 // We probably want to parallelize both of these internally, but for now... | |
34 GCH_PS_younger_gens, | |
35 // Leave this one last. | |
36 GCH_PS_NumElements | |
37 }; | |
38 | |
39 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) : | |
40 SharedHeap(policy), | |
41 _gen_policy(policy), | |
42 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)), | |
43 _full_collections_completed(0) | |
44 { | |
45 if (_gen_process_strong_tasks == NULL || | |
46 !_gen_process_strong_tasks->valid()) { | |
47 vm_exit_during_initialization("Failed necessary allocation."); | |
48 } | |
49 assert(policy != NULL, "Sanity check"); | |
50 _preloading_shared_classes = false; | |
51 } | |
52 | |
53 jint GenCollectedHeap::initialize() { | |
54 int i; | |
55 _n_gens = gen_policy()->number_of_generations(); | |
56 | |
57 // While there are no constraints in the GC code that HeapWordSize | |
58 // be any particular value, there are multiple other areas in the | |
59 // system which believe this to be true (e.g. oop->object_size in some | |
60 // cases incorrectly returns the size in wordSize units rather than | |
61 // HeapWordSize). | |
62 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
63 | |
64 // The heap must be at least as aligned as generations. | |
65 size_t alignment = Generation::GenGrain; | |
66 | |
67 _gen_specs = gen_policy()->generations(); | |
68 PermanentGenerationSpec *perm_gen_spec = | |
69 collector_policy()->permanent_generation(); | |
70 | |
71 // Make sure the sizes are all aligned. | |
72 for (i = 0; i < _n_gens; i++) { | |
73 _gen_specs[i]->align(alignment); | |
74 } | |
75 perm_gen_spec->align(alignment); | |
76 | |
77 // If we are dumping the heap, then allocate a wasted block of address | |
78 // space in order to push the heap to a lower address. This extra | |
79 // address range allows for other (or larger) libraries to be loaded | |
80 // without them occupying the space required for the shared spaces. | |
81 | |
82 if (DumpSharedSpaces) { | |
83 uintx reserved = 0; | |
84 uintx block_size = 64*1024*1024; | |
85 while (reserved < SharedDummyBlockSize) { | |
86 char* dummy = os::reserve_memory(block_size); | |
87 reserved += block_size; | |
88 } | |
89 } | |
90 | |
91 // Allocate space for the heap. | |
92 | |
93 char* heap_address; | |
94 size_t total_reserved = 0; | |
95 int n_covered_regions = 0; | |
96 ReservedSpace heap_rs(0); | |
97 | |
98 heap_address = allocate(alignment, perm_gen_spec, &total_reserved, | |
99 &n_covered_regions, &heap_rs); | |
100 | |
101 if (UseSharedSpaces) { | |
102 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) { | |
103 if (heap_rs.is_reserved()) { | |
104 heap_rs.release(); | |
105 } | |
106 FileMapInfo* mapinfo = FileMapInfo::current_info(); | |
107 mapinfo->fail_continue("Unable to reserve shared region."); | |
108 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions, | |
109 &heap_rs); | |
110 } | |
111 } | |
112 | |
113 if (!heap_rs.is_reserved()) { | |
114 vm_shutdown_during_initialization( | |
115 "Could not reserve enough space for object heap"); | |
116 return JNI_ENOMEM; | |
117 } | |
118 | |
119 _reserved = MemRegion((HeapWord*)heap_rs.base(), | |
120 (HeapWord*)(heap_rs.base() + heap_rs.size())); | |
121 | |
122 // It is important to do this in a way such that concurrent readers can't | |
123 // temporarily think something is in the heap. (Seen this happen in asserts.) | |
124 _reserved.set_word_size(0); | |
125 _reserved.set_start((HeapWord*)heap_rs.base()); | |
126 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size() | |
127 - perm_gen_spec->misc_code_size(); | |
128 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size)); | |
129 | |
130 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions); | |
131 set_barrier_set(rem_set()->bs()); | |
132 _gch = this; | |
133 | |
134 for (i = 0; i < _n_gens; i++) { | |
135 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), | |
136 UseSharedSpaces, UseSharedSpaces); | |
137 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set()); | |
138 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size()); | |
139 } | |
140 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set()); | |
141 | |
142 clear_incremental_collection_will_fail(); | |
143 clear_last_incremental_collection_failed(); | |
144 | |
145 #ifndef SERIALGC | |
146 // If we are running CMS, create the collector responsible | |
147 // for collecting the CMS generations. | |
148 if (collector_policy()->is_concurrent_mark_sweep_policy()) { | |
149 bool success = create_cms_collector(); | |
150 if (!success) return JNI_ENOMEM; | |
151 } | |
152 #endif // SERIALGC | |
153 | |
154 return JNI_OK; | |
155 } | |
156 | |
157 | |
158 char* GenCollectedHeap::allocate(size_t alignment, | |
159 PermanentGenerationSpec* perm_gen_spec, | |
160 size_t* _total_reserved, | |
161 int* _n_covered_regions, | |
162 ReservedSpace* heap_rs){ | |
163 const char overflow_msg[] = "The size of the object heap + VM data exceeds " | |
164 "the maximum representable size"; | |
165 | |
166 // Now figure out the total size. | |
167 size_t total_reserved = 0; | |
168 int n_covered_regions = 0; | |
169 const size_t pageSize = UseLargePages ? | |
170 os::large_page_size() : os::vm_page_size(); | |
171 | |
172 for (int i = 0; i < _n_gens; i++) { | |
173 total_reserved += _gen_specs[i]->max_size(); | |
174 if (total_reserved < _gen_specs[i]->max_size()) { | |
175 vm_exit_during_initialization(overflow_msg); | |
176 } | |
177 n_covered_regions += _gen_specs[i]->n_covered_regions(); | |
178 } | |
179 assert(total_reserved % pageSize == 0, "Gen size"); | |
180 total_reserved += perm_gen_spec->max_size(); | |
181 assert(total_reserved % pageSize == 0, "Perm Gen size"); | |
182 | |
183 if (total_reserved < perm_gen_spec->max_size()) { | |
184 vm_exit_during_initialization(overflow_msg); | |
185 } | |
186 n_covered_regions += perm_gen_spec->n_covered_regions(); | |
187 | |
188 // Add the size of the data area which shares the same reserved area | |
189 // as the heap, but which is not actually part of the heap. | |
190 size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size(); | |
191 | |
192 total_reserved += s; | |
193 if (total_reserved < s) { | |
194 vm_exit_during_initialization(overflow_msg); | |
195 } | |
196 | |
197 if (UseLargePages) { | |
198 assert(total_reserved != 0, "total_reserved cannot be 0"); | |
199 total_reserved = round_to(total_reserved, os::large_page_size()); | |
200 if (total_reserved < os::large_page_size()) { | |
201 vm_exit_during_initialization(overflow_msg); | |
202 } | |
203 } | |
204 | |
205 // Calculate the address at which the heap must reside in order for | |
206 // the shared data to be at the required address. | |
207 | |
208 char* heap_address; | |
209 if (UseSharedSpaces) { | |
210 | |
211 // Calculate the address of the first word beyond the heap. | |
212 FileMapInfo* mapinfo = FileMapInfo::current_info(); | |
213 int lr = CompactingPermGenGen::n_regions - 1; | |
214 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment); | |
215 heap_address = mapinfo->region_base(lr) + capacity; | |
216 | |
217 // Calculate the address of the first word of the heap. | |
218 heap_address -= total_reserved; | |
219 } else { | |
220 heap_address = NULL; // any address will do. | |
221 } | |
222 | |
223 *_total_reserved = total_reserved; | |
224 *_n_covered_regions = n_covered_regions; | |
237 | 225 *heap_rs = ReservedHeapSpace(total_reserved, alignment, |
226 UseLargePages, heap_address); | |
0 | 227 |
228 return heap_address; | |
229 } | |
230 | |
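The running-sum checks in allocate() above rely on a standard unsigned-overflow idiom: after adding a term, a total smaller than that term means the addition wrapped around. A minimal, self-contained sketch of the idiom (illustrative code, not part of HotSpot):

#include <cstdio>
#include <cstdlib>

// Add 'term' to 'total', exiting if the unsigned addition wrapped around.
// A wrapped sum is necessarily smaller than either operand, so comparing
// against the term just added detects the overflow.
static size_t checked_add(size_t total, size_t term) {
  total += term;
  if (total < term) {
    fprintf(stderr, "The size of the object heap + VM data exceeds "
                    "the maximum representable size\n");
    exit(2);
  }
  return total;
}

allocate() applies this check after each generation's max_size(), after the perm gen size, and again after the shared misc data/code sizes.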
231 | |
232 void GenCollectedHeap::post_initialize() { | |
233 SharedHeap::post_initialize(); | |
234 TwoGenerationCollectorPolicy *policy = | |
235 (TwoGenerationCollectorPolicy *)collector_policy(); | |
236 guarantee(policy->is_two_generation_policy(), "Illegal policy type"); | |
237 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0); | |
238 assert(def_new_gen->kind() == Generation::DefNew || | |
239 def_new_gen->kind() == Generation::ParNew || | |
240 def_new_gen->kind() == Generation::ASParNew, | |
241 "Wrong generation kind"); | |
242 | |
243 Generation* old_gen = get_gen(1); | |
244 assert(old_gen->kind() == Generation::ConcurrentMarkSweep || | |
245 old_gen->kind() == Generation::ASConcurrentMarkSweep || | |
246 old_gen->kind() == Generation::MarkSweepCompact, | |
247 "Wrong generation kind"); | |
248 | |
249 policy->initialize_size_policy(def_new_gen->eden()->capacity(), | |
250 old_gen->capacity(), | |
251 def_new_gen->from()->capacity()); | |
252 policy->initialize_gc_policy_counters(); | |
253 } | |
254 | |
255 void GenCollectedHeap::ref_processing_init() { | |
256 SharedHeap::ref_processing_init(); | |
257 for (int i = 0; i < _n_gens; i++) { | |
258 _gens[i]->ref_processor_init(); | |
259 } | |
260 } | |
261 | |
262 size_t GenCollectedHeap::capacity() const { | |
263 size_t res = 0; | |
264 for (int i = 0; i < _n_gens; i++) { | |
265 res += _gens[i]->capacity(); | |
266 } | |
267 return res; | |
268 } | |
269 | |
270 size_t GenCollectedHeap::used() const { | |
271 size_t res = 0; | |
272 for (int i = 0; i < _n_gens; i++) { | |
273 res += _gens[i]->used(); | |
274 } | |
275 return res; | |
276 } | |
277 | |
278 // Save the "used_region" for generations at level and lower, | |
279 // and, if perm is true, for perm gen. | |
280 void GenCollectedHeap::save_used_regions(int level, bool perm) { | |
281 assert(level < _n_gens, "Illegal level parameter"); | |
282 for (int i = level; i >= 0; i--) { | |
283 _gens[i]->save_used_region(); | |
284 } | |
285 if (perm) { | |
286 perm_gen()->save_used_region(); | |
287 } | |
288 } | |
289 | |
290 size_t GenCollectedHeap::max_capacity() const { | |
291 size_t res = 0; | |
292 for (int i = 0; i < _n_gens; i++) { | |
293 res += _gens[i]->max_capacity(); | |
294 } | |
295 return res; | |
296 } | |
297 | |
298 // Update the _full_collections_completed counter | |
299 // at the end of a stop-world full GC. | |
300 unsigned int GenCollectedHeap::update_full_collections_completed() { | |
301 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); | |
302 assert(_full_collections_completed <= _total_full_collections, | |
303 "Can't complete more collections than were started"); | |
304 _full_collections_completed = _total_full_collections; | |
305 ml.notify_all(); | |
306 return _full_collections_completed; | |
307 } | |
308 | |
309 // Update the _full_collections_completed counter, as appropriate, | |
310 // at the end of a concurrent GC cycle. Note the conditional update | |
311 // below to allow this method to be called by a concurrent collector | |
312 // without synchronizing in any manner with the VM thread (which | |
313 // may already have initiated a STW full collection "concurrently"). | |
314 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) { | |
315 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); | |
316 assert((_full_collections_completed <= _total_full_collections) && | |
317 (count <= _total_full_collections), | |
318 "Can't complete more collections than were started"); | |
319 if (count > _full_collections_completed) { | |
320 _full_collections_completed = count; | |
321 ml.notify_all(); | |
322 } | |
323 return _full_collections_completed; | |
324 } | |
325 | |
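A hedged sketch of the monotonic-update protocol implemented by the two methods above, using standard C++ primitives in place of MonitorLockerEx and FullGCCount_lock (illustrative only, not the HotSpot locking API):

#include <condition_variable>
#include <mutex>

static std::mutex              full_gc_mutex;   // stands in for FullGCCount_lock
static std::condition_variable full_gc_cv;
static unsigned int            full_collections_completed = 0;

// Conditional update: a concurrent collector may race with the VM thread,
// so only move the counter forward, and notify waiters when it changes.
unsigned int update_completed(unsigned int count) {
  std::lock_guard<std::mutex> lock(full_gc_mutex);
  if (count > full_collections_completed) {
    full_collections_completed = count;
    full_gc_cv.notify_all();
  }
  return full_collections_completed;
}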
326 | |
327 #ifndef PRODUCT | |
328 // Override of memory state checking method in CollectedHeap: | |
329 // Some collectors (CMS for example) can't have badHeapWordVal written | |
330 // in the first two words of an object. (For instance, in the case of | |
331 // CMS these words hold state used to synchronize between certain | |
332 // (concurrent) GC steps and direct allocating mutators.) | |
333 // The skip_header_HeapWords() method below allows us to skip | |
334 // over the requisite number of HeapWords. Note that (for | |
335 // generational collectors) the same number of words is | |
336 // skipped in each object, irrespective of the generation in which | |
337 // that object lives. The resultant loss of precision seems to be | |
338 // harmless and the pain of avoiding that imprecision appears somewhat | |
339 // higher than we are prepared to pay for such rudimentary debugging | |
340 // support. | |
341 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, | |
342 size_t size) { | |
343 if (CheckMemoryInitialization && ZapUnusedHeapArea) { | |
344 // We are asked to check a size in HeapWords, | |
345 // but the memory is mangled in juint words. | |
346 juint* start = (juint*) (addr + skip_header_HeapWords()); | |
347 juint* end = (juint*) (addr + size); | |
348 for (juint* slot = start; slot < end; slot += 1) { | |
349 assert(*slot == badHeapWordVal, | |
350 "Found non badHeapWordValue in pre-allocation check"); | |
351 } | |
352 } | |
353 } | |
354 #endif | |
355 | |
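The checker above scans mangled memory in juint (32-bit) units after skipping the header words a concurrent collector may legitimately overwrite. A self-contained sketch of the same scan, with a hypothetical fill constant standing in for badHeapWordVal:

#include <cassert>
#include <cstdint>

static const uint32_t kBadWord = 0xBAADBABE;  // hypothetical zap pattern

// Assert that every 32-bit slot in [start, end) still holds the fill value;
// callers are expected to have advanced 'start' past any header words that
// the collector is allowed to overwrite.
void check_zapped(const uint32_t* start, const uint32_t* end) {
  for (const uint32_t* slot = start; slot < end; slot++) {
    assert(*slot == kBadWord && "found non-zapped word in pre-allocation check");
  }
}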
356 HeapWord* GenCollectedHeap::attempt_allocation(size_t size, | |
357 bool is_tlab, | |
358 bool first_only) { | |
359 HeapWord* res; | |
360 for (int i = 0; i < _n_gens; i++) { | |
361 if (_gens[i]->should_allocate(size, is_tlab)) { | |
362 res = _gens[i]->allocate(size, is_tlab); | |
363 if (res != NULL) return res; | |
364 else if (first_only) break; | |
365 } | |
366 } | |
367 // Otherwise... | |
368 return NULL; | |
369 } | |
370 | |
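attempt_allocation() is a first-fit walk from the youngest generation upward. A hedged sketch of the policy with illustrative types (not the HotSpot Generation API):

struct GenIface {
  virtual bool  should_allocate(size_t words, bool is_tlab) = 0;
  virtual void* allocate(size_t words, bool is_tlab) = 0;
  virtual ~GenIface() {}
};

// Return the first successful allocation, youngest generation first; with
// first_only set, stop after the first generation willing to allocate.
void* first_fit_allocate(GenIface** gens, int n, size_t words,
                         bool is_tlab, bool first_only) {
  for (int i = 0; i < n; i++) {
    if (gens[i]->should_allocate(words, is_tlab)) {
      if (void* p = gens[i]->allocate(words, is_tlab)) return p;
      if (first_only) break;
    }
  }
  return nullptr;  // caller falls back to a GC-and-retry path
}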
371 HeapWord* GenCollectedHeap::mem_allocate(size_t size, | |
372 bool is_large_noref, | |
373 bool is_tlab, | |
374 bool* gc_overhead_limit_was_exceeded) { | |
375 return collector_policy()->mem_allocate_work(size, | |
376 is_tlab, | |
377 gc_overhead_limit_was_exceeded); | |
378 } | |
379 | |
380 bool GenCollectedHeap::must_clear_all_soft_refs() { | |
381 return _gc_cause == GCCause::_last_ditch_collection; | |
382 } | |
383 | |
384 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { | |
385 return (cause == GCCause::_java_lang_system_gc || | |
386 cause == GCCause::_gc_locker) && | |
387 UseConcMarkSweepGC && ExplicitGCInvokesConcurrent; | |
388 } | |
389 | |
390 void GenCollectedHeap::do_collection(bool full, | |
391 bool clear_all_soft_refs, | |
392 size_t size, | |
393 bool is_tlab, | |
394 int max_level) { | |
395 bool prepared_for_verification = false; | |
396 ResourceMark rm; | |
397 DEBUG_ONLY(Thread* my_thread = Thread::current();) | |
398 | |
399 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
400 assert(my_thread->is_VM_thread() || | |
401 my_thread->is_ConcurrentGC_thread(), | |
402 "incorrect thread type capability"); | |
403 assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock"); | |
404 guarantee(!is_gc_active(), "collection is not reentrant"); | |
405 assert(max_level < n_gens(), "sanity check"); | |
406 | |
407 if (GC_locker::check_active_before_gc()) { | |
408 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
409 } | |
410 | |
411 const size_t perm_prev_used = perm_gen()->used(); | |
412 | |
413 if (PrintHeapAtGC) { | |
414 Universe::print_heap_before_gc(); | |
415 if (Verbose) { | |
416 gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause())); | |
417 } | |
418 } | |
419 | |
420 { | |
421 FlagSetting fl(_is_gc_active, true); | |
422 | |
423 bool complete = full && (max_level == (n_gens()-1)); | |
424 const char* gc_cause_str = "GC "; | |
425 if (complete) { | |
426 GCCause::Cause cause = gc_cause(); | |
427 if (cause == GCCause::_java_lang_system_gc) { | |
428 gc_cause_str = "Full GC (System) "; | |
429 } else { | |
430 gc_cause_str = "Full GC "; | |
431 } | |
432 } | |
433 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
434 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
435 TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty); | |
436 | |
437 gc_prologue(complete); | |
438 increment_total_collections(complete); | |
439 | |
440 size_t gch_prev_used = used(); | |
441 | |
442 int starting_level = 0; | |
443 if (full) { | |
444 // Search for the oldest generation which will collect all younger | |
445 // generations, and start collection loop there. | |
446 for (int i = max_level; i >= 0; i--) { | |
447 if (_gens[i]->full_collects_younger_generations()) { | |
448 starting_level = i; | |
449 break; | |
450 } | |
451 } | |
452 } | |
453 | |
454 bool must_restore_marks_for_biased_locking = false; | |
455 | |
456 int max_level_collected = starting_level; | |
457 for (int i = starting_level; i <= max_level; i++) { | |
458 if (_gens[i]->should_collect(full, size, is_tlab)) { | |
459 // Timer for individual generations. Last argument is false: no CR | |
460 TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); | |
461 TraceCollectorStats tcs(_gens[i]->counters()); | |
462 TraceMemoryManagerStats tmms(_gens[i]->kind()); | |
463 | |
464 size_t prev_used = _gens[i]->used(); | |
465 _gens[i]->stat_record()->invocations++; | |
466 _gens[i]->stat_record()->accumulated_time.start(); | |
467 | |
263 | 468 // Must be done anew before each collection because |
469 // a previous collection will do mangling and will | |
470 // change top of some spaces. | |
471 record_gen_tops_before_GC(); | |
472 | |
0 | 473 if (PrintGC && Verbose) { |
474 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, | |
475 i, | |
476 _gens[i]->stat_record()->invocations, | |
477 size*HeapWordSize); | |
478 } | |
479 | |
480 if (VerifyBeforeGC && i >= VerifyGCLevel && | |
481 total_collections() >= VerifyGCStartAt) { | |
482 HandleMark hm; // Discard invalid handles created during verification | |
483 if (!prepared_for_verification) { | |
484 prepare_for_verify(); | |
485 prepared_for_verification = true; | |
486 } | |
487 gclog_or_tty->print(" VerifyBeforeGC:"); | |
488 Universe::verify(true); | |
489 } | |
490 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
491 | |
492 if (!must_restore_marks_for_biased_locking && | |
493 _gens[i]->performs_in_place_marking()) { | |
494 // We perform this mark word preservation work lazily | |
495 // because it's only at this point that we know whether we | |
496 // absolutely have to do it; we want to avoid doing it for | |
497 // scavenge-only collections where it's unnecessary | |
498 must_restore_marks_for_biased_locking = true; | |
499 BiasedLocking::preserve_marks(); | |
500 } | |
501 | |
502 // Do collection work | |
503 { | |
504 // Note on ref discovery: For what appear to be historical reasons, | |
505 // GCH enables and disables (by enqueueing) refs discovery. | |
506 // In the future this should be moved into the generation's | |
507 // collect method so that ref discovery and enqueueing concerns | |
508 // are local to a generation. The collect method could return | |
509 // an appropriate indication in the case that notification on | |
510 // the ref lock was needed. This will make the treatment of | |
511 // weak refs more uniform (and indeed remove such concerns | |
512 // from GCH). XXX | |
513 | |
514 HandleMark hm; // Discard invalid handles created during gc | |
515 save_marks(); // save marks for all gens | |
516 // We want to discover references, but not process them yet. | |
517 // This mode is disabled in process_discovered_references if the | |
518 // generation does some collection work, or in | |
519 // enqueue_discovered_references if the generation returns | |
520 // without doing any work. | |
521 ReferenceProcessor* rp = _gens[i]->ref_processor(); | |
522 // If the discovery of ("weak") refs in this generation is | |
523 // atomic wrt other collectors in this configuration, we | |
524 // are guaranteed to have empty discovered ref lists. | |
525 if (rp->discovery_is_atomic()) { | |
526 rp->verify_no_references_recorded(); | |
527 rp->enable_discovery(); | |
528 } else { | |
529 // collect() will enable discovery as appropriate | |
530 } | |
531 _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab); | |
532 if (!rp->enqueuing_is_done()) { | |
533 rp->enqueue_discovered_references(); | |
534 } else { | |
535 rp->set_enqueuing_is_done(false); | |
536 } | |
537 rp->verify_no_references_recorded(); | |
538 } | |
539 max_level_collected = i; | |
540 | |
541 // Determine if allocation request was met. | |
542 if (size > 0) { | |
543 if (!is_tlab || _gens[i]->supports_tlab_allocation()) { | |
544 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) { | |
545 size = 0; | |
546 } | |
547 } | |
548 } | |
549 | |
550 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
551 | |
552 _gens[i]->stat_record()->accumulated_time.stop(); | |
553 | |
554 update_gc_stats(i, full); | |
555 | |
556 if (VerifyAfterGC && i >= VerifyGCLevel && | |
557 total_collections() >= VerifyGCStartAt) { | |
558 HandleMark hm; // Discard invalid handles created during verification | |
559 gclog_or_tty->print(" VerifyAfterGC:"); | |
560 Universe::verify(false); | |
561 } | |
562 | |
563 if (PrintGCDetails) { | |
564 gclog_or_tty->print(":"); | |
565 _gens[i]->print_heap_change(prev_used); | |
566 } | |
567 } | |
568 } | |
569 | |
570 // Update "complete" boolean wrt what actually transpired -- | |
571 // for instance, a promotion failure could have led to | |
572 // a whole heap collection. | |
573 complete = complete || (max_level_collected == n_gens() - 1); | |
574 | |
575 if (PrintGCDetails) { | |
576 print_heap_change(gch_prev_used); | |
577 | |
578 // Print perm gen info for full GC with PrintGCDetails flag. | |
579 if (complete) { | |
580 print_perm_heap_change(perm_prev_used); | |
581 } | |
582 } | |
583 | |
584 for (int j = max_level_collected; j >= 0; j -= 1) { | |
585 // Adjust generation sizes. | |
586 _gens[j]->compute_new_size(); | |
587 } | |
588 | |
589 if (complete) { | |
590 // Ask the permanent generation to adjust size for full collections | |
591 perm()->compute_new_size(); | |
592 update_full_collections_completed(); | |
593 } | |
594 | |
595 // Track memory usage and detect low memory after GC finishes | |
596 MemoryService::track_memory_usage(); | |
597 | |
598 gc_epilogue(complete); | |
599 | |
600 if (must_restore_marks_for_biased_locking) { | |
601 BiasedLocking::restore_marks(); | |
602 } | |
603 } | |
604 | |
605 AdaptiveSizePolicy* sp = gen_policy()->size_policy(); | |
606 AdaptiveSizePolicyOutput(sp, total_collections()); | |
607 | |
608 if (PrintHeapAtGC) { | |
609 Universe::print_heap_after_gc(); | |
610 } | |
611 | |
612 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { | |
613 tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); | |
614 vm_exit(-1); | |
615 } | |
616 } | |
617 | |
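One step of do_collection() worth isolating is the starting-level search: for a full GC, the loop starts at the oldest generation (at or below max_level) whose collection also collects everything younger, so younger generations are not collected twice. A small sketch under an assumed per-generation boolean:

struct GenDesc { bool full_collects_younger_generations; };

// Pick the oldest level <= max_level whose collection subsumes all younger
// generations; for a non-full GC, always start at the youngest (level 0).
int pick_starting_level(bool full, int max_level, const GenDesc* gens) {
  int starting_level = 0;
  if (full) {
    for (int i = max_level; i >= 0; i--) {
      if (gens[i].full_collects_younger_generations) {
        starting_level = i;
        break;
      }
    }
  }
  return starting_level;
}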
618 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { | |
619 return collector_policy()->satisfy_failed_allocation(size, is_tlab); | |
620 } | |
621 | |
622 void GenCollectedHeap::set_par_threads(int t) { | |
623 SharedHeap::set_par_threads(t); | |
624 _gen_process_strong_tasks->set_par_threads(t); | |
625 } | |
626 | |
627 class AssertIsPermClosure: public OopClosure { | |
628 public: | |
629 void do_oop(oop* p) { | |
630 assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm."); | |
631 } | |
113 | 632 void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
0 | 633 }; |
634 static AssertIsPermClosure assert_is_perm_closure; | |
635 | |
636 void GenCollectedHeap:: | |
637 gen_process_strong_roots(int level, | |
638 bool younger_gens_as_roots, | |
639 bool collecting_perm_gen, | |
640 SharedHeap::ScanningOption so, | |
641 OopsInGenClosure* older_gens, | |
642 OopsInGenClosure* not_older_gens) { | |
643 // General strong roots. | |
644 SharedHeap::process_strong_roots(collecting_perm_gen, so, | |
645 not_older_gens, older_gens); | |
646 | |
647 if (younger_gens_as_roots) { | |
648 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { | |
649 for (int i = 0; i < level; i++) { | |
650 not_older_gens->set_generation(_gens[i]); | |
651 _gens[i]->oop_iterate(not_older_gens); | |
652 } | |
653 not_older_gens->reset_generation(); | |
654 } | |
655 } | |
656 // When collection is parallel, all threads get to cooperate to do | |
657 // older-gen scanning. | |
658 for (int i = level+1; i < _n_gens; i++) { | |
659 older_gens->set_generation(_gens[i]); | |
660 rem_set()->younger_refs_iterate(_gens[i], older_gens); | |
661 older_gens->reset_generation(); | |
662 } | |
663 | |
664 _gen_process_strong_tasks->all_tasks_completed(); | |
665 } | |
666 | |
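The is_task_claimed(GCH_PS_younger_gens) call above lets exactly one of several parallel GC workers scan the younger generations as roots. A minimal sketch of that claim with a std::atomic flag standing in for SubTasksDone (illustrative, not the HotSpot utility):

#include <atomic>

static std::atomic<bool> younger_gens_claimed{false};

// exchange() returns the previous value, so only the first worker to arrive
// sees 'false' and wins the task; everyone else skips it.
bool try_claim_younger_gens_task() {
  return !younger_gens_claimed.exchange(true);
}

The real SubTasksDone additionally coordinates resetting its claim flags once every worker has passed through all_tasks_completed(), as at the end of gen_process_strong_roots() above.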
667 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, | |
668 OopClosure* non_root_closure) { | |
669 SharedHeap::process_weak_roots(root_closure, non_root_closure); | |
670 // "Local" "weak" refs | |
671 for (int i = 0; i < _n_gens; i++) { | |
672 _gens[i]->ref_processor()->weak_oops_do(root_closure); | |
673 } | |
674 } | |
675 | |
676 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ | |
677 void GenCollectedHeap:: \ | |
678 oop_since_save_marks_iterate(int level, \ | |
679 OopClosureType* cur, \ | |
680 OopClosureType* older) { \ | |
681 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ | |
682 for (int i = level+1; i < n_gens(); i++) { \ | |
683 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ | |
684 } \ | |
685 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \ | |
686 } | |
687 | |
688 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN) | |
689 | |
690 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN | |
691 | |
692 bool GenCollectedHeap::no_allocs_since_save_marks(int level) { | |
693 for (int i = level; i < _n_gens; i++) { | |
694 if (!_gens[i]->no_allocs_since_save_marks()) return false; | |
695 } | |
696 return perm_gen()->no_allocs_since_save_marks(); | |
697 } | |
698 | |
699 bool GenCollectedHeap::supports_inline_contig_alloc() const { | |
700 return _gens[0]->supports_inline_contig_alloc(); | |
701 } | |
702 | |
703 HeapWord** GenCollectedHeap::top_addr() const { | |
704 return _gens[0]->top_addr(); | |
705 } | |
706 | |
707 HeapWord** GenCollectedHeap::end_addr() const { | |
708 return _gens[0]->end_addr(); | |
709 } | |
710 | |
711 size_t GenCollectedHeap::unsafe_max_alloc() { | |
712 return _gens[0]->unsafe_max_alloc_nogc(); | |
713 } | |
714 | |
715 // public collection interfaces | |
716 | |
717 void GenCollectedHeap::collect(GCCause::Cause cause) { | |
718 if (should_do_concurrent_full_gc(cause)) { | |
719 #ifndef SERIALGC | |
720 // mostly concurrent full collection | |
721 collect_mostly_concurrent(cause); | |
722 #else // SERIALGC | |
723 ShouldNotReachHere(); | |
724 #endif // SERIALGC | |
725 } else { | |
726 #ifdef ASSERT | |
727 if (cause == GCCause::_scavenge_alot) { | |
728 // minor collection only | |
729 collect(cause, 0); | |
730 } else { | |
731 // Stop-the-world full collection | |
732 collect(cause, n_gens() - 1); | |
733 } | |
734 #else | |
735 // Stop-the-world full collection | |
736 collect(cause, n_gens() - 1); | |
737 #endif | |
738 } | |
739 } | |
740 | |
741 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { | |
742 // The caller doesn't have the Heap_lock | |
743 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
744 MutexLocker ml(Heap_lock); | |
745 collect_locked(cause, max_level); | |
746 } | |
747 | |
748 // This interface assumes that it's being called by the | |
749 // vm thread. It collects the heap assuming that the | |
750 // heap lock is already held and that we are executing in | |
751 // the context of the vm thread. | |
752 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
753 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
754 assert(Heap_lock->is_locked(), "Precondition#2"); | |
755 GCCauseSetter gcs(this, cause); | |
756 switch (cause) { | |
757 case GCCause::_heap_inspection: | |
758 case GCCause::_heap_dump: { | |
759 HandleMark hm; | |
760 do_full_collection(false, // don't clear all soft refs | |
761 n_gens() - 1); | |
762 break; | |
763 } | |
764 default: // XXX FIX ME | |
765 ShouldNotReachHere(); // Unexpected use of this function | |
766 } | |
767 } | |
768 | |
769 void GenCollectedHeap::collect_locked(GCCause::Cause cause) { | |
770 // The caller has the Heap_lock | |
771 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); | |
772 collect_locked(cause, n_gens() - 1); | |
773 } | |
774 | |
775 // this is the private collection interface | |
776 // The Heap_lock is expected to be held on entry. | |
777 | |
778 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { | |
779 if (_preloading_shared_classes) { | |
780 warning("\nThe permanent generation is not large enough to preload " | |
781 "requested classes.\nUse -XX:PermSize= to increase the initial " | |
782 "size of the permanent generation.\n"); | |
783 vm_exit(2); | |
784 } | |
785 // Read the GC count while holding the Heap_lock | |
786 unsigned int gc_count_before = total_collections(); | |
787 unsigned int full_gc_count_before = total_full_collections(); | |
788 { | |
789 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
790 VM_GenCollectFull op(gc_count_before, full_gc_count_before, | |
791 cause, max_level); | |
792 VMThread::execute(&op); | |
793 } | |
794 } | |
795 | |
796 #ifndef SERIALGC | |
797 bool GenCollectedHeap::create_cms_collector() { | |
798 | |
799 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) || | |
800 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) && | |
801 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep, | |
802 "Unexpected generation kinds"); | |
803 // Skip two header words in the block content verification | |
804 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();) | |
805 CMSCollector* collector = new CMSCollector( | |
806 (ConcurrentMarkSweepGeneration*)_gens[1], | |
807 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(), | |
808 _rem_set->as_CardTableRS(), | |
809 (ConcurrentMarkSweepPolicy*) collector_policy()); | |
810 | |
811 if (collector == NULL || !collector->completed_initialization()) { | |
812 if (collector) { | |
813 delete collector; // Be nice in embedded situation | |
814 } | |
815 vm_shutdown_during_initialization("Could not create CMS collector"); | |
816 return false; | |
817 } | |
818 return true; // success | |
819 } | |
820 | |
821 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) { | |
822 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock"); | |
823 | |
824 MutexLocker ml(Heap_lock); | |
825 // Read the GC counts while holding the Heap_lock | |
826 unsigned int full_gc_count_before = total_full_collections(); | |
827 unsigned int gc_count_before = total_collections(); | |
828 { | |
829 MutexUnlocker mu(Heap_lock); | |
830 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause); | |
831 VMThread::execute(&op); | |
832 } | |
833 } | |
834 #endif // SERIALGC | |
835 | |
836 | |
837 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, | |
838 int max_level) { | |
839 int local_max_level; | |
840 if (!incremental_collection_will_fail() && | |
841 gc_cause() == GCCause::_gc_locker) { | |
842 local_max_level = 0; | |
843 } else { | |
844 local_max_level = max_level; | |
845 } | |
846 | |
847 do_collection(true /* full */, | |
848 clear_all_soft_refs /* clear_all_soft_refs */, | |
849 0 /* size */, | |
850 false /* is_tlab */, | |
851 local_max_level /* max_level */); | |
852 // Hack XXX FIX ME !!! | |
853 // A scavenge may not have been attempted, or may have | |
854 // been attempted and failed, because the old gen was too full | |
855 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker && | |
856 incremental_collection_will_fail()) { | |
857 if (PrintGCDetails) { | |
858 gclog_or_tty->print_cr("GC locker: Trying a full collection " | |
859 "because scavenge failed"); | |
860 } | |
861 // This time allow the old gen to be collected as well | |
862 do_collection(true /* full */, | |
863 clear_all_soft_refs /* clear_all_soft_refs */, | |
864 0 /* size */, | |
865 false /* is_tlab */, | |
866 n_gens() - 1 /* max_level */); | |
867 } | |
868 } | |
869 | |
870 // Returns "TRUE" iff "p" points into the allocated area of the heap. | |
871 bool GenCollectedHeap::is_in(const void* p) const { | |
872 #ifndef ASSERT | |
873 guarantee(VerifyBeforeGC || | |
874 VerifyDuringGC || | |
875 VerifyBeforeExit || | |
876 VerifyAfterGC, "too expensive"); | |
877 #endif | |
878 // This might be sped up with a cache of the last generation that | |
879 // answered yes. | |
880 for (int i = 0; i < _n_gens; i++) { | |
881 if (_gens[i]->is_in(p)) return true; | |
882 } | |
883 if (_perm_gen->as_gen()->is_in(p)) return true; | |
884 // Otherwise... | |
885 return false; | |
886 } | |
887 | |
888 // Returns "TRUE" iff "p" points into the allocated area of the youngest generation. | |
889 bool GenCollectedHeap::is_in_youngest(void* p) { | |
890 return _gens[0]->is_in(p); | |
891 } | |
892 | |
893 void GenCollectedHeap::oop_iterate(OopClosure* cl) { | |
894 for (int i = 0; i < _n_gens; i++) { | |
895 _gens[i]->oop_iterate(cl); | |
896 } | |
897 } | |
898 | |
899 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { | |
900 for (int i = 0; i < _n_gens; i++) { | |
901 _gens[i]->oop_iterate(mr, cl); | |
902 } | |
903 } | |
904 | |
905 void GenCollectedHeap::object_iterate(ObjectClosure* cl) { | |
906 for (int i = 0; i < _n_gens; i++) { | |
907 _gens[i]->object_iterate(cl); | |
908 } | |
909 perm_gen()->object_iterate(cl); | |
910 } | |
911 | |
912 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { | |
913 for (int i = 0; i < _n_gens; i++) { | |
914 _gens[i]->object_iterate_since_last_GC(cl); | |
915 } | |
916 } | |
917 | |
918 Space* GenCollectedHeap::space_containing(const void* addr) const { | |
919 for (int i = 0; i < _n_gens; i++) { | |
920 Space* res = _gens[i]->space_containing(addr); | |
921 if (res != NULL) return res; | |
922 } | |
923 Space* res = perm_gen()->space_containing(addr); | |
924 if (res != NULL) return res; | |
925 // Otherwise... | |
926 assert(false, "Could not find containing space"); | |
927 return NULL; | |
928 } | |
929 | |
930 | |
931 HeapWord* GenCollectedHeap::block_start(const void* addr) const { | |
932 assert(is_in_reserved(addr), "block_start of address outside of heap"); | |
933 for (int i = 0; i < _n_gens; i++) { | |
934 if (_gens[i]->is_in_reserved(addr)) { | |
935 assert(_gens[i]->is_in(addr), | |
936 "addr should be in allocated part of generation"); | |
937 return _gens[i]->block_start(addr); | |
938 } | |
939 } | |
940 if (perm_gen()->is_in_reserved(addr)) { | |
941 assert(perm_gen()->is_in(addr), | |
942 "addr should be in allocated part of perm gen"); | |
943 return perm_gen()->block_start(addr); | |
944 } | |
945 assert(false, "Some generation should contain the address"); | |
946 return NULL; | |
947 } | |
948 | |
949 size_t GenCollectedHeap::block_size(const HeapWord* addr) const { | |
950 assert(is_in_reserved(addr), "block_size of address outside of heap"); | |
951 for (int i = 0; i < _n_gens; i++) { | |
952 if (_gens[i]->is_in_reserved(addr)) { | |
953 assert(_gens[i]->is_in(addr), | |
954 "addr should be in allocated part of generation"); | |
955 return _gens[i]->block_size(addr); | |
956 } | |
957 } | |
958 if (perm_gen()->is_in_reserved(addr)) { | |
959 assert(perm_gen()->is_in(addr), | |
960 "addr should be in allocated part of perm gen"); | |
961 return perm_gen()->block_size(addr); | |
962 } | |
963 assert(false, "Some generation should contain the address"); | |
964 return 0; | |
965 } | |
966 | |
967 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { | |
968 assert(is_in_reserved(addr), "block_is_obj of address outside of heap"); | |
969 assert(block_start(addr) == addr, "addr must be a block start"); | |
970 for (int i = 0; i < _n_gens; i++) { | |
971 if (_gens[i]->is_in_reserved(addr)) { | |
972 return _gens[i]->block_is_obj(addr); | |
973 } | |
974 } | |
975 if (perm_gen()->is_in_reserved(addr)) { | |
976 return perm_gen()->block_is_obj(addr); | |
977 } | |
978 assert(false, "Some generation should contain the address"); | |
979 return false; | |
980 } | |
981 | |
982 bool GenCollectedHeap::supports_tlab_allocation() const { | |
983 for (int i = 0; i < _n_gens; i += 1) { | |
984 if (_gens[i]->supports_tlab_allocation()) { | |
985 return true; | |
986 } | |
987 } | |
988 return false; | |
989 } | |
990 | |
991 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { | |
992 size_t result = 0; | |
993 for (int i = 0; i < _n_gens; i += 1) { | |
994 if (_gens[i]->supports_tlab_allocation()) { | |
995 result += _gens[i]->tlab_capacity(); | |
996 } | |
997 } | |
998 return result; | |
999 } | |
1000 | |
1001 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { | |
1002 size_t result = 0; | |
1003 for (int i = 0; i < _n_gens; i += 1) { | |
1004 if (_gens[i]->supports_tlab_allocation()) { | |
1005 result += _gens[i]->unsafe_max_tlab_alloc(); | |
1006 } | |
1007 } | |
1008 return result; | |
1009 } | |
1010 | |
1011 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { | |
1012 bool gc_overhead_limit_was_exceeded; | |
1013 HeapWord* result = mem_allocate(size /* size */, | |
1014 false /* is_large_noref */, | |
1015 true /* is_tlab */, | |
1016 &gc_overhead_limit_was_exceeded); | |
1017 return result; | |
1018 } | |
1019 | |
1020 // Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal size | |
1021 // from the list headed by "*prev_ptr". | |
1022 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) { | |
1023 bool first = true; | |
1024 size_t min_size = 0; // "first" makes this conceptually infinite. | |
1025 ScratchBlock **smallest_ptr, *smallest; | |
1026 ScratchBlock *cur = *prev_ptr; | |
1027 while (cur) { | |
1028 assert(*prev_ptr == cur, "just checking"); | |
1029 if (first || cur->num_words < min_size) { | |
1030 smallest_ptr = prev_ptr; | |
1031 smallest = cur; | |
1032 min_size = smallest->num_words; | |
1033 first = false; | |
1034 } | |
1035 prev_ptr = &cur->next; | |
1036 cur = cur->next; | |
1037 } | |
1038 smallest = *smallest_ptr; | |
1039 *smallest_ptr = smallest->next; | |
1040 return smallest; | |
1041 } | |
1042 | |
1043 // Sort the scratch block list headed by "list" into decreasing size order, | |
1044 // and set "list" to the result. | |
1045 static void sort_scratch_list(ScratchBlock*& list) { | |
1046 ScratchBlock* sorted = NULL; | |
1047 ScratchBlock* unsorted = list; | |
1048 while (unsorted) { | |
1049 ScratchBlock *smallest = removeSmallestScratch(&unsorted); | |
1050 smallest->next = sorted; | |
1051 sorted = smallest; | |
1052 } | |
1053 list = sorted; | |
1054 } | |
1055 | |
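The two helpers above implement a selection sort on a singly linked list: repeatedly unlink the smallest remaining block and push it on the front of the result, which leaves the head holding the largest block (e.g. sizes 3,1,2 sort to 3,2,1). A compact sketch of the same algorithm:

struct Block { size_t num_words; Block* next; };

// Unlink and return the smallest block; the list must be non-empty.
static Block* remove_smallest(Block** head) {
  Block** best = head;
  for (Block** p = &(*head)->next; *p != nullptr; p = &(*p)->next) {
    if ((*p)->num_words < (*best)->num_words) best = p;
  }
  Block* b = *best;
  *best = b->next;
  return b;
}

// Each push puts a larger block in front of all smaller ones, so the
// finished list is in decreasing size order.
static void sort_decreasing(Block*& list) {
  Block* sorted = nullptr;
  while (list != nullptr) {
    Block* s = remove_smallest(&list);
    s->next = sorted;
    sorted = s;
  }
  list = sorted;
}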
1056 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor, | |
1057 size_t max_alloc_words) { | |
1058 ScratchBlock* res = NULL; | |
1059 for (int i = 0; i < _n_gens; i++) { | |
1060 _gens[i]->contribute_scratch(res, requestor, max_alloc_words); | |
1061 } | |
1062 sort_scratch_list(res); | |
1063 return res; | |
1064 } | |
1065 | |
263 | 1066 void GenCollectedHeap::release_scratch() { |
1067 for (int i = 0; i < _n_gens; i++) { | |
1068 _gens[i]->reset_scratch(); | |
1069 } | |
1070 } | |
1071 | |
0 | 1072 size_t GenCollectedHeap::large_typearray_limit() { |
1073 return gen_policy()->large_typearray_limit(); | |
1074 } | |
1075 | |
1076 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { | |
1077 void do_generation(Generation* gen) { | |
1078 gen->prepare_for_verify(); | |
1079 } | |
1080 }; | |
1081 | |
1082 void GenCollectedHeap::prepare_for_verify() { | |
1083 ensure_parsability(false); // no need to retire TLABs | |
1084 GenPrepareForVerifyClosure blk; | |
1085 generation_iterate(&blk, false); | |
1086 perm_gen()->prepare_for_verify(); | |
1087 } | |
1088 | |
1089 | |
1090 void GenCollectedHeap::generation_iterate(GenClosure* cl, | |
1091 bool old_to_young) { | |
1092 if (old_to_young) { | |
1093 for (int i = _n_gens-1; i >= 0; i--) { | |
1094 cl->do_generation(_gens[i]); | |
1095 } | |
1096 } else { | |
1097 for (int i = 0; i < _n_gens; i++) { | |
1098 cl->do_generation(_gens[i]); | |
1099 } | |
1100 } | |
1101 } | |
1102 | |
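generation_iterate() is the hub for the GenClosure pattern used throughout this file: callers subclass a one-method closure and the heap applies it young-to-old or old-to-young. A self-contained sketch of the pattern:

struct Generation;  // opaque for the sketch

struct GenClosureSketch {
  virtual void do_generation(Generation* gen) = 0;
  virtual ~GenClosureSketch() {}
};

// Drive a closure over the generations in either order, mirroring the
// old_to_young flag of generation_iterate() above.
void iterate_generations(Generation** gens, int n,
                         GenClosureSketch* cl, bool old_to_young) {
  if (old_to_young) {
    for (int i = n - 1; i >= 0; i--) cl->do_generation(gens[i]);
  } else {
    for (int i = 0; i < n; i++) cl->do_generation(gens[i]);
  }
}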
1103 void GenCollectedHeap::space_iterate(SpaceClosure* cl) { | |
1104 for (int i = 0; i < _n_gens; i++) { | |
1105 _gens[i]->space_iterate(cl, true); | |
1106 } | |
1107 perm_gen()->space_iterate(cl, true); | |
1108 } | |
1109 | |
1110 bool GenCollectedHeap::is_maximal_no_gc() const { | |
1111 for (int i = 0; i < _n_gens; i++) { // skip perm gen | |
1112 if (!_gens[i]->is_maximal_no_gc()) { | |
1113 return false; | |
1114 } | |
1115 } | |
1116 return true; | |
1117 } | |
1118 | |
1119 void GenCollectedHeap::save_marks() { | |
1120 for (int i = 0; i < _n_gens; i++) { | |
1121 _gens[i]->save_marks(); | |
1122 } | |
1123 perm_gen()->save_marks(); | |
1124 } | |
1125 | |
1126 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) { | |
1127 for (int i = 0; i <= collectedGen; i++) { | |
1128 _gens[i]->compute_new_size(); | |
1129 } | |
1130 } | |
1131 | |
1132 GenCollectedHeap* GenCollectedHeap::heap() { | |
1133 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); | |
1134 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); | |
1135 return _gch; | |
1136 } | |
1137 | |
1138 | |
1139 void GenCollectedHeap::prepare_for_compaction() { | |
1140 Generation* scanning_gen = _gens[_n_gens-1]; | |
1141 // Start by compacting into same gen. | |
1142 CompactPoint cp(scanning_gen, NULL, NULL); | |
1143 while (scanning_gen != NULL) { | |
1144 scanning_gen->prepare_for_compaction(&cp); | |
1145 scanning_gen = prev_gen(scanning_gen); | |
1146 } | |
1147 } | |
1148 | |
1149 GCStats* GenCollectedHeap::gc_stats(int level) const { | |
1150 return _gens[level]->gc_stats(); | |
1151 } | |
1152 | |
1153 void GenCollectedHeap::verify(bool allow_dirty, bool silent) { | |
1154 if (!silent) { | |
1155 gclog_or_tty->print("permgen "); | |
1156 } | |
1157 perm_gen()->verify(allow_dirty); | |
1158 for (int i = _n_gens-1; i >= 0; i--) { | |
1159 Generation* g = _gens[i]; | |
1160 if (!silent) { | |
1161 gclog_or_tty->print(g->name()); | |
1162 gclog_or_tty->print(" "); | |
1163 } | |
1164 g->verify(allow_dirty); | |
1165 } | |
1166 if (!silent) { | |
1167 gclog_or_tty->print("remset "); | |
1168 } | |
1169 rem_set()->verify(); | |
1170 if (!silent) { | |
1171 gclog_or_tty->print("ref_proc "); | |
1172 } | |
1173 ReferenceProcessor::verify(); | |
1174 } | |
1175 | |
1176 void GenCollectedHeap::print() const { print_on(tty); } | |
1177 void GenCollectedHeap::print_on(outputStream* st) const { | |
1178 for (int i = 0; i < _n_gens; i++) { | |
1179 _gens[i]->print_on(st); | |
1180 } | |
1181 perm_gen()->print_on(st); | |
1182 } | |
1183 | |
1184 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1185 if (workers() != NULL) { | |
1186 workers()->threads_do(tc); | |
1187 } | |
1188 #ifndef SERIALGC | |
1189 if (UseConcMarkSweepGC) { | |
1190 ConcurrentMarkSweepThread::threads_do(tc); | |
1191 } | |
1192 #endif // SERIALGC | |
1193 } | |
1194 | |
1195 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1196 #ifndef SERIALGC | |
1197 if (UseParNewGC) { | |
1198 workers()->print_worker_threads_on(st); | |
1199 } | |
1200 if (UseConcMarkSweepGC) { | |
1201 ConcurrentMarkSweepThread::print_all_on(st); | |
1202 } | |
1203 #endif // SERIALGC | |
1204 } | |
1205 | |
1206 void GenCollectedHeap::print_tracing_info() const { | |
1207 if (TraceGen0Time) { | |
1208 get_gen(0)->print_summary_info(); | |
1209 } | |
1210 if (TraceGen1Time) { | |
1211 get_gen(1)->print_summary_info(); | |
1212 } | |
1213 } | |
1214 | |
1215 void GenCollectedHeap::print_heap_change(size_t prev_used) const { | |
1216 if (PrintGCDetails && Verbose) { | |
1217 gclog_or_tty->print(" " SIZE_FORMAT | |
1218 "->" SIZE_FORMAT | |
1219 "(" SIZE_FORMAT ")", | |
1220 prev_used, used(), capacity()); | |
1221 } else { | |
1222 gclog_or_tty->print(" " SIZE_FORMAT "K" | |
1223 "->" SIZE_FORMAT "K" | |
1224 "(" SIZE_FORMAT "K)", | |
1225 prev_used / K, used() / K, capacity() / K); | |
1226 } | |
1227 } | |
1228 | |
1229 // New method to print perm gen info with PrintGCDetails flag | |
1230 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const { | |
1231 gclog_or_tty->print(", [%s :", perm_gen()->short_name()); | |
1232 perm_gen()->print_heap_change(perm_prev_used); | |
1233 gclog_or_tty->print("]"); | |
1234 } | |
1235 | |
1236 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { | |
1237 private: | |
1238 bool _full; | |
1239 public: | |
1240 void do_generation(Generation* gen) { | |
1241 gen->gc_prologue(_full); | |
1242 } | |
1243 GenGCPrologueClosure(bool full) : _full(full) {}; | |
1244 }; | |
1245 | |
1246 void GenCollectedHeap::gc_prologue(bool full) { | |
1247 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
1248 | |
1249 always_do_update_barrier = false; | |
1250 // Fill TLAB's and such | |
1251 CollectedHeap::accumulate_statistics_all_tlabs(); | |
1252 ensure_parsability(true); // retire TLABs | |
1253 | |
1254 // Call allocation profiler | |
1255 AllocationProfiler::iterate_since_last_gc(); | |
1256 // Walk generations | |
1257 GenGCPrologueClosure blk(full); | |
1258 generation_iterate(&blk, false); // not old-to-young. | |
1259 perm_gen()->gc_prologue(full); | |
1260 }; | |
1261 | |
1262 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { | |
1263 private: | |
1264 bool _full; | |
1265 public: | |
1266 void do_generation(Generation* gen) { | |
1267 gen->gc_epilogue(_full); | |
1268 } | |
1269 GenGCEpilogueClosure(bool full) : _full(full) {}; | |
1270 }; | |
1271 | |
1272 void GenCollectedHeap::gc_epilogue(bool full) { | |
1273 // Remember if a partial collection of the heap failed, and | |
1274 // we did a complete collection. | |
1275 if (full && incremental_collection_will_fail()) { | |
1276 set_last_incremental_collection_failed(); | |
1277 } else { | |
1278 clear_last_incremental_collection_failed(); | |
1279 } | |
1280 // Clear the flag, if set; the generation gc_epilogues will set the | |
1281 // flag again if the condition persists despite the collection. | |
1282 clear_incremental_collection_will_fail(); | |
1283 | |
1284 #ifdef COMPILER2 | |
1285 assert(DerivedPointerTable::is_empty(), "derived pointer present"); | |
1286 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); | |
1287 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); | |
1288 #endif /* COMPILER2 */ | |
1289 | |
1290 resize_all_tlabs(); | |
1291 | |
1292 GenGCEpilogueClosure blk(full); | |
1293 generation_iterate(&blk, false); // not old-to-young. | |
1294 perm_gen()->gc_epilogue(full); | |
1295 | |
1296 always_do_update_barrier = UseConcMarkSweepGC; | |
1297 }; | |
1298 | |
263 | 1299 #ifndef PRODUCT |
1300 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { | |
1301 private: | |
1302 public: | |
1303 void do_generation(Generation* gen) { | |
1304 gen->record_spaces_top(); | |
1305 } | |
1306 }; | |
1307 | |
1308 void GenCollectedHeap::record_gen_tops_before_GC() { | |
1309 if (ZapUnusedHeapArea) { | |
1310 GenGCSaveTopsBeforeGCClosure blk; | |
1311 generation_iterate(&blk, false); // not old-to-young. | |
1312 perm_gen()->record_spaces_top(); | |
1313 } | |
1314 } | |
1315 #endif // not PRODUCT | |
1316 | |
0 | 1317 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
1318 public: | |
1319 void do_generation(Generation* gen) { | |
1320 gen->ensure_parsability(); | |
1321 } | |
1322 }; | |
1323 | |
1324 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { | |
1325 CollectedHeap::ensure_parsability(retire_tlabs); | |
1326 GenEnsureParsabilityClosure ep_cl; | |
1327 generation_iterate(&ep_cl, false); | |
1328 perm_gen()->ensure_parsability(); | |
1329 } | |
1330 | |
1331 oop GenCollectedHeap::handle_failed_promotion(Generation* gen, | |
1332 oop obj, | |
113 | 1333 size_t obj_size) { |
0 | 1334 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1335 HeapWord* result = NULL; | |
1336 | |
1337 // First give each higher generation a chance to allocate the promoted object. | |
1338 Generation* allocator = next_gen(gen); | |
1339 if (allocator != NULL) { | |
1340 do { | |
1341 result = allocator->allocate(obj_size, false); | |
1342 } while (result == NULL && (allocator = next_gen(allocator)) != NULL); | |
1343 } | |
1344 | |
1345 if (result == NULL) { | |
1346 // Then give gen and higher generations a chance to expand and allocate the | |
1347 // object. | |
1348 do { | |
1349 result = gen->expand_and_allocate(obj_size, false); | |
1350 } while (result == NULL && (gen = next_gen(gen)) != NULL); | |
1351 } | |
1352 | |
1353 if (result != NULL) { | |
1354 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); | |
1355 } | |
1356 return oop(result); | |
1357 } | |
1358 | |
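handle_failed_promotion() above (the code path touched by bug 6728478) falls back in two phases. A hedged sketch with illustrative types, not the HotSpot Generation API:

struct GenNode {
  GenNode* next_older;                           // NULL for the oldest gen
  virtual void* allocate(size_t words) = 0;
  virtual void* expand_and_allocate(size_t words) = 0;
  virtual ~GenNode() {}
};

void* promote_with_fallback(GenNode* gen, size_t words) {
  void* result = nullptr;
  // Phase 1: plain allocation in each successively older generation.
  for (GenNode* g = gen->next_older; g != nullptr && result == nullptr;
       g = g->next_older) {
    result = g->allocate(words);
  }
  // Phase 2: let the failing generation and everything older try to expand.
  for (GenNode* g = gen; g != nullptr && result == nullptr;
       g = g->next_older) {
    result = g->expand_and_allocate(words);
  }
  return result;                                 // NULL: promotion failed
}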
1359 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { | |
1360 jlong _time; // in ms | |
1361 jlong _now; // in ms | |
1362 | |
1363 public: | |
1364 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } | |
1365 | |
1366 jlong time() { return _time; } | |
1367 | |
1368 void do_generation(Generation* gen) { | |
1369 _time = MIN2(_time, gen->time_of_last_gc(_now)); | |
1370 } | |
1371 }; | |
1372 | |
1373 jlong GenCollectedHeap::millis_since_last_gc() { | |
1374 jlong now = os::javaTimeMillis(); | |
1375 GenTimeOfLastGCClosure tolgc_cl(now); | |
1376 // iterate over generations getting the oldest | |
1377 // time that a generation was collected | |
1378 generation_iterate(&tolgc_cl, false); | |
1379 tolgc_cl.do_generation(perm_gen()); | |
1380 // XXX Despite the assert above, since javaTimeMillis() | |
1381 // does not guarantee monotonically increasing return | |
1382 // values (note, I didn't say "strictly monotonic"), | |
1383 // we need to guard against getting back a time | |
1384 // later than now. This should be fixed by basing | |
1385 // on something like gethrtime() which guarantees | |
1386 // monotonicity. Note that cond_wait() is susceptible | |
1387 // to a similar problem, because its interface is | |
1388 // based on absolute time in the form of the | |
1389 // system time's notion of UTC. See also 4506635 | |
1390 // for yet another problem of similar nature. XXX | |
1391 jlong retVal = now - tolgc_cl.time(); | |
1392 if (retVal < 0) { | |
1393 NOT_PRODUCT(warning("time warp: %d", retVal);) | |
1394 return 0; | |
1395 } | |
1396 return retVal; | |
1397 } |
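
Because javaTimeMillis() is not monotonic, the method above clamps a negative delta to zero instead of returning a time from the future. The guard reduces to one line:

#include <algorithm>
#include <cstdint>

// Clamp the elapsed time so a backwards-stepping wall clock can never
// report a negative interval since the last GC.
int64_t millis_since(int64_t now_ms, int64_t last_gc_ms) {
  return std::max<int64_t>(int64_t{0}, now_ms - last_gc_ms);
}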