annotate src/share/vm/memory/genCollectedHeap.cpp @ 1064:473cce303f13
6887571: Increase default heap config sizes
Summary: Apply modification of existing server heap size ergo to all collectors except CMS.
Reviewed-by: jmasa, ysr, xlu
author: phh
date: Wed, 28 Oct 2009 16:25:51 -0400
parents: 148e5441d916
children: 4e6abf09f540 7b0e9cba0307

/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }
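  // (Note: the dummy blocks reserved above are intentionally never released;
  // keeping them mapped is what pushes the subsequent heap reservation to a
  // lower address.)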

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}


char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
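  // Note on the overflow checks in this function: total_reserved is unsigned,
  // so if a sum comes out smaller than the addend just added, the addition
  // wrapped around; initialization is aborted rather than silently truncating
  // the requested sizes.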
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
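    // The compressed-oops reservation below is attempted in up to three
    // modes, in decreasing order of efficiency: unscaled, then zero-based,
    // then heap-based, falling back whenever the preferred base address
    // cannot be reserved.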
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve heap higher.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at and below level,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
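
// Illustrative use of the overload above (a sketch, not code from this file):
// a concurrent collector samples total_full_collections() when its cycle
// begins and passes that snapshot in when the cycle ends, e.g.
//
//   unsigned int count = gch->total_full_collections(); // at cycle start
//   ...                                                 // concurrent cycle
//   gch->update_full_collections_completed(count);      // at cycle end
//
// The conditional update makes a late call harmless if a STW full GC has
// already advanced the counter in the meantime.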

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWord's. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
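
// Allocation policy for attempt_allocation below: offer the request to each
// generation in ascending order; a generation is tried only if its
// should_allocate() accepts the size and allocation kind (and with
// first_only, give up after the first eligible generation fails).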
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}
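
// An explicit collection request becomes a mostly-concurrent cycle only when
// both -XX:+UseConcMarkSweepGC and -XX:+ExplicitGCInvokesConcurrent are set;
// otherwise it is serviced by a stop-the-world collection.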
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->setup_policy(clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, NULL, older_gens);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, &code_roots, older_gens);
  }

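  // Scanning the younger generations as roots is claimed as a single
  // parallel subtask (via SubTasksDone), so at most one worker thread
  // executes the loop below; see GCH_PS_younger_gens above.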
  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,         // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
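
// Note on the pattern above: the collection counts are read while the
// Heap_lock is still held and passed to the VM operation, which lets the
// operation detect (and skip as redundant) a collection that completed
// between the release of the lock and the moment the operation actually
// runs.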

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            VerifyAfterGC, "too expensive");
  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the youngest
// generation.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size   /* size */,
                                  false  /* is_large_noref */,
                                  true   /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      =  cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
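
// (The loop above is a selection sort: each pass unlinks the smallest
// remaining block and pushes it onto the front of "sorted", so the largest
// blocks end up at the head of the final list.)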

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void GenCollectedHeap::print() const { print_on(tty); }
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}
1273 | |
1274 void GenCollectedHeap::print_heap_change(size_t prev_used) const { | |
1275 if (PrintGCDetails && Verbose) { | |
1276 gclog_or_tty->print(" " SIZE_FORMAT | |
1277 "->" SIZE_FORMAT | |
1278 "(" SIZE_FORMAT ")", | |
1279 prev_used, used(), capacity()); | |
1280 } else { | |
1281 gclog_or_tty->print(" " SIZE_FORMAT "K" | |
1282 "->" SIZE_FORMAT "K" | |
1283 "(" SIZE_FORMAT "K)", | |
1284 prev_used / K, used() / K, capacity() / K); | |
1285 } | |
1286 } | |
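// Editor's illustration (made-up sizes): with PrintGCDetails alone the
// output looks like " 8192K->2048K(65536K)"; adding Verbose prints exact
// byte counts instead, e.g. " 8388608->2097152(67108864)".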

// Print perm gen info with the PrintGCDetails flag.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLABs and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
  perm_gen()->gc_prologue(full);
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
  // Remember if a partial collection of the heap failed and
  // we did a complete collection instead.
  if (full && incremental_collection_will_fail()) {
    set_last_incremental_collection_failed();
  } else {
    clear_last_incremental_collection_failed();
  }
  // Clear the flag, if set; the generation gc_epilogues will set the
  // flag again if the condition persists despite the collection.
  clear_incremental_collection_will_fail();

#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
  perm_gen()->gc_epilogue(full);

  always_do_update_barrier = UseConcMarkSweepGC;
}
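// Editor's note on the COMPILER2 check above: compiled code performs
// inline (bump-the-pointer) allocation by advancing 'top' and comparing
// against 'end'; the guarantee verifies that 'end' sits far enough below
// the top of the address space that a bump of up to FastAllocateSizeLimit
// cannot wrap around.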

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false); // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}
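// Editor's note: "parsability" here means the heap can be walked linearly,
// object by object; retiring or filling TLABs (see gc_prologue above)
// removes the unallocated gaps that would otherwise break such a walk.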

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}
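// Editor's illustration (hypothetical caller; gch, old_gen and obj are
// placeholder names): a promoting collector that cannot copy an object
// into its target generation can fall back on the heap:
//
//   oop new_obj = gch->handle_failed_promotion(old_gen, obj, (size_t)obj->size());
//   if (new_obj == NULL) {
//     // No generation could allocate or expand; the caller handles the failure.
//   }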

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeMillis();
  GenTimeOfLastGCClosure tolgc_cl(now);
  // Iterate over generations, getting the oldest
  // time that a generation was collected.
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
  // XXX Since javaTimeMillis() does not guarantee
  // monotonically increasing return values (note, I didn't
  // say "strictly monotonic"), we need to guard against
  // getting back a time later than now. This should be
  // fixed by basing on something like gethrtime(), which
  // guarantees monotonicity. Note that cond_wait() is
  // susceptible to a similar problem, because its interface
  // is based on absolute time in the form of the system
  // time's notion of UTC. See also 4506635 for yet another
  // problem of similar nature. XXX
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}
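// Editor's note: because negative deltas are clamped to zero above, callers
// always observe a non-negative interval, even across a backwards step of
// the system clock:
//
//   jlong quiet_ms = GenCollectedHeap::heap()->millis_since_last_gc();
//   // quiet_ms >= 0 by construction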