annotate src/share/vm/memory/genCollectedHeap.cpp @ 1355:6ccd32c284ac (Merge)

author:   kamg
date:     Wed, 07 Apr 2010 12:28:22 -0400
parents:  c4d722788ed6
children: 0bfd3fb24150
/*
 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
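
// Illustrative usage sketch (hedged; mirrors gen_process_strong_roots() later
// in this file, it is not additional code in the original): each parallel GC
// worker races to claim a task id from the SubTasksDone set, and exactly one
// claim per task succeeds:
//
//   if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
//     ... // only the claiming worker scans the younger generations
//   }
//   _gen_process_strong_tasks->all_tasks_completed();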

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}
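
// Illustrative diagram (editor's sketch, derived from the loop above; not in
// the original file): the single reserved range is carved front-to-back with
// first_part()/last_part(), leaving the tail for the permanent generation
// and its misc data/code areas:
//
//   [ gen 0 max_size | gen 1 max_size | ... | perm gen + misc data/code ]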


char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve heap higher.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}
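
// Illustrative sketch (hypothetical helper, not used above): the repeated
// "sum += x; if (sum < x) vm_exit_during_initialization(...)" checks in
// allocate() rely on unsigned wraparound; a wrapped sum is strictly smaller
// than the addend that was just added.
//
//   static bool size_add_overflows(size_t a, size_t b) {
//     size_t sum = a + b;   // unsigned addition wraps modulo 2^N
//     return sum < b;       // true iff a + b overflowed
//   }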


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations level and lower,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
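
// Illustrative sketch (hypothetical caller, not from this file): both
// overloads above publish the counter under FullGCCount_lock and notify_all(),
// so a requester could wait for a full collection to complete by sleeping on
// the same monitor (assuming an accessor for the completed count):
//
//   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (gch->full_collections_completed() <= count_before_request) {
//     ml.wait(Mutex::_no_safepoint_check_flag);
//   }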


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWord's. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}
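
// Illustrative note: with -XX:+UseConcMarkSweepGC and
// -XX:+ExplicitGCInvokesConcurrent, the predicate above routes an explicit
// System.gc() (GCCause::_java_lang_system_gc) or a GC-locker-induced GC
// (GCCause::_gc_locker) to collect_mostly_concurrent() via collect() below,
// instead of a stop-the-world full collection.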

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) { // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->setup_policy(clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, NULL, older_gens);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, &code_roots, older_gens);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
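
// Illustrative expansion sketch (hypothetical closure/suffix pair, shown only
// to clarify the macro above): for a pair such as (ScanClosure, _nv),
// ALL_SINCE_SAVE_MARKS_CLOSURES would generate roughly:
//
//   void GenCollectedHeap::oop_since_save_marks_iterate(int level,
//                                                       ScanClosure* cur,
//                                                       ScanClosure* older) {
//     _gens[level]->oop_since_save_marks_iterate_nv(cur);
//     for (int i = level+1; i < n_gens(); i++) {
//       _gens[i]->oop_since_save_marks_iterate_nv(older);
//     }
//     perm_gen()->oop_since_save_marks_iterate_nv(older);
//   }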

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,         // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC    ||
            VerifyDuringGC    ||
            VerifyBeforeExit  ||
            PrintAssembly     ||
            tty->count() != 0 ||   // already printing
            VerifyAfterGC, "too expensive");
#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the
// youngest generation.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size   /* size */,
                                  false  /* is_large_noref */,
                                  true   /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by res into decreasing size order,
// and set "res" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted         = smallest;
  }
  list = sorted;
}
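
// Worked example: repeatedly unlinking the smallest remaining block and
// pushing it onto the front of "sorted" yields the decreasing order the
// comment above promises. For block sizes (3, 1, 2):
//   remove 1 -> sorted: (1)
//   remove 2 -> sorted: (2, 1)
//   remove 3 -> sorted: (3, 2, 1)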

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

1240 void GenCollectedHeap::print() const { print_on(tty); } | |
1241 void GenCollectedHeap::print_on(outputStream* st) const { | |
1242 for (int i = 0; i < _n_gens; i++) { | |
1243 _gens[i]->print_on(st); | |
1244 } | |
1245 perm_gen()->print_on(st); | |
1246 } | |
1247 | |
1248 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { | |
1249 if (workers() != NULL) { | |
1250 workers()->threads_do(tc); | |
1251 } | |
1252 #ifndef SERIALGC | |
1253 if (UseConcMarkSweepGC) { | |
1254 ConcurrentMarkSweepThread::threads_do(tc); | |
1255 } | |
1256 #endif // SERIALGC | |
1257 } | |
1258 | |
1259 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { | |
1260 #ifndef SERIALGC | |
1261 if (UseParNewGC) { | |
1262 workers()->print_worker_threads_on(st); | |
1263 } | |
1264 if (UseConcMarkSweepGC) { | |
1265 ConcurrentMarkSweepThread::print_all_on(st); | |
1266 } | |
1267 #endif // SERIALGC | |
1268 } | |
1269 | |
1270 void GenCollectedHeap::print_tracing_info() const { | |
1271 if (TraceGen0Time) { | |
1272 get_gen(0)->print_summary_info(); | |
1273 } | |
1274 if (TraceGen1Time) { | |
1275 get_gen(1)->print_summary_info(); | |
1276 } | |
1277 } | |
1278 | |
1279 void GenCollectedHeap::print_heap_change(size_t prev_used) const { | |
1280 if (PrintGCDetails && Verbose) { | |
1281 gclog_or_tty->print(" " SIZE_FORMAT | |
1282 "->" SIZE_FORMAT | |
1283 "(" SIZE_FORMAT ")", | |
1284 prev_used, used(), capacity()); | |
1285 } else { | |
1286 gclog_or_tty->print(" " SIZE_FORMAT "K" | |
1287 "->" SIZE_FORMAT "K" | |
1288 "(" SIZE_FORMAT "K)", | |
1289 prev_used / K, used() / K, capacity() / K); | |
1290 } | |
1291 } | |
1292 | |
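For reference, the two branches produce output of these shapes; the numbers are illustrative, not measured:

  524288->131072(1048576)    (PrintGCDetails && Verbose: raw byte counts)
  512K->128K(1024K)          (default: scaled to kilobytes)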
1293 // New method to print perm gen info with the PrintGCDetails flag | |
1294 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const { | |
1295 gclog_or_tty->print(", [%s :", perm_gen()->short_name()); | |
1296 perm_gen()->print_heap_change(perm_prev_used); | |
1297 gclog_or_tty->print("]"); | |
1298 } | |
1299 | |
1300 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { | |
1301 private: | |
1302 bool _full; | |
1303 public: | |
1304 void do_generation(Generation* gen) { | |
1305 gen->gc_prologue(_full); | |
1306 } | |
1307 GenGCPrologueClosure(bool full) : _full(full) {} | |
1308 }; | |
1309 | |
1310 void GenCollectedHeap::gc_prologue(bool full) { | |
1311 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
1312 | |
1313 always_do_update_barrier = false; | |
1314 // Fill TLAB's and such | |
1315 CollectedHeap::accumulate_statistics_all_tlabs(); | |
1316 ensure_parsability(true); // retire TLABs | |
1317 | |
1318 // Call allocation profiler | |
1319 AllocationProfiler::iterate_since_last_gc(); | |
1320 // Walk generations | |
1321 GenGCPrologueClosure blk(full); | |
1322 generation_iterate(&blk, false); // not old-to-young. | |
1323 perm_gen()->gc_prologue(full); | |
1324 } | |
1325 | |
1326 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { | |
1327 private: | |
1328 bool _full; | |
1329 public: | |
1330 void do_generation(Generation* gen) { | |
1331 gen->gc_epilogue(_full); | |
1332 } | |
1333 GenGCEpilogueClosure(bool full) : _full(full) {} | |
1334 }; | |
1335 | |
1336 void GenCollectedHeap::gc_epilogue(bool full) { | |
1337 // Remember if a partial collection of the heap failed, and | |
1338 // we did a complete collection. | |
1339 if (full && incremental_collection_will_fail()) { | |
1340 set_last_incremental_collection_failed(); | |
1341 } else { | |
1342 clear_last_incremental_collection_failed(); | |
1343 } | |
1344 // Clear the flag, if set; the generation gc_epilogues will set the | |
1345 // flag again if the condition persists despite the collection. | |
1346 clear_incremental_collection_will_fail(); | |
1347 | |
1348 #ifdef COMPILER2 | |
1349 assert(DerivedPointerTable::is_empty(), "derived pointer present"); | |
1350 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); | |
1351 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); | |
1352 #endif /* COMPILER2 */ | |
1353 | |
1354 resize_all_tlabs(); | |
1355 | |
1356 GenGCEpilogueClosure blk(full); | |
1357 generation_iterate(&blk, false); // not old-to-young. | |
1358 perm_gen()->gc_epilogue(full); | |
1359 | |
1360 always_do_update_barrier = UseConcMarkSweepGC; | |
1361 } | |
1362 | |
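An abbreviated sketch of how the pair above brackets a collection; policy checks and the collection work itself are elided:

// Caller-side ordering (cf. do_collection() earlier in this file).
gc_prologue(complete);   // retire TLABs, run per-generation prologues
// ... collect the requested generations ...
gc_epilogue(complete);   // resize TLABs, record incremental-failure state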
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1363 #ifndef PRODUCT |
1364 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { | |
1365  private: | |
1366  public: | |
1367   void do_generation(Generation* gen) { | |
1368     gen->record_spaces_top(); | |
1369   } | |
1370 }; | |
1371 | |
1372 void GenCollectedHeap::record_gen_tops_before_GC() { | |
1373   if (ZapUnusedHeapArea) { | |
1374     GenGCSaveTopsBeforeGCClosure blk; | |
1375     generation_iterate(&blk, false); // not old-to-young. | |
1376     perm_gen()->record_spaces_top(); | |
1377   } | |
1378 } | |
1379 #endif // not PRODUCT | |
1380 |
0 | 1381 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
1382 public: | |
1383 void do_generation(Generation* gen) { | |
1384 gen->ensure_parsability(); | |
1385 } | |
1386 }; | |
1387 | |
1388 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { | |
1389 CollectedHeap::ensure_parsability(retire_tlabs); | |
1390 GenEnsureParsabilityClosure ep_cl; | |
1391 generation_iterate(&ep_cl, false); | |
1392 perm_gen()->ensure_parsability(); | |
1393 } | |
1394 | |
1395 oop GenCollectedHeap::handle_failed_promotion(Generation* gen, | |
1396 oop obj, | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1397 size_t obj_size) { |
0 | 1398 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1399 HeapWord* result = NULL; | |
1400 | |
1401 // First give each higher generation a chance to allocate the promoted object. | |
1402 Generation* allocator = next_gen(gen); | |
1403 if (allocator != NULL) { | |
1404 do { | |
1405 result = allocator->allocate(obj_size, false); | |
1406 } while (result == NULL && (allocator = next_gen(allocator)) != NULL); | |
1407 } | |
1408 | |
1409 if (result == NULL) { | |
1410 // Then give gen and higher generations a chance to expand and allocate the | |
1411 // object. | |
1412 do { | |
1413 result = gen->expand_and_allocate(obj_size, false); | |
1414 } while (result == NULL && (gen = next_gen(gen)) != NULL); | |
1415 } | |
1416 | |
1417 if (result != NULL) { | |
1418 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); | |
1419 } | |
1420 return oop(result); | |
1421 } | |
1422 | |
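This is the slow path reached when an in-place promotion fails. A simplified sketch of the caller side, modeled on Generation::promote in generation.cpp (the real method also has a test-mode promotion-failure hook):

oop Generation::promote(oop obj, size_t obj_size) {
  // Try the target generation's own allocation first...
  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  }
  // ...then fall back to the heap-level handler defined above.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->handle_failed_promotion(this, obj, obj_size);
}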
1423 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { | |
1424 jlong _time; // in ms | |
1425 jlong _now; // in ms | |
1426 | |
1427 public: | |
1428 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } | |
1429 | |
1430 jlong time() { return _time; } | |
1431 | |
1432 void do_generation(Generation* gen) { | |
1433 _time = MIN2(_time, gen->time_of_last_gc(_now)); | |
1434 } | |
1435 }; | |
1436 | |
1437 jlong GenCollectedHeap::millis_since_last_gc() { | |
1438 jlong now = os::javaTimeMillis(); | |
1439 GenTimeOfLastGCClosure tolgc_cl(now); | |
1440 // iterate over generations getting the oldest | |
1441 // time that a generation was collected | |
1442 generation_iterate(&tolgc_cl, false); | |
1443 tolgc_cl.do_generation(perm_gen()); | |
1444 // XXX Since javaTimeMillis() | |
1445 // does not guarantee monotonically increasing return | |
1446 // values (note, I didn't say "strictly monotonic"), | |
1447 // we need to guard against getting back a time | |
1448 // later than now. This should be fixed by basing | |
1449 // it on something like gethrtime(), which guarantees | |
1450 // monotonicity. Note that cond_wait() is susceptible | |
1451 // to a similar problem, because its interface is | |
1452 // based on absolute time in the form of the | |
1453 // system time's notion of UTC. See also 4506635 | |
1454 // for yet another problem of a similar nature. XXX | |
1455 jlong retVal = now - tolgc_cl.time(); | |
1456 if (retVal < 0) { | |
1457 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);) | |
1458 return 0; | |
1459 } | |
1460 return retVal; | |
1461 } |
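A sketch of a policy built on this query; kMaxQuietMillis is a hypothetical threshold, not a real VM flag:

// Force a collection if the heap has been quiet for too long (illustrative).
const jlong kMaxQuietMillis = 60 * 1000;
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (gch->millis_since_last_gc() > kMaxQuietMillis) {
  gch->collect(GCCause::_java_lang_system_gc);
}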