Mercurial > hg > truffle
annotate src/share/vm/memory/genCollectedHeap.cpp @ 3772:6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word, and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr
author | johnc |
---|---|
date | Tue, 14 Jun 2011 11:01:10 -0700 |
parents | 2aa9ddbb9e60 |
children | c9ca3f51cf41 |
rev | line source |
---|---|
0 | 1 /* |
2426
1d1603768966
7010070: Update all 2010 Oracle-changed OpenJDK files to have the proper copyright dates - second pass
trims
parents:
2177
diff
changeset
|
2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1520
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1520
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1520
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/symbolTable.hpp" | |
27 #include "classfile/systemDictionary.hpp" | |
28 #include "classfile/vmSymbols.hpp" | |
29 #include "code/icBuffer.hpp" | |
30 #include "gc_implementation/shared/collectorCounters.hpp" | |
31 #include "gc_implementation/shared/vmGCOperations.hpp" | |
32 #include "gc_interface/collectedHeap.inline.hpp" | |
33 #include "memory/compactPermGen.hpp" | |
34 #include "memory/filemap.hpp" | |
35 #include "memory/gcLocker.inline.hpp" | |
36 #include "memory/genCollectedHeap.hpp" | |
37 #include "memory/genOopClosures.inline.hpp" | |
38 #include "memory/generation.inline.hpp" | |
39 #include "memory/generationSpec.hpp" | |
40 #include "memory/permGen.hpp" | |
41 #include "memory/resourceArea.hpp" | |
42 #include "memory/sharedHeap.hpp" | |
43 #include "memory/space.hpp" | |
44 #include "oops/oop.inline.hpp" | |
45 #include "oops/oop.inline2.hpp" | |
46 #include "runtime/aprofiler.hpp" | |
47 #include "runtime/biasedLocking.hpp" | |
48 #include "runtime/fprofiler.hpp" | |
49 #include "runtime/handles.hpp" | |
50 #include "runtime/handles.inline.hpp" | |
51 #include "runtime/java.hpp" | |
52 #include "runtime/vmThread.hpp" | |
53 #include "services/memoryService.hpp" | |
54 #include "utilities/vmError.hpp" | |
55 #include "utilities/workgroup.hpp" | |
56 #ifndef SERIALGC | |
57 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" | |
58 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" | |
59 #endif | |
0 | 60 |
61 GenCollectedHeap* GenCollectedHeap::_gch; | |
62 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) | |
63 | |
64 // The set of potentially parallel tasks in strong root scanning. | |
65 enum GCH_process_strong_roots_tasks { | |
66 // We probably want to parallelize both of these internally, but for now... | |
67 GCH_PS_younger_gens, | |
68 // Leave this one last. | |
69 GCH_PS_NumElements | |
70 }; | |
71 | |
72 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) : | |
73 SharedHeap(policy), | |
74 _gen_policy(policy), | |
75 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)), | |
76 _full_collections_completed(0) | |
77 { | |
78 if (_gen_process_strong_tasks == NULL || | |
79 !_gen_process_strong_tasks->valid()) { | |
80 vm_exit_during_initialization("Failed necessary allocation."); | |
81 } | |
82 assert(policy != NULL, "Sanity check"); | |
83 _preloading_shared_classes = false; | |
84 } | |
85 | |
86 jint GenCollectedHeap::initialize() { | |
1166 | 87 CollectedHeap::pre_initialize(); |
88 | |
0 | 89 int i; |
90 _n_gens = gen_policy()->number_of_generations(); | |
91 | |
92 // While there are no constraints in the GC code that HeapWordSize | |
93 // be any particular value, there are multiple other areas in the | |
94 // system which believe this to be true (e.g. oop->object_size in some | |
95 // cases incorrectly returns the size in wordSize units rather than | |
96 // HeapWordSize). | |
97 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); | |
98 | |
99 // The heap must be at least as aligned as generations. | |
100 size_t alignment = Generation::GenGrain; | |
101 | |
102 _gen_specs = gen_policy()->generations(); | |
103 PermanentGenerationSpec *perm_gen_spec = | |
104 collector_policy()->permanent_generation(); | |
105 | |
106 // Make sure the sizes are all aligned. | |
107 for (i = 0; i < _n_gens; i++) { | |
108 _gen_specs[i]->align(alignment); | |
109 } | |
110 perm_gen_spec->align(alignment); | |
111 | |
112 // If we are dumping the heap, then allocate a wasted block of address | |
113 // space in order to push the heap to a lower address. This extra | |
114 // address range allows for other (or larger) libraries to be loaded | |
115 // without them occupying the space required for the shared spaces. | |
116 | |
117 if (DumpSharedSpaces) { | |
118 uintx reserved = 0; | |
119 uintx block_size = 64*1024*1024; | |
120 while (reserved < SharedDummyBlockSize) { | |
121 char* dummy = os::reserve_memory(block_size); | |
122 reserved += block_size; | |
123 } | |
124 } | |
125 | |
126 // Allocate space for the heap. | |
127 | |
128 char* heap_address; | |
129 size_t total_reserved = 0; | |
130 int n_covered_regions = 0; | |
131 ReservedSpace heap_rs(0); | |
132 | |
133 heap_address = allocate(alignment, perm_gen_spec, &total_reserved, | |
134 &n_covered_regions, &heap_rs); | |
135 | |
136 if (UseSharedSpaces) { | |
137 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) { | |
138 if (heap_rs.is_reserved()) { | |
139 heap_rs.release(); | |
140 } | |
141 FileMapInfo* mapinfo = FileMapInfo::current_info(); | |
142 mapinfo->fail_continue("Unable to reserve shared region."); | |
143 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions, | |
144 &heap_rs); | |
145 } | |
146 } | |
147 | |
148 if (!heap_rs.is_reserved()) { | |
149 vm_shutdown_during_initialization( | |
150 "Could not reserve enough space for object heap"); | |
151 return JNI_ENOMEM; | |
152 } | |
153 | |
154 _reserved = MemRegion((HeapWord*)heap_rs.base(), | |
155 (HeapWord*)(heap_rs.base() + heap_rs.size())); | |
156 | |
157 // It is important to do this in a way such that concurrent readers can't | |
158 // temporarily think something is in the heap. (Seen this happen in asserts.) | |
159 _reserved.set_word_size(0); | |
160 _reserved.set_start((HeapWord*)heap_rs.base()); | |
161 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size() | |
162 - perm_gen_spec->misc_code_size(); | |
163 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size)); | |
164 | |
165 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions); | |
166 set_barrier_set(rem_set()->bs()); | |
1166 | 167 |
0 | 168 _gch = this; |
169 | |
170 for (i = 0; i < _n_gens; i++) { | |
171 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), | |
172 UseSharedSpaces, UseSharedSpaces); | |
173 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set()); | |
174 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size()); | |
175 } | |
176 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set()); | |
177 | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
178 clear_incremental_collection_failed(); |
0 | 179 |
180 #ifndef SERIALGC | |
181 // If we are running CMS, create the collector responsible | |
182 // for collecting the CMS generations. | |
183 if (collector_policy()->is_concurrent_mark_sweep_policy()) { | |
184 bool success = create_cms_collector(); | |
185 if (!success) return JNI_ENOMEM; | |
186 } | |
187 #endif // SERIALGC | |
188 | |
189 return JNI_OK; | |
190 } | |
191 | |
192 | |
193 char* GenCollectedHeap::allocate(size_t alignment, | |
194 PermanentGenerationSpec* perm_gen_spec, | |
195 size_t* _total_reserved, | |
196 int* _n_covered_regions, | |
197 ReservedSpace* heap_rs){ | |
198 const char overflow_msg[] = "The size of the object heap + VM data exceeds " | |
199 "the maximum representable size"; | |
200 | |
201 // Now figure out the total size. | |
202 size_t total_reserved = 0; | |
203 int n_covered_regions = 0; | |
204 const size_t pageSize = UseLargePages ? | |
205 os::large_page_size() : os::vm_page_size(); | |
206 | |
207 for (int i = 0; i < _n_gens; i++) { | |
208 total_reserved += _gen_specs[i]->max_size(); | |
209 if (total_reserved < _gen_specs[i]->max_size()) { | |
210 vm_exit_during_initialization(overflow_msg); | |
211 } | |
212 n_covered_regions += _gen_specs[i]->n_covered_regions(); | |
213 } | |
1626
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
214 assert(total_reserved % pageSize == 0, |
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
215 err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize=" |
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
216 SIZE_FORMAT, total_reserved, pageSize)); |
0 | 217 total_reserved += perm_gen_spec->max_size(); |
1626
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
218 assert(total_reserved % pageSize == 0, |
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
219 err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize=" |
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
220 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved, |
1a11430e0326
6888573: class data sharing does not always disable large pages
jcoomes
parents:
1552
diff
changeset
|
221 pageSize, perm_gen_spec->max_size())); |
0 | 222 |
223 if (total_reserved < perm_gen_spec->max_size()) { | |
224 vm_exit_during_initialization(overflow_msg); | |
225 } | |
226 n_covered_regions += perm_gen_spec->n_covered_regions(); | |
227 | |
228 // Add the size of the data area which shares the same reserved area | |
229 // as the heap, but which is not actually part of the heap. | |
230 size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size(); | |
231 | |
232 total_reserved += s; | |
233 if (total_reserved < s) { | |
234 vm_exit_during_initialization(overflow_msg); | |
235 } | |
236 | |
237 if (UseLargePages) { | |
238 assert(total_reserved != 0, "total_reserved cannot be 0"); | |
239 total_reserved = round_to(total_reserved, os::large_page_size()); | |
240 if (total_reserved < os::large_page_size()) { | |
241 vm_exit_during_initialization(overflow_msg); | |
242 } | |
243 } | |
244 | |
245 // Calculate the address at which the heap must reside in order for | |
246 // the shared data to be at the required address. | |
247 | |
248 char* heap_address; | |
249 if (UseSharedSpaces) { | |
250 | |
251 // Calculate the address of the first word beyond the heap. | |
252 FileMapInfo* mapinfo = FileMapInfo::current_info(); | |
253 int lr = CompactingPermGenGen::n_regions - 1; | |
254 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment); | |
255 heap_address = mapinfo->region_base(lr) + capacity; | |
256 | |
257 // Calculate the address of the first word of the heap. | |
258 heap_address -= total_reserved; | |
259 } else { | |
260 heap_address = NULL; // any address will do. | |
642
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
261 if (UseCompressedOops) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
262 heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
263 *_total_reserved = total_reserved; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
264 *_n_covered_regions = n_covered_regions; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
265 *heap_rs = ReservedHeapSpace(total_reserved, alignment, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
266 UseLargePages, heap_address); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
267 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
268 if (heap_address != NULL && !heap_rs->is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
269 // Failed to reserve at specified address - the requested memory |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
270 // region is taken already, for example, by 'java' launcher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
271 // Try again to reserve heap higher. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
272 heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
273 *heap_rs = ReservedHeapSpace(total_reserved, alignment, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
274 UseLargePages, heap_address); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
275 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
276 if (heap_address != NULL && !heap_rs->is_reserved()) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
277 // Failed to reserve at specified address again - give up. |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
278 heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
279 assert(heap_address == NULL, ""); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
280 *heap_rs = ReservedHeapSpace(total_reserved, alignment, |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
281 UseLargePages, heap_address); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
282 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
283 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
284 return heap_address; |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
615
diff
changeset
|
285 } |
0 | 286 } |
287 | |
288 *_total_reserved = total_reserved; | |
289 *_n_covered_regions = n_covered_regions; | |
237
1fdb98a17101
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
196
diff
changeset
|
290 *heap_rs = ReservedHeapSpace(total_reserved, alignment, |
1fdb98a17101
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
196
diff
changeset
|
291 UseLargePages, heap_address); |
0 | 292 |
293 return heap_address; | |
294 } | |
295 | |
296 | |
297 void GenCollectedHeap::post_initialize() { | |
298 SharedHeap::post_initialize(); | |
299 TwoGenerationCollectorPolicy *policy = | |
300 (TwoGenerationCollectorPolicy *)collector_policy(); | |
301 guarantee(policy->is_two_generation_policy(), "Illegal policy type"); | |
302 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0); | |
303 assert(def_new_gen->kind() == Generation::DefNew || | |
304 def_new_gen->kind() == Generation::ParNew || | |
305 def_new_gen->kind() == Generation::ASParNew, | |
306 "Wrong generation kind"); | |
307 | |
308 Generation* old_gen = get_gen(1); | |
309 assert(old_gen->kind() == Generation::ConcurrentMarkSweep || | |
310 old_gen->kind() == Generation::ASConcurrentMarkSweep || | |
311 old_gen->kind() == Generation::MarkSweepCompact, | |
312 "Wrong generation kind"); | |
313 | |
314 policy->initialize_size_policy(def_new_gen->eden()->capacity(), | |
315 old_gen->capacity(), | |
316 def_new_gen->from()->capacity()); | |
317 policy->initialize_gc_policy_counters(); | |
318 } | |
319 | |
320 void GenCollectedHeap::ref_processing_init() { | |
321 SharedHeap::ref_processing_init(); | |
322 for (int i = 0; i < _n_gens; i++) { | |
323 _gens[i]->ref_processor_init(); | |
324 } | |
325 } | |
326 | |
327 size_t GenCollectedHeap::capacity() const { | |
328 size_t res = 0; | |
329 for (int i = 0; i < _n_gens; i++) { | |
330 res += _gens[i]->capacity(); | |
331 } | |
332 return res; | |
333 } | |
334 | |
335 size_t GenCollectedHeap::used() const { | |
336 size_t res = 0; | |
337 for (int i = 0; i < _n_gens; i++) { | |
338 res += _gens[i]->used(); | |
339 } | |
340 return res; | |
341 } | |
342 | |
343 // Save the "used_region" for generations level and lower, | |
344 // and, if perm is true, for perm gen. | |
345 void GenCollectedHeap::save_used_regions(int level, bool perm) { | |
346 assert(level < _n_gens, "Illegal level parameter"); | |
347 for (int i = level; i >= 0; i--) { | |
348 _gens[i]->save_used_region(); | |
349 } | |
350 if (perm) { | |
351 perm_gen()->save_used_region(); | |
352 } | |
353 } | |
354 | |
355 size_t GenCollectedHeap::max_capacity() const { | |
356 size_t res = 0; | |
357 for (int i = 0; i < _n_gens; i++) { | |
358 res += _gens[i]->max_capacity(); | |
359 } | |
360 return res; | |
361 } | |
362 | |
363 // Update the _full_collections_completed counter | |
364 // at the end of a stop-world full GC. | |
365 unsigned int GenCollectedHeap::update_full_collections_completed() { | |
366 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); | |
367 assert(_full_collections_completed <= _total_full_collections, | |
368 "Can't complete more collections than were started"); | |
369 _full_collections_completed = _total_full_collections; | |
370 ml.notify_all(); | |
371 return _full_collections_completed; | |
372 } | |
373 | |
374 // Update the _full_collections_completed counter, as appropriate, | |
375 // at the end of a concurrent GC cycle. Note the conditional update | |
376 // below to allow this method to be called by a concurrent collector | |
377 // without synchronizing in any manner with the VM thread (which | |
378 // may already have initiated a STW full collection "concurrently"). | |
379 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) { | |
380 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); | |
381 assert((_full_collections_completed <= _total_full_collections) && | |
382 (count <= _total_full_collections), | |
383 "Can't complete more collections than were started"); | |
384 if (count > _full_collections_completed) { | |
385 _full_collections_completed = count; | |
386 ml.notify_all(); | |
387 } | |
388 return _full_collections_completed; | |
389 } | |
390 | |
391 | |
392 #ifndef PRODUCT | |
393 // Override of memory state checking method in CollectedHeap: | |
394 // Some collectors (CMS for example) can't have badHeapWordVal written | |
395 // in the first two words of an object. (For instance , in the case of | |
396 // CMS these words hold state used to synchronize between certain | |
397 // (concurrent) GC steps and direct allocating mutators.) | |
398 // The skip_header_HeapWords() method below, allows us to skip | |
399 // over the requisite number of HeapWord's. Note that (for | |
400 // generational collectors) this means that those many words are | |
401 // skipped in each object, irrespective of the generation in which | |
402 // that object lives. The resultant loss of precision seems to be | |
403 // harmless and the pain of avoiding that imprecision appears somewhat | |
404 // higher than we are prepared to pay for such rudimentary debugging | |
405 // support. | |
406 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, | |
407 size_t size) { | |
408 if (CheckMemoryInitialization && ZapUnusedHeapArea) { | |
409 // We are asked to check a size in HeapWords, | |
410 // but the memory is mangled in juint words. | |
411 juint* start = (juint*) (addr + skip_header_HeapWords()); | |
412 juint* end = (juint*) (addr + size); | |
413 for (juint* slot = start; slot < end; slot += 1) { | |
414 assert(*slot == badHeapWordVal, | |
415 "Found non badHeapWordValue in pre-allocation check"); | |
416 } | |
417 } | |
418 } | |
419 #endif | |
420 | |
421 HeapWord* GenCollectedHeap::attempt_allocation(size_t size, | |
422 bool is_tlab, | |
423 bool first_only) { | |
424 HeapWord* res; | |
425 for (int i = 0; i < _n_gens; i++) { | |
426 if (_gens[i]->should_allocate(size, is_tlab)) { | |
427 res = _gens[i]->allocate(size, is_tlab); | |
428 if (res != NULL) return res; | |
429 else if (first_only) break; | |
430 } | |
431 } | |
432 // Otherwise... | |
433 return NULL; | |
434 } | |
435 | |
436 HeapWord* GenCollectedHeap::mem_allocate(size_t size, | |
437 bool is_large_noref, | |
438 bool is_tlab, | |
439 bool* gc_overhead_limit_was_exceeded) { | |
440 return collector_policy()->mem_allocate_work(size, | |
441 is_tlab, | |
442 gc_overhead_limit_was_exceeded); | |
443 } | |
444 | |
445 bool GenCollectedHeap::must_clear_all_soft_refs() { | |
446 return _gc_cause == GCCause::_last_ditch_collection; | |
447 } | |
448 | |
449 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { | |
1520
bb843ebc7c55
6919638: CMS: ExplicitGCInvokesConcurrent misinteracts with gc locker
ysr
parents:
1387
diff
changeset
|
450 return UseConcMarkSweepGC && |
bb843ebc7c55
6919638: CMS: ExplicitGCInvokesConcurrent misinteracts with gc locker
ysr
parents:
1387
diff
changeset
|
451 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
bb843ebc7c55
6919638: CMS: ExplicitGCInvokesConcurrent misinteracts with gc locker
ysr
parents:
1387
diff
changeset
|
452 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
0 | 453 } |
454 | |
455 void GenCollectedHeap::do_collection(bool full, | |
456 bool clear_all_soft_refs, | |
457 size_t size, | |
458 bool is_tlab, | |
459 int max_level) { | |
460 bool prepared_for_verification = false; | |
461 ResourceMark rm; | |
462 DEBUG_ONLY(Thread* my_thread = Thread::current();) | |
463 | |
464 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
465 assert(my_thread->is_VM_thread() || | |
466 my_thread->is_ConcurrentGC_thread(), | |
467 "incorrect thread type capability"); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
468 assert(Heap_lock->is_locked(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
469 "the requesting thread should have the Heap_lock"); |
0 | 470 guarantee(!is_gc_active(), "collection is not reentrant"); |
471 assert(max_level < n_gens(), "sanity check"); | |
472 | |
473 if (GC_locker::check_active_before_gc()) { | |
474 return; // GC is disabled (e.g. JNI GetXXXCritical operation) | |
475 } | |
476 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
477 const bool do_clear_all_soft_refs = clear_all_soft_refs || |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
478 collector_policy()->should_clear_all_soft_refs(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
479 |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
480 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
481 |
0 | 482 const size_t perm_prev_used = perm_gen()->used(); |
483 | |
484 if (PrintHeapAtGC) { | |
485 Universe::print_heap_before_gc(); | |
486 if (Verbose) { | |
487 gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause())); | |
488 } | |
489 } | |
490 | |
491 { | |
492 FlagSetting fl(_is_gc_active, true); | |
493 | |
494 bool complete = full && (max_level == (n_gens()-1)); | |
495 const char* gc_cause_str = "GC "; | |
496 if (complete) { | |
497 GCCause::Cause cause = gc_cause(); | |
498 if (cause == GCCause::_java_lang_system_gc) { | |
499 gc_cause_str = "Full GC (System) "; | |
500 } else { | |
501 gc_cause_str = "Full GC "; | |
502 } | |
503 } | |
504 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
505 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
506 TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty); | |
507 | |
508 gc_prologue(complete); | |
509 increment_total_collections(complete); | |
510 | |
511 size_t gch_prev_used = used(); | |
512 | |
513 int starting_level = 0; | |
514 if (full) { | |
515 // Search for the oldest generation which will collect all younger | |
516 // generations, and start collection loop there. | |
517 for (int i = max_level; i >= 0; i--) { | |
518 if (_gens[i]->full_collects_younger_generations()) { | |
519 starting_level = i; | |
520 break; | |
521 } | |
522 } | |
523 } | |
524 | |
525 bool must_restore_marks_for_biased_locking = false; | |
526 | |
527 int max_level_collected = starting_level; | |
528 for (int i = starting_level; i <= max_level; i++) { | |
529 if (_gens[i]->should_collect(full, size, is_tlab)) { | |
880
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
530 if (i == n_gens() - 1) { // a major collection is to happen |
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
531 if (!complete) { |
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
532 // The full_collections increment was missed above. |
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
533 increment_total_full_collections(); |
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
534 } |
615
c6c601a0f2d6
6797870: Add -XX:+{HeapDump,PrintClassHistogram}{Before,After}FullGC
ysr
parents:
546
diff
changeset
|
535 pre_full_gc_dump(); // do any pre full gc dumps |
880
2b4230d1e589
6862295: JDWP threadid changes during debugging session (leading to ingored breakpoints)
dcubed
parents:
457
diff
changeset
|
536 } |
0 | 537 // Timer for individual generations. Last argument is false: no CR |
538 TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); | |
539 TraceCollectorStats tcs(_gens[i]->counters()); | |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
2426
diff
changeset
|
540 TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); |
0 | 541 |
542 size_t prev_used = _gens[i]->used(); | |
543 _gens[i]->stat_record()->invocations++; | |
544 _gens[i]->stat_record()->accumulated_time.start(); | |
545 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
546 // Must be done anew before each collection because |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
547 // a previous collection will do mangling and will |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
548 // change top of some spaces. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
549 record_gen_tops_before_GC(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
550 |
0 | 551 if (PrintGC && Verbose) { |
552 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, | |
553 i, | |
554 _gens[i]->stat_record()->invocations, | |
555 size*HeapWordSize); | |
556 } | |
557 | |
558 if (VerifyBeforeGC && i >= VerifyGCLevel && | |
559 total_collections() >= VerifyGCStartAt) { | |
560 HandleMark hm; // Discard invalid handles created during verification | |
561 if (!prepared_for_verification) { | |
562 prepare_for_verify(); | |
563 prepared_for_verification = true; | |
564 } | |
565 gclog_or_tty->print(" VerifyBeforeGC:"); | |
566 Universe::verify(true); | |
567 } | |
568 COMPILER2_PRESENT(DerivedPointerTable::clear()); | |
569 | |
570 if (!must_restore_marks_for_biased_locking && | |
571 _gens[i]->performs_in_place_marking()) { | |
572 // We perform this mark word preservation work lazily | |
573 // because it's only at this point that we know whether we | |
574 // absolutely have to do it; we want to avoid doing it for | |
575 // scavenge-only collections where it's unnecessary | |
576 must_restore_marks_for_biased_locking = true; | |
577 BiasedLocking::preserve_marks(); | |
578 } | |
579 | |
580 // Do collection work | |
581 { | |
582 // Note on ref discovery: For what appear to be historical reasons, | |
583 // GCH enables and disables (by enqueueing) refs discovery. | |
584 // In the future this should be moved into the generation's | |
585 // collect method so that ref discovery and enqueueing concerns | |
586 // are local to a generation. The collect method could return | |
587 // an appropriate indication in the case that notification on | |
588 // the ref lock was needed. This will make the treatment of | |
589 // weak refs more uniform (and indeed remove such concerns | |
590 // from GCH). XXX | |
591 | |
592 HandleMark hm; // Discard invalid handles created during gc | |
593 save_marks(); // save marks for all gens | |
594 // We want to discover references, but not process them yet. | |
595 // This mode is disabled in process_discovered_references if the | |
596 // generation does some collection work, or in | |
597 // enqueue_discovered_references if the generation returns | |
598 // without doing any work. | |
599 ReferenceProcessor* rp = _gens[i]->ref_processor(); | |
600 // If the discovery of ("weak") refs in this generation is | |
601 // atomic wrt other collectors in this configuration, we | |
602 // are guaranteed to have empty discovered ref lists. | |
603 if (rp->discovery_is_atomic()) { | |
604 rp->verify_no_references_recorded(); | |
605 rp->enable_discovery(); | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
606 rp->setup_policy(do_clear_all_soft_refs); |
0 | 607 } else { |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
269
diff
changeset
|
608 // collect() below will enable discovery as appropriate |
0 | 609 } |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1170
diff
changeset
|
610 _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab); |
0 | 611 if (!rp->enqueuing_is_done()) { |
612 rp->enqueue_discovered_references(); | |
613 } else { | |
614 rp->set_enqueuing_is_done(false); | |
615 } | |
616 rp->verify_no_references_recorded(); | |
617 } | |
618 max_level_collected = i; | |
619 | |
620 // Determine if allocation request was met. | |
621 if (size > 0) { | |
622 if (!is_tlab || _gens[i]->supports_tlab_allocation()) { | |
623 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) { | |
624 size = 0; | |
625 } | |
626 } | |
627 } | |
628 | |
629 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); | |
630 | |
631 _gens[i]->stat_record()->accumulated_time.stop(); | |
632 | |
633 update_gc_stats(i, full); | |
634 | |
635 if (VerifyAfterGC && i >= VerifyGCLevel && | |
636 total_collections() >= VerifyGCStartAt) { | |
637 HandleMark hm; // Discard invalid handles created during verification | |
638 gclog_or_tty->print(" VerifyAfterGC:"); | |
639 Universe::verify(false); | |
640 } | |
641 | |
642 if (PrintGCDetails) { | |
643 gclog_or_tty->print(":"); | |
644 _gens[i]->print_heap_change(prev_used); | |
645 } | |
646 } | |
647 } | |
648 | |
649 // Update "complete" boolean wrt what actually transpired -- | |
650 // for instance, a promotion failure could have led to | |
651 // a whole heap collection. | |
652 complete = complete || (max_level_collected == n_gens() - 1); | |
653 | |
615
c6c601a0f2d6
6797870: Add -XX:+{HeapDump,PrintClassHistogram}{Before,After}FullGC
ysr
parents:
546
diff
changeset
|
654 if (complete) { // We did a "major" collection |
c6c601a0f2d6
6797870: Add -XX:+{HeapDump,PrintClassHistogram}{Before,After}FullGC
ysr
parents:
546
diff
changeset
|
655 post_full_gc_dump(); // do any post full gc dumps |
c6c601a0f2d6
6797870: Add -XX:+{HeapDump,PrintClassHistogram}{Before,After}FullGC
ysr
parents:
546
diff
changeset
|
656 } |
c6c601a0f2d6
6797870: Add -XX:+{HeapDump,PrintClassHistogram}{Before,After}FullGC
ysr
parents:
546
diff
changeset
|
657 |
0 | 658 if (PrintGCDetails) { |
659 print_heap_change(gch_prev_used); | |
660 | |
661 // Print perm gen info for full GC with PrintGCDetails flag. | |
662 if (complete) { | |
663 print_perm_heap_change(perm_prev_used); | |
664 } | |
665 } | |
666 | |
667 for (int j = max_level_collected; j >= 0; j -= 1) { | |
668 // Adjust generation sizes. | |
669 _gens[j]->compute_new_size(); | |
670 } | |
671 | |
672 if (complete) { | |
673 // Ask the permanent generation to adjust size for full collections | |
674 perm()->compute_new_size(); | |
675 update_full_collections_completed(); | |
676 } | |
677 | |
678 // Track memory usage and detect low memory after GC finishes | |
679 MemoryService::track_memory_usage(); | |
680 | |
681 gc_epilogue(complete); | |
682 | |
683 if (must_restore_marks_for_biased_locking) { | |
684 BiasedLocking::restore_marks(); | |
685 } | |
686 } | |
687 | |
688 AdaptiveSizePolicy* sp = gen_policy()->size_policy(); | |
689 AdaptiveSizePolicyOutput(sp, total_collections()); | |
690 | |
691 if (PrintHeapAtGC) { | |
692 Universe::print_heap_after_gc(); | |
693 } | |
694 | |
546
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
517
diff
changeset
|
695 #ifdef TRACESPINNING |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
517
diff
changeset
|
696 ParallelTaskTerminator::print_termination_counts(); |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
517
diff
changeset
|
697 #endif |
05c6d52fa7a9
6690928: Use spinning in combination with yields for workstealing termination.
jmasa
parents:
517
diff
changeset
|
698 |
0 | 699 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
700 tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); | |
701 vm_exit(-1); | |
702 } | |
703 } | |
704 | |
705 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { | |
706 return collector_policy()->satisfy_failed_allocation(size, is_tlab); | |
707 } | |
708 | |
709 void GenCollectedHeap::set_par_threads(int t) { | |
710 SharedHeap::set_par_threads(t); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1681
diff
changeset
|
711 _gen_process_strong_tasks->set_n_threads(t); |
0 | 712 } |
713 | |
714 void GenCollectedHeap:: | |
715 gen_process_strong_roots(int level, | |
716 bool younger_gens_as_roots, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
717 bool activate_scope, |
0 | 718 bool collecting_perm_gen, |
719 SharedHeap::ScanningOption so, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
720 OopsInGenClosure* not_older_gens, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
721 bool do_code_roots, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
722 OopsInGenClosure* older_gens) { |
0 | 723 // General strong roots. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
724 |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
725 if (!do_code_roots) { |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
726 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
727 not_older_gens, NULL, older_gens); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
728 } else { |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
729 bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active()); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
730 CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
731 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
732 not_older_gens, &code_roots, older_gens); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
733 } |
0 | 734 |
735 if (younger_gens_as_roots) { | |
736 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { | |
737 for (int i = 0; i < level; i++) { | |
738 not_older_gens->set_generation(_gens[i]); | |
739 _gens[i]->oop_iterate(not_older_gens); | |
740 } | |
741 not_older_gens->reset_generation(); | |
742 } | |
743 } | |
744 // When collection is parallel, all threads get to cooperate to do | |
745 // older-gen scanning. | |
746 for (int i = level+1; i < _n_gens; i++) { | |
747 older_gens->set_generation(_gens[i]); | |
748 rem_set()->younger_refs_iterate(_gens[i], older_gens); | |
749 older_gens->reset_generation(); | |
750 } | |
751 | |
752 _gen_process_strong_tasks->all_tasks_completed(); | |
753 } | |
754 | |
755 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
756 CodeBlobClosure* code_roots, |
0 | 757 OopClosure* non_root_closure) { |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
881
diff
changeset
|
758 SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); |
0 | 759 // "Local" "weak" refs |
760 for (int i = 0; i < _n_gens; i++) { | |
761 _gens[i]->ref_processor()->weak_oops_do(root_closure); | |
762 } | |
763 } | |
764 | |
765 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ | |
766 void GenCollectedHeap:: \ | |
767 oop_since_save_marks_iterate(int level, \ | |
768 OopClosureType* cur, \ | |
769 OopClosureType* older) { \ | |
770 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ | |
771 for (int i = level+1; i < n_gens(); i++) { \ | |
772 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ | |
773 } \ | |
774 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \ | |
775 } | |
776 | |
777 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN) | |
778 | |
779 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN | |
780 | |
781 bool GenCollectedHeap::no_allocs_since_save_marks(int level) { | |
782 for (int i = level; i < _n_gens; i++) { | |
783 if (!_gens[i]->no_allocs_since_save_marks()) return false; | |
784 } | |
785 return perm_gen()->no_allocs_since_save_marks(); | |
786 } | |
787 | |
788 bool GenCollectedHeap::supports_inline_contig_alloc() const { | |
789 return _gens[0]->supports_inline_contig_alloc(); | |
790 } | |
791 | |
792 HeapWord** GenCollectedHeap::top_addr() const { | |
793 return _gens[0]->top_addr(); | |
794 } | |
795 | |
796 HeapWord** GenCollectedHeap::end_addr() const { | |
797 return _gens[0]->end_addr(); | |
798 } | |
799 | |
800 size_t GenCollectedHeap::unsafe_max_alloc() { | |
801 return _gens[0]->unsafe_max_alloc_nogc(); | |
802 } | |
803 | |
804 // public collection interfaces | |
805 | |
806 void GenCollectedHeap::collect(GCCause::Cause cause) { | |
807 if (should_do_concurrent_full_gc(cause)) { | |
808 #ifndef SERIALGC | |
809 // mostly concurrent full collection | |
810 collect_mostly_concurrent(cause); | |
811 #else // SERIALGC | |
812 ShouldNotReachHere(); | |
813 #endif // SERIALGC | |
814 } else { | |
815 #ifdef ASSERT | |
816 if (cause == GCCause::_scavenge_alot) { | |
817 // minor collection only | |
818 collect(cause, 0); | |
819 } else { | |
820 // Stop-the-world full collection | |
821 collect(cause, n_gens() - 1); | |
822 } | |
823 #else | |
824 // Stop-the-world full collection | |
825 collect(cause, n_gens() - 1); | |
826 #endif | |
827 } | |
828 } | |
829 | |
830 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { | |
831 // The caller doesn't have the Heap_lock | |
832 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); | |
833 MutexLocker ml(Heap_lock); | |
834 collect_locked(cause, max_level); | |
835 } | |
836 | |
837 // This interface assumes that it's being called by the | |
838 // vm thread. It collects the heap assuming that the | |
839 // heap lock is already held and that we are executing in | |
840 // the context of the vm thread. | |
841 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { | |
842 assert(Thread::current()->is_VM_thread(), "Precondition#1"); | |
843 assert(Heap_lock->is_locked(), "Precondition#2"); | |
844 GCCauseSetter gcs(this, cause); | |
845 switch (cause) { | |
846 case GCCause::_heap_inspection: | |
847 case GCCause::_heap_dump: { | |
848 HandleMark hm; | |
849 do_full_collection(false, // don't clear all soft refs | |
850 n_gens() - 1); | |
851 break; | |
852 } | |
853 default: // XXX FIX ME | |
854 ShouldNotReachHere(); // Unexpected use of this function | |
855 } | |
856 } | |
857 | |
858 void GenCollectedHeap::collect_locked(GCCause::Cause cause) { | |
859 // The caller has the Heap_lock | |
860 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); | |
861 collect_locked(cause, n_gens() - 1); | |
862 } | |
863 | |
864 // this is the private collection interface | |
865 // The Heap_lock is expected to be held on entry. | |
866 | |
867 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { | |
868 if (_preloading_shared_classes) { | |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
1994
diff
changeset
|
869 report_out_of_shared_space(SharedPermGen); |
0 | 870 } |
871 // Read the GC count while holding the Heap_lock | |
872 unsigned int gc_count_before = total_collections(); | |
873 unsigned int full_gc_count_before = total_full_collections(); | |
874 { | |
875 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back | |
876 VM_GenCollectFull op(gc_count_before, full_gc_count_before, | |
877 cause, max_level); | |
878 VMThread::execute(&op); | |
879 } | |
880 } | |
881 | |
882 #ifndef SERIALGC | |
883 bool GenCollectedHeap::create_cms_collector() { | |
884 | |
885 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) || | |
886 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) && | |
887 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep, | |
888 "Unexpected generation kinds"); | |
889 // Skip two header words in the block content verification | |
890 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();) | |
891 CMSCollector* collector = new CMSCollector( | |
892 (ConcurrentMarkSweepGeneration*)_gens[1], | |
893 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(), | |
894 _rem_set->as_CardTableRS(), | |
895 (ConcurrentMarkSweepPolicy*) collector_policy()); | |
896 | |
897 if (collector == NULL || !collector->completed_initialization()) { | |
898 if (collector) { | |
899 delete collector; // Be nice in embedded situation | |
900 } | |
901 vm_shutdown_during_initialization("Could not create CMS collector"); | |
902 return false; | |
903 } | |
904 return true; // success | |
905 } | |
906 | |
907 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) { | |
908 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock"); | |
909 | |
910 MutexLocker ml(Heap_lock); | |
911 // Read the GC counts while holding the Heap_lock | |
912 unsigned int full_gc_count_before = total_full_collections(); | |
913 unsigned int gc_count_before = total_collections(); | |
914 { | |
915 MutexUnlocker mu(Heap_lock); | |
916 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause); | |
917 VMThread::execute(&op); | |
918 } | |
919 } | |
920 #endif // SERIALGC | |
921 | |
922 | |
923 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, | |
924 int max_level) { | |
925 int local_max_level; | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
926 if (!incremental_collection_will_fail(false /* don't consult_young */) && |
0 | 927 gc_cause() == GCCause::_gc_locker) { |
928 local_max_level = 0; | |
929 } else { | |
930 local_max_level = max_level; | |
931 } | |
932 | |
933 do_collection(true /* full */, | |
934 clear_all_soft_refs /* clear_all_soft_refs */, | |
935 0 /* size */, | |
936 false /* is_tlab */, | |
937 local_max_level /* max_level */); | |
938 // Hack XXX FIX ME !!! | |
939 // A scavenge may not have been attempted, or may have | |
940 // been attempted and failed, because the old gen was too full | |
941 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker && | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
942 incremental_collection_will_fail(false /* don't consult_young */)) { |
0 | 943 if (PrintGCDetails) { |
944 gclog_or_tty->print_cr("GC locker: Trying a full collection " | |
945 "because scavenge failed"); | |
946 } | |
947 // This time allow the old gen to be collected as well | |
948 do_collection(true /* full */, | |
949 clear_all_soft_refs /* clear_all_soft_refs */, | |
950 0 /* size */, | |
951 false /* is_tlab */, | |
952 n_gens() - 1 /* max_level */); | |
953 } | |
954 } | |
955 | |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
956 bool GenCollectedHeap::is_in_young(oop p) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
957 bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start(); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
958 assert(result == _gens[0]->is_in_reserved(p), |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
959 err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p)); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
960 return result; |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
961 } |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3356
diff
changeset
|
962 |
0 | 963 // Returns "TRUE" iff "p" points into the allocated area of the heap. |
964 bool GenCollectedHeap::is_in(const void* p) const { | |
965 #ifndef ASSERT | |
966 guarantee(VerifyBeforeGC || | |
967 VerifyDuringGC || | |
968 VerifyBeforeExit || | |
1155
4e6abf09f540
6912062: disassembler plugin needs to produce symbolic information in product mode
jrose
parents:
989
diff
changeset
|
969 PrintAssembly || |
4e6abf09f540
6912062: disassembler plugin needs to produce symbolic information in product mode
jrose
parents:
989
diff
changeset
|
970 tty->count() != 0 || // already printing |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1626
diff
changeset
|
971 VerifyAfterGC || |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1626
diff
changeset
|
972 VMError::fatal_error_in_progress(), "too expensive"); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1626
diff
changeset
|
973 |
0 | 974 #endif |
975 // This might be sped up with a cache of the last generation that | |
976 // answered yes. | |
977 for (int i = 0; i < _n_gens; i++) { | |
978 if (_gens[i]->is_in(p)) return true; | |
979 } | |
980 if (_perm_gen->as_gen()->is_in(p)) return true; | |
981 // Otherwise... | |
982 return false; | |
983 } | |
984 | |
#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is young (low addr), old, perm (high addr);
  // anything below the end of the next-to-oldest gen is partially collected.
  return p != NULL && p < _gens[_n_gens - 2]->reserved().end();
}
#endif
0 | 995 |
996 void GenCollectedHeap::oop_iterate(OopClosure* cl) { | |
997 for (int i = 0; i < _n_gens; i++) { | |
998 _gens[i]->oop_iterate(cl); | |
999 } | |
1000 } | |
1001 | |
1002 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { | |
1003 for (int i = 0; i < _n_gens; i++) { | |
1004 _gens[i]->oop_iterate(mr, cl); | |
1005 } | |
1006 } | |
1007 | |
1008 void GenCollectedHeap::object_iterate(ObjectClosure* cl) { | |
1009 for (int i = 0; i < _n_gens; i++) { | |
1010 _gens[i]->object_iterate(cl); | |
1011 } | |
1012 perm_gen()->object_iterate(cl); | |
1013 } | |
1014 | |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1015 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1016 for (int i = 0; i < _n_gens; i++) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1017 _gens[i]->safe_object_iterate(cl); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1018 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1019 perm_gen()->safe_object_iterate(cl); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1020 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
1021 |
0 | 1022 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { |
1023 for (int i = 0; i < _n_gens; i++) { | |
1024 _gens[i]->object_iterate_since_last_GC(cl); | |
1025 } | |
1026 } | |
1027 | |
1028 Space* GenCollectedHeap::space_containing(const void* addr) const { | |
1029 for (int i = 0; i < _n_gens; i++) { | |
1030 Space* res = _gens[i]->space_containing(addr); | |
1031 if (res != NULL) return res; | |
1032 } | |
1033 Space* res = perm_gen()->space_containing(addr); | |
1034 if (res != NULL) return res; | |
1035 // Otherwise... | |
1036 assert(false, "Could not find containing space"); | |
1037 return NULL; | |
1038 } | |
1039 | |
1040 | |
1041 HeapWord* GenCollectedHeap::block_start(const void* addr) const { | |
1042 assert(is_in_reserved(addr), "block_start of address outside of heap"); | |
1043 for (int i = 0; i < _n_gens; i++) { | |
1044 if (_gens[i]->is_in_reserved(addr)) { | |
1045 assert(_gens[i]->is_in(addr), | |
1046 "addr should be in allocated part of generation"); | |
1047 return _gens[i]->block_start(addr); | |
1048 } | |
1049 } | |
1050 if (perm_gen()->is_in_reserved(addr)) { | |
1051 assert(perm_gen()->is_in(addr), | |
1052 "addr should be in allocated part of perm gen"); | |
1053 return perm_gen()->block_start(addr); | |
1054 } | |
1055 assert(false, "Some generation should contain the address"); | |
1056 return NULL; | |
1057 } | |
1058 | |
1059 size_t GenCollectedHeap::block_size(const HeapWord* addr) const { | |
1060 assert(is_in_reserved(addr), "block_size of address outside of heap"); | |
1061 for (int i = 0; i < _n_gens; i++) { | |
1062 if (_gens[i]->is_in_reserved(addr)) { | |
1063 assert(_gens[i]->is_in(addr), | |
1064 "addr should be in allocated part of generation"); | |
1065 return _gens[i]->block_size(addr); | |
1066 } | |
1067 } | |
1068 if (perm_gen()->is_in_reserved(addr)) { | |
1069 assert(perm_gen()->is_in(addr), | |
1070 "addr should be in allocated part of perm gen"); | |
1071 return perm_gen()->block_size(addr); | |
1072 } | |
1073 assert(false, "Some generation should contain the address"); | |
1074 return 0; | |
1075 } | |
1076 | |
1077 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { | |
1078 assert(is_in_reserved(addr), "block_is_obj of address outside of heap"); | |
1079 assert(block_start(addr) == addr, "addr must be a block start"); | |
1080 for (int i = 0; i < _n_gens; i++) { | |
1081 if (_gens[i]->is_in_reserved(addr)) { | |
1082 return _gens[i]->block_is_obj(addr); | |
1083 } | |
1084 } | |
1085 if (perm_gen()->is_in_reserved(addr)) { | |
1086 return perm_gen()->block_is_obj(addr); | |
1087 } | |
1088 assert(false, "Some generation should contain the address"); | |
1089 return false; | |
1090 } | |
1091 | |
1092 bool GenCollectedHeap::supports_tlab_allocation() const { | |
1093 for (int i = 0; i < _n_gens; i += 1) { | |
1094 if (_gens[i]->supports_tlab_allocation()) { | |
1095 return true; | |
1096 } | |
1097 } | |
1098 return false; | |
1099 } | |
1100 | |
1101 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { | |
1102 size_t result = 0; | |
1103 for (int i = 0; i < _n_gens; i += 1) { | |
1104 if (_gens[i]->supports_tlab_allocation()) { | |
1105 result += _gens[i]->tlab_capacity(); | |
1106 } | |
1107 } | |
1108 return result; | |
1109 } | |
1110 | |
1111 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { | |
1112 size_t result = 0; | |
1113 for (int i = 0; i < _n_gens; i += 1) { | |
1114 if (_gens[i]->supports_tlab_allocation()) { | |
1115 result += _gens[i]->unsafe_max_tlab_alloc(); | |
1116 } | |
1117 } | |
1118 return result; | |
1119 } | |
1120 | |
1121 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { | |
1122 bool gc_overhead_limit_was_exceeded; | |
1123 HeapWord* result = mem_allocate(size /* size */, | |
1124 false /* is_large_noref */, | |
1125 true /* is_tlab */, | |
1126 &gc_overhead_limit_was_exceeded); | |
1127 return result; | |
1128 } | |
1129 | |
1130 // Requires "*prev_ptr" to be non-NULL. Deletes and a block of minimal size | |
1131 // from the list headed by "*prev_ptr". | |
1132 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) { | |
1133 bool first = true; | |
1134 size_t min_size = 0; // "first" makes this conceptually infinite. | |
1135 ScratchBlock **smallest_ptr, *smallest; | |
1136 ScratchBlock *cur = *prev_ptr; | |
1137 while (cur) { | |
1138 assert(*prev_ptr == cur, "just checking"); | |
1139 if (first || cur->num_words < min_size) { | |
1140 smallest_ptr = prev_ptr; | |
1141 smallest = cur; | |
1142 min_size = smallest->num_words; | |
1143 first = false; | |
1144 } | |
1145 prev_ptr = &cur->next; | |
1146 cur = cur->next; | |
1147 } | |
1148 smallest = *smallest_ptr; | |
1149 *smallest_ptr = smallest->next; | |
1150 return smallest; | |
1151 } | |
1152 | |
1153 // Sort the scratch block list headed by res into decreasing size order, | |
1154 // and set "res" to the result. | |
1155 static void sort_scratch_list(ScratchBlock*& list) { | |
1156 ScratchBlock* sorted = NULL; | |
1157 ScratchBlock* unsorted = list; | |
1158 while (unsorted) { | |
1159 ScratchBlock *smallest = removeSmallestScratch(&unsorted); | |
1160 smallest->next = sorted; | |
1161 sorted = smallest; | |
1162 } | |
1163 list = sorted; | |
1164 } | |
1165 | |
1166 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor, | |
1167 size_t max_alloc_words) { | |
1168 ScratchBlock* res = NULL; | |
1169 for (int i = 0; i < _n_gens; i++) { | |
1170 _gens[i]->contribute_scratch(res, requestor, max_alloc_words); | |
1171 } | |
1172 sort_scratch_list(res); | |
1173 return res; | |
1174 } | |
1175 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1176 void GenCollectedHeap::release_scratch() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1177 for (int i = 0; i < _n_gens; i++) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1178 _gens[i]->reset_scratch(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1179 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1180 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1181 |
0 | 1182 size_t GenCollectedHeap::large_typearray_limit() { |
1183 return gen_policy()->large_typearray_limit(); | |
1184 } | |
1185 | |
1186 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { | |
1187 void do_generation(Generation* gen) { | |
1188 gen->prepare_for_verify(); | |
1189 } | |
1190 }; | |
1191 | |
1192 void GenCollectedHeap::prepare_for_verify() { | |
1193 ensure_parsability(false); // no need to retire TLABs | |
1194 GenPrepareForVerifyClosure blk; | |
1195 generation_iterate(&blk, false); | |
1196 perm_gen()->prepare_for_verify(); | |
1197 } | |
1198 | |
1199 | |
1200 void GenCollectedHeap::generation_iterate(GenClosure* cl, | |
1201 bool old_to_young) { | |
1202 if (old_to_young) { | |
1203 for (int i = _n_gens-1; i >= 0; i--) { | |
1204 cl->do_generation(_gens[i]); | |
1205 } | |
1206 } else { | |
1207 for (int i = 0; i < _n_gens; i++) { | |
1208 cl->do_generation(_gens[i]); | |
1209 } | |
1210 } | |
1211 } | |
1212 | |
1213 void GenCollectedHeap::space_iterate(SpaceClosure* cl) { | |
1214 for (int i = 0; i < _n_gens; i++) { | |
1215 _gens[i]->space_iterate(cl, true); | |
1216 } | |
1217 perm_gen()->space_iterate(cl, true); | |
1218 } | |
1219 | |
1220 bool GenCollectedHeap::is_maximal_no_gc() const { | |
1221 for (int i = 0; i < _n_gens; i++) { // skip perm gen | |
1222 if (!_gens[i]->is_maximal_no_gc()) { | |
1223 return false; | |
1224 } | |
1225 } | |
1226 return true; | |
1227 } | |
1228 | |
1229 void GenCollectedHeap::save_marks() { | |
1230 for (int i = 0; i < _n_gens; i++) { | |
1231 _gens[i]->save_marks(); | |
1232 } | |
1233 perm_gen()->save_marks(); | |
1234 } | |
1235 | |
1236 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) { | |
1237 for (int i = 0; i <= collectedGen; i++) { | |
1238 _gens[i]->compute_new_size(); | |
1239 } | |
1240 } | |
1241 | |
1242 GenCollectedHeap* GenCollectedHeap::heap() { | |
1243 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); | |
1244 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); | |
1245 return _gch; | |
1246 } | |
1247 | |
1248 | |
1249 void GenCollectedHeap::prepare_for_compaction() { | |
1250 Generation* scanning_gen = _gens[_n_gens-1]; | |
1251 // Start by compacting into same gen. | |
1252 CompactPoint cp(scanning_gen, NULL, NULL); | |
1253 while (scanning_gen != NULL) { | |
1254 scanning_gen->prepare_for_compaction(&cp); | |
1255 scanning_gen = prev_gen(scanning_gen); | |
1256 } | |
1257 } | |
1258 | |
1259 GCStats* GenCollectedHeap::gc_stats(int level) const { | |
1260 return _gens[level]->gc_stats(); | |
1261 } | |
1262 | |
3772
6747fd0512e0
7004681: G1: Extend marking verification to Full GCs
johnc
parents:
3377
diff
changeset
|
1263 void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) { |
0 | 1264 if (!silent) { |
1265 gclog_or_tty->print("permgen "); | |
1266 } | |
1267 perm_gen()->verify(allow_dirty); | |
1268 for (int i = _n_gens-1; i >= 0; i--) { | |
1269 Generation* g = _gens[i]; | |
1270 if (!silent) { | |
1271 gclog_or_tty->print(g->name()); | |
1272 gclog_or_tty->print(" "); | |
1273 } | |
1274 g->verify(allow_dirty); | |
1275 } | |
1276 if (!silent) { | |
1277 gclog_or_tty->print("remset "); | |
1278 } | |
1279 rem_set()->verify(); | |
1280 if (!silent) { | |
1281 gclog_or_tty->print("ref_proc "); | |
1282 } | |
1283 ReferenceProcessor::verify(); | |
1284 } | |
1285 | |
1286 void GenCollectedHeap::print() const { print_on(tty); } | |
1287 void GenCollectedHeap::print_on(outputStream* st) const { | |
1288 for (int i = 0; i < _n_gens; i++) { | |
1289 _gens[i]->print_on(st); | |
1290 } | |
1291 perm_gen()->print_on(st); | |
1292 } | |
1293 | |
// Apply "tc" to the GC-related threads: the parallel worker gang (if
// one exists) and, under CMS, the concurrent mark-sweep threads.
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  // The CMS background threads are not members of the work gang.
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}
1304 | |
// Print the GC threads to "st": the ParNew worker gang (when enabled)
// and the CMS threads (when enabled).  Serial-GC-only builds have
// neither and print nothing.
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}
1315 | |
1316 void GenCollectedHeap::print_tracing_info() const { | |
1317 if (TraceGen0Time) { | |
1318 get_gen(0)->print_summary_info(); | |
1319 } | |
1320 if (TraceGen1Time) { | |
1321 get_gen(1)->print_summary_info(); | |
1322 } | |
1323 } | |
1324 | |
1325 void GenCollectedHeap::print_heap_change(size_t prev_used) const { | |
1326 if (PrintGCDetails && Verbose) { | |
1327 gclog_or_tty->print(" " SIZE_FORMAT | |
1328 "->" SIZE_FORMAT | |
1329 "(" SIZE_FORMAT ")", | |
1330 prev_used, used(), capacity()); | |
1331 } else { | |
1332 gclog_or_tty->print(" " SIZE_FORMAT "K" | |
1333 "->" SIZE_FORMAT "K" | |
1334 "(" SIZE_FORMAT "K)", | |
1335 prev_used / K, used() / K, capacity() / K); | |
1336 } | |
1337 } | |
1338 | |
//New method to print perm gen info with PrintGCDetails flag
// Emits ", [<perm short name> :<change>]" where <change> is produced by
// the perm gen's own print_heap_change().
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}
1345 | |
1346 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { | |
1347 private: | |
1348 bool _full; | |
1349 public: | |
1350 void do_generation(Generation* gen) { | |
1351 gen->gc_prologue(_full); | |
1352 } | |
1353 GenGCPrologueClosure(bool full) : _full(full) {}; | |
1354 }; | |
1355 | |
1356 void GenCollectedHeap::gc_prologue(bool full) { | |
1357 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); | |
1358 | |
1359 always_do_update_barrier = false; | |
1360 // Fill TLAB's and such | |
1361 CollectedHeap::accumulate_statistics_all_tlabs(); | |
1362 ensure_parsability(true); // retire TLABs | |
1363 | |
1364 // Call allocation profiler | |
1365 AllocationProfiler::iterate_since_last_gc(); | |
1366 // Walk generations | |
1367 GenGCPrologueClosure blk(full); | |
1368 generation_iterate(&blk, false); // not old-to-young. | |
1369 perm_gen()->gc_prologue(full); | |
1370 }; | |
1371 | |
1372 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { | |
1373 private: | |
1374 bool _full; | |
1375 public: | |
1376 void do_generation(Generation* gen) { | |
1377 gen->gc_epilogue(_full); | |
1378 } | |
1379 GenGCEpilogueClosure(bool full) : _full(full) {}; | |
1380 }; | |
1381 | |
// Work done at the end of every collection: check the inline-allocation
// gap invariant (C2 builds), resize TLABs, and run each generation's
// (and the perm gen's) epilogue.
void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  // Guard against compiled inline allocation wrapping around the end of
  // the address space: the gap between the heap's end and the top of the
  // address space must exceed the largest inline-allocatable size.
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  // Re-enable eager card-table updates only when CMS needs them.
  always_do_update_barrier = UseConcMarkSweepGC;
};
1397 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1398 #ifndef PRODUCT |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1399 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1400 private: |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1401 public: |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1402 void do_generation(Generation* gen) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1403 gen->record_spaces_top(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1404 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1405 }; |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1406 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1407 void GenCollectedHeap::record_gen_tops_before_GC() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1408 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1409 GenGCSaveTopsBeforeGCClosure blk; |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1410 generation_iterate(&blk, false); // not old-to-young. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1411 perm_gen()->record_spaces_top(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1412 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1413 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1414 #endif // not PRODUCT |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
1415 |
0 | 1416 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
1417 public: | |
1418 void do_generation(Generation* gen) { | |
1419 gen->ensure_parsability(); | |
1420 } | |
1421 }; | |
1422 | |
1423 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { | |
1424 CollectedHeap::ensure_parsability(retire_tlabs); | |
1425 GenEnsureParsabilityClosure ep_cl; | |
1426 generation_iterate(&ep_cl, false); | |
1427 perm_gen()->ensure_parsability(); | |
1428 } | |
1429 | |
1430 oop GenCollectedHeap::handle_failed_promotion(Generation* gen, | |
1431 oop obj, | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1432 size_t obj_size) { |
0 | 1433 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1434 HeapWord* result = NULL; | |
1435 | |
1436 // First give each higher generation a chance to allocate the promoted object. | |
1437 Generation* allocator = next_gen(gen); | |
1438 if (allocator != NULL) { | |
1439 do { | |
1440 result = allocator->allocate(obj_size, false); | |
1441 } while (result == NULL && (allocator = next_gen(allocator)) != NULL); | |
1442 } | |
1443 | |
1444 if (result == NULL) { | |
1445 // Then give gen and higher generations a chance to expand and allocate the | |
1446 // object. | |
1447 do { | |
1448 result = gen->expand_and_allocate(obj_size, false); | |
1449 } while (result == NULL && (gen = next_gen(gen)) != NULL); | |
1450 } | |
1451 | |
1452 if (result != NULL) { | |
1453 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); | |
1454 } | |
1455 return oop(result); | |
1456 } | |
1457 | |
1458 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { | |
1459 jlong _time; // in ms | |
1460 jlong _now; // in ms | |
1461 | |
1462 public: | |
1463 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } | |
1464 | |
1465 jlong time() { return _time; } | |
1466 | |
1467 void do_generation(Generation* gen) { | |
1468 _time = MIN2(_time, gen->time_of_last_gc(_now)); | |
1469 } | |
1470 }; | |
1471 | |
1472 jlong GenCollectedHeap::millis_since_last_gc() { | |
1473 jlong now = os::javaTimeMillis(); | |
1474 GenTimeOfLastGCClosure tolgc_cl(now); | |
1475 // iterate over generations getting the oldest | |
1476 // time that a generation was collected | |
1477 generation_iterate(&tolgc_cl, false); | |
1478 tolgc_cl.do_generation(perm_gen()); | |
1479 // XXX Despite the assert above, since javaTimeMillis() | |
1480 // doesnot guarantee monotonically increasing return | |
1481 // values (note, i didn't say "strictly monotonic"), | |
1482 // we need to guard against getting back a time | |
1483 // later than now. This should be fixed by basing | |
1484 // on someting like gethrtime() which guarantees | |
1485 // monotonicity. Note that cond_wait() is susceptible | |
1486 // to a similar problem, because its interface is | |
1487 // based on absolute time in the form of the | |
1488 // system time's notion of UCT. See also 4506635 | |
1489 // for yet another problem of similar nature. XXX | |
1490 jlong retVal = now - tolgc_cl.time(); | |
1491 if (retVal < 0) { | |
1492 NOT_PRODUCT(warning("time warp: %d", retVal);) | |
1493 return 0; | |
1494 } | |
1495 return retVal; | |
1496 } |