Mercurial > hg > graal-compiler
comparison src/share/vm/memory/genCollectedHeap.hpp @ 6725:da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author | coleenp |
---|---|
date | Sat, 01 Sep 2012 13:25:18 -0400 |
parents | b632e80fc9dc |
children | 7b835924c31c |
comparison
equal
deleted
inserted
replaced
6724:36d1d483d5d6 | 6725:da91efe96a93 |
---|---|
41 friend class TenuredGeneration; | 41 friend class TenuredGeneration; |
42 friend class ConcurrentMarkSweepGeneration; | 42 friend class ConcurrentMarkSweepGeneration; |
43 friend class CMSCollector; | 43 friend class CMSCollector; |
44 friend class GenMarkSweep; | 44 friend class GenMarkSweep; |
45 friend class VM_GenCollectForAllocation; | 45 friend class VM_GenCollectForAllocation; |
46 friend class VM_GenCollectForPermanentAllocation; | |
47 friend class VM_GenCollectFull; | 46 friend class VM_GenCollectFull; |
48 friend class VM_GenCollectFullConcurrent; | 47 friend class VM_GenCollectFullConcurrent; |
49 friend class VM_GC_HeapInspection; | 48 friend class VM_GC_HeapInspection; |
50 friend class VM_HeapDumper; | 49 friend class VM_HeapDumper; |
51 friend class HeapInspection; | 50 friend class HeapInspection; |
84 SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; } | 83 SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; } |
85 | 84 |
86 // In block contents verification, the number of header words to skip | 85 // In block contents verification, the number of header words to skip |
87 NOT_PRODUCT(static size_t _skip_header_HeapWords;) | 86 NOT_PRODUCT(static size_t _skip_header_HeapWords;) |
88 | 87 |
89 // GC is not allowed during the dump of the shared classes. Keep track | |
90 // of this in order to provide a reasonable error message when terminating. | |
91 bool _preloading_shared_classes; | |
92 | |
93 protected: | 88 protected: |
94 // Directs each generation up to and including "collectedGen" to recompute | 89 // Directs each generation up to and including "collectedGen" to recompute |
95 // its desired size. | 90 // its desired size. |
96 void compute_new_generation_sizes(int collectedGen); | 91 void compute_new_generation_sizes(int collectedGen); |
97 | 92 |
114 // have handled it (including collection, expansion, etc.) | 109 // have handled it (including collection, expansion, etc.) |
115 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); | 110 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); |
116 | 111 |
117 // Callback from VM_GenCollectFull operation. | 112 // Callback from VM_GenCollectFull operation. |
118 // Perform a full collection of the first max_level+1 generations. | 113 // Perform a full collection of the first max_level+1 generations. |
114 virtual void do_full_collection(bool clear_all_soft_refs); | |
119 void do_full_collection(bool clear_all_soft_refs, int max_level); | 115 void do_full_collection(bool clear_all_soft_refs, int max_level); |
120 | 116 |
121 // Does the "cause" of GC indicate that | 117 // Does the "cause" of GC indicate that |
122 // we absolutely __must__ clear soft refs? | 118 // we absolutely __must__ clear soft refs? |
123 bool must_clear_all_soft_refs(); | 119 bool must_clear_all_soft_refs(); |
127 | 123 |
128 GCStats* gc_stats(int level) const; | 124 GCStats* gc_stats(int level) const; |
129 | 125 |
130 // Returns JNI_OK on success | 126 // Returns JNI_OK on success |
131 virtual jint initialize(); | 127 virtual jint initialize(); |
132 char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec, | 128 char* allocate(size_t alignment, |
133 size_t* _total_reserved, int* _n_covered_regions, | 129 size_t* _total_reserved, int* _n_covered_regions, |
134 ReservedSpace* heap_rs); | 130 ReservedSpace* heap_rs); |
135 | 131 |
136 // Does operations required after initialization has been done. | 132 // Does operations required after initialization has been done. |
137 void post_initialize(); | 133 void post_initialize(); |
143 return CollectedHeap::GenCollectedHeap; | 139 return CollectedHeap::GenCollectedHeap; |
144 } | 140 } |
145 | 141 |
146 // The generational collector policy. | 142 // The generational collector policy. |
147 GenCollectorPolicy* gen_policy() const { return _gen_policy; } | 143 GenCollectorPolicy* gen_policy() const { return _gen_policy; } |
144 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); } | |
148 | 145 |
149 // Adaptive size policy | 146 // Adaptive size policy |
150 virtual AdaptiveSizePolicy* size_policy() { | 147 virtual AdaptiveSizePolicy* size_policy() { |
151 return gen_policy()->size_policy(); | 148 return gen_policy()->size_policy(); |
152 } | 149 } |
153 | 150 |
154 size_t capacity() const; | 151 size_t capacity() const; |
155 size_t used() const; | 152 size_t used() const; |
156 | 153 |
157 // Save the "used_region" for generations level and lower, | 154 // Save the "used_region" for generations level and lower. |
158 // and, if perm is true, for perm gen. | 155 void save_used_regions(int level); |
159 void save_used_regions(int level, bool perm); | |
160 | 156 |
161 size_t max_capacity() const; | 157 size_t max_capacity() const; |
162 | 158 |
163 HeapWord* mem_allocate(size_t size, | 159 HeapWord* mem_allocate(size_t size, |
164 bool* gc_overhead_limit_was_exceeded); | 160 bool* gc_overhead_limit_was_exceeded); |
183 // Perform a full collection of the heap; intended for use in implementing | 179 // Perform a full collection of the heap; intended for use in implementing |
184 // "System.gc". This implies as full a collection as the CollectedHeap | 180 // "System.gc". This implies as full a collection as the CollectedHeap |
185 // supports. Caller does not hold the Heap_lock on entry. | 181 // supports. Caller does not hold the Heap_lock on entry. |
186 void collect(GCCause::Cause cause); | 182 void collect(GCCause::Cause cause); |
187 | 183 |
188 // This interface assumes that it's being called by the | |
189 // vm thread. It collects the heap assuming that the | |
190 // heap lock is already held and that we are executing in | |
191 // the context of the vm thread. | |
192 void collect_as_vm_thread(GCCause::Cause cause); | |
193 | |
194 // The same as above but assume that the caller holds the Heap_lock. | 184 // The same as above but assume that the caller holds the Heap_lock. |
195 void collect_locked(GCCause::Cause cause); | 185 void collect_locked(GCCause::Cause cause); |
196 | 186 |
197 // Perform a full collection of the first max_level+1 generations. | 187 // Perform a full collection of the first max_level+1 generations. |
198 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. | 188 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. |
226 virtual bool is_scavengable(const void* addr) { | 216 virtual bool is_scavengable(const void* addr) { |
227 return is_in_young((oop)addr); | 217 return is_in_young((oop)addr); |
228 } | 218 } |
229 | 219 |
230 // Iteration functions. | 220 // Iteration functions. |
231 void oop_iterate(OopClosure* cl); | 221 void oop_iterate(ExtendedOopClosure* cl); |
232 void oop_iterate(MemRegion mr, OopClosure* cl); | 222 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
233 void object_iterate(ObjectClosure* cl); | 223 void object_iterate(ObjectClosure* cl); |
234 void safe_object_iterate(ObjectClosure* cl); | 224 void safe_object_iterate(ObjectClosure* cl); |
235 void object_iterate_since_last_GC(ObjectClosure* cl); | 225 void object_iterate_since_last_GC(ObjectClosure* cl); |
236 Space* space_containing(const void* addr) const; | 226 Space* space_containing(const void* addr) const; |
237 | 227 |
292 // but unfortunately the flag UseSerialGC need not necessarily always | 282 // but unfortunately the flag UseSerialGC need not necessarily always |
293 // be set when DefNew+Tenured are being used. | 283 // be set when DefNew+Tenured are being used. |
294 return is_in_young(new_obj); | 284 return is_in_young(new_obj); |
295 } | 285 } |
296 | 286 |
297 // Can a compiler elide a store barrier when it writes | |
298 // a permanent oop into the heap? Applies when the compiler | |
299 // is storing x to the heap, where x->is_perm() is true. | |
300 virtual bool can_elide_permanent_oop_store_barriers() const { | |
301 // CMS needs to see all, even intra-generational, ref updates. | |
302 return !UseConcMarkSweepGC; | |
303 } | |
304 | |
305 // The "requestor" generation is performing some garbage collection | 287 // The "requestor" generation is performing some garbage collection |
306 // action for which it would be useful to have scratch space. The | 288 // action for which it would be useful to have scratch space. The |
307 // requestor promises to allocate no more than "max_alloc_words" in any | 289 // requestor promises to allocate no more than "max_alloc_words" in any |
308 // older generation (via promotion say.) Any blocks of space that can | 290 // older generation (via promotion say.) Any blocks of space that can |
309 // be provided are returned as a list of ScratchBlocks, sorted by | 291 // be provided are returned as a list of ScratchBlocks, sorted by |
336 // to "now". | 318 // to "now". |
337 void update_time_of_last_gc(jlong now) { | 319 void update_time_of_last_gc(jlong now) { |
338 for (int i = 0; i < _n_gens; i++) { | 320 for (int i = 0; i < _n_gens; i++) { |
339 _gens[i]->update_time_of_last_gc(now); | 321 _gens[i]->update_time_of_last_gc(now); |
340 } | 322 } |
341 perm_gen()->update_time_of_last_gc(now); | |
342 } | 323 } |
343 | 324 |
344 // Update the gc statistics for each generation. | 325 // Update the gc statistics for each generation. |
345 // "level" is the level of the latest collection | 326 // "level" is the level of the latest collection |
346 void update_gc_stats(int current_level, bool full) { | 327 void update_gc_stats(int current_level, bool full) { |
347 for (int i = 0; i < _n_gens; i++) { | 328 for (int i = 0; i < _n_gens; i++) { |
348 _gens[i]->update_gc_stats(current_level, full); | 329 _gens[i]->update_gc_stats(current_level, full); |
349 } | 330 } |
350 perm_gen()->update_gc_stats(current_level, full); | |
351 } | 331 } |
352 | 332 |
353 // Override. | 333 // Override. |
354 bool no_gc_in_progress() { return !is_gc_active(); } | 334 bool no_gc_in_progress() { return !is_gc_active(); } |
355 | 335 |
365 virtual void gc_threads_do(ThreadClosure* tc) const; | 345 virtual void gc_threads_do(ThreadClosure* tc) const; |
366 virtual void print_tracing_info() const; | 346 virtual void print_tracing_info() const; |
367 | 347 |
368 // PrintGC, PrintGCDetails support | 348 // PrintGC, PrintGCDetails support |
369 void print_heap_change(size_t prev_used) const; | 349 void print_heap_change(size_t prev_used) const; |
370 void print_perm_heap_change(size_t perm_prev_used) const; | |
371 | 350 |
372 // The functions below are helper functions that a subclass of | 351 // The functions below are helper functions that a subclass of |
373 // "CollectedHeap" can use in the implementation of its virtual | 352 // "CollectedHeap" can use in the implementation of its virtual |
374 // functions. | 353 // functions. |
375 | 354 |
376 class GenClosure : public StackObj { | 355 class GenClosure : public StackObj { |
377 public: | 356 public: |
378 virtual void do_generation(Generation* gen) = 0; | 357 virtual void do_generation(Generation* gen) = 0; |
379 }; | 358 }; |
380 | 359 |
381 // Apply "cl.do_generation" to all generations in the heap (not including | 360 // Apply "cl.do_generation" to all generations in the heap |
382 // the permanent generation). If "old_to_young" determines the order. | 361 // If "old_to_young" determines the order. |
383 void generation_iterate(GenClosure* cl, bool old_to_young); | 362 void generation_iterate(GenClosure* cl, bool old_to_young); |
384 | 363 |
385 void space_iterate(SpaceClosure* cl); | 364 void space_iterate(SpaceClosure* cl); |
386 | 365 |
387 // Return "true" if all generations (but perm) have reached the | 366 // Return "true" if all generations have reached the |
388 // maximal committed limit that they can reach, without a garbage | 367 // maximal committed limit that they can reach, without a garbage |
389 // collection. | 368 // collection. |
390 virtual bool is_maximal_no_gc() const; | 369 virtual bool is_maximal_no_gc() const; |
391 | 370 |
392 // Return the generation before "gen", or else NULL. | 371 // Return the generation before "gen", or else NULL. |
427 // from older generations; "not_older_gens" is used everywhere else.) | 406 // from older generations; "not_older_gens" is used everywhere else.) |
428 // If "younger_gens_as_roots" is false, younger generations are | 407 // If "younger_gens_as_roots" is false, younger generations are |
429 // not scanned as roots; in this case, the caller must be arranging to | 408 // not scanned as roots; in this case, the caller must be arranging to |
430 // scan the younger generations itself. (For example, a generation might | 409 // scan the younger generations itself. (For example, a generation might |
431 // explicitly mark reachable objects in younger generations, to avoid | 410 // explicitly mark reachable objects in younger generations, to avoid |
432 // excess storage retention.) If "collecting_perm_gen" is false, then | 411 // excess storage retention.) |
433 // roots that may only contain references to permGen objects are not | 412 // The "so" argument determines which of the roots |
434 // scanned; instead, the older_gens closure is applied to all outgoing | |
435 // references in the perm gen. The "so" argument determines which of the roots | |
436 // the closure is applied to: | 413 // the closure is applied to: |
437 // "SO_None" does none; | 414 // "SO_None" does none; |
438 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; | 415 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; |
439 // "SO_SystemClasses" to all the "system" classes and loaders; | 416 // "SO_SystemClasses" to all the "system" classes and loaders; |
440 // "SO_Strings" applies the closure to all entries in the StringTable. | 417 // "SO_Strings" applies the closure to all entries in the StringTable. |
441 void gen_process_strong_roots(int level, | 418 void gen_process_strong_roots(int level, |
442 bool younger_gens_as_roots, | 419 bool younger_gens_as_roots, |
443 // The remaining arguments are in an order | 420 // The remaining arguments are in an order |
444 // consistent with SharedHeap::process_strong_roots: | 421 // consistent with SharedHeap::process_strong_roots: |
445 bool activate_scope, | 422 bool activate_scope, |
446 bool collecting_perm_gen, | 423 bool is_scavenging, |
447 SharedHeap::ScanningOption so, | 424 SharedHeap::ScanningOption so, |
448 OopsInGenClosure* not_older_gens, | 425 OopsInGenClosure* not_older_gens, |
449 bool do_code_roots, | 426 bool do_code_roots, |
450 OopsInGenClosure* older_gens); | 427 OopsInGenClosure* older_gens, |
428 KlassClosure* klass_closure); | |
451 | 429 |
452 // Apply "blk" to all the weak roots of the system. These include | 430 // Apply "blk" to all the weak roots of the system. These include |
453 // JNI weak roots, the code cache, system dictionary, symbol table, | 431 // JNI weak roots, the code cache, system dictionary, symbol table, |
454 // string table, and referents of reachable weak refs. | 432 // string table, and referents of reachable weak refs. |
455 void gen_process_weak_roots(OopClosure* root_closure, | 433 void gen_process_weak_roots(OopClosure* root_closure, |
461 // in other generations, it should call this method. | 439 // in other generations, it should call this method. |
462 void save_marks(); | 440 void save_marks(); |
463 | 441 |
464 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects | 442 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects |
465 // allocated since the last call to save_marks in generations at or above | 443 // allocated since the last call to save_marks in generations at or above |
466 // "level" (including the permanent generation.) The "cur" closure is | 444 // "level". The "cur" closure is |
467 // applied to references in the generation at "level", and the "older" | 445 // applied to references in the generation at "level", and the "older" |
468 // closure to older (and permanent) generations. | 446 // closure to older generations. |
469 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ | 447 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ |
470 void oop_since_save_marks_iterate(int level, \ | 448 void oop_since_save_marks_iterate(int level, \ |
471 OopClosureType* cur, \ | 449 OopClosureType* cur, \ |
472 OopClosureType* older); | 450 OopClosureType* older); |
473 | 451 |
474 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) | 452 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) |
475 | 453 |
476 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL | 454 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL |
477 | 455 |
478 // Returns "true" iff no allocations have occurred in any generation at | 456 // Returns "true" iff no allocations have occurred in any generation at |
479 // "level" or above (including the permanent generation) since the last | 457 // "level" or above since the last |
480 // call to "save_marks". | 458 // call to "save_marks". |
481 bool no_allocs_since_save_marks(int level); | 459 bool no_allocs_since_save_marks(int level); |
482 | 460 |
483 // Returns true if an incremental collection is likely to fail. | 461 // Returns true if an incremental collection is likely to fail. |
484 // We optionally consult the young gen, if asked to do so; | 462 // We optionally consult the young gen, if asked to do so; |
504 } | 482 } |
505 void clear_incremental_collection_failed() { | 483 void clear_incremental_collection_failed() { |
506 _incremental_collection_failed = false; | 484 _incremental_collection_failed = false; |
507 } | 485 } |
508 | 486 |
509 // Promotion of obj into gen failed. Try to promote obj to higher non-perm | 487 // Promotion of obj into gen failed. Try to promote obj to higher |
510 // gens in ascending order; return the new location of obj if successful. | 488 // gens in ascending order; return the new location of obj if successful. |
511 // Otherwise, try expand-and-allocate for obj in each generation starting at | 489 // Otherwise, try expand-and-allocate for obj in each generation starting at |
512 // gen; return the new location of obj if successful. Otherwise, return NULL. | 490 // gen; return the new location of obj if successful. Otherwise, return NULL. |
513 oop handle_failed_promotion(Generation* gen, | 491 oop handle_failed_promotion(Generation* gen, |
514 oop obj, | 492 oop obj, |
545 void record_gen_tops_before_GC() PRODUCT_RETURN; | 523 void record_gen_tops_before_GC() PRODUCT_RETURN; |
546 | 524 |
547 protected: | 525 protected: |
548 virtual void gc_prologue(bool full); | 526 virtual void gc_prologue(bool full); |
549 virtual void gc_epilogue(bool full); | 527 virtual void gc_epilogue(bool full); |
550 | |
551 public: | |
552 virtual void preload_and_dump(TRAPS) KERNEL_RETURN; | |
553 }; | 528 }; |
554 | 529 |
555 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP | 530 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP |