comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 6948:e522a00b91aa

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works
author Doug Simon <doug.simon@oracle.com>
date Mon, 12 Nov 2012 23:14:12 +0100
parents 2e6857353b2c
children ad747ee9d0b1 9412b1915547
comparing 6711:ae13cc658b80 with 6948:e522a00b91aa
@@ -43,13 +43,13 @@
 // may combine concurrent marking with parallel, incremental compaction of
 // heap subsets that will yield large amounts of garbage.

 class HeapRegion;
 class HRRSCleanupTask;
-class PermanentGenerationSpec;
 class GenerationSpec;
 class OopsInHeapRegionClosure;
+class G1KlassScanClosure;
 class G1ScanHeapEvacClosure;
 class ObjectClosure;
 class SpaceClosure;
 class CompactibleSpaceClosure;
 class Space;
@@ -188,11 +188,10 @@

 class RefineCardTableEntryClosure;

 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
-  friend class VM_GenCollectForPermanentAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
@@ -223,11 +222,11 @@
   // The one and only G1CollectedHeap, so static functions can find it.
   static G1CollectedHeap* _g1h;

   static size_t _humongous_object_threshold_in_words;

-  // Storage for the G1 heap (excludes the permanent generation).
+  // Storage for the G1 heap.
   VirtualSpace _g1_storage;
   MemRegion _g1_reserved;

   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;
@@ -325,11 +324,11 @@

   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions();

   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions();
+  void release_gc_alloc_regions(uint no_of_gc_workers);

   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
   void abandon_gc_alloc_regions();

@@ -406,10 +405,13 @@
   void print_hrs_post_compaction();

   double verify(bool guard, const char* msg);
   void verify_before_gc();
   void verify_after_gc();
+
+  void log_gc_header();
+  void log_gc_footer(double pause_time_sec);

   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.

 #define heap_locking_asserts_err_msg(_extra_message_) \
@@ -628,11 +630,11 @@
                      bool clear_all_soft_refs,
                      size_t word_size);

   // Callback from VM_G1CollectFull operation.
   // Perform a full collection.
-  void do_full_collection(bool clear_all_soft_refs);
+  virtual void do_full_collection(bool clear_all_soft_refs);

   // Resize the heap if necessary after a full collection. If this is
   // after a collect-for allocation, "word_size" is the allocation size,
   // and will be considered part of the used portion of the heap.
   void resize_if_necessary_after_full_collection(size_t word_size);
@@ -648,15 +650,15 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size);

   // Process any reference objects discovered during
   // an incremental evacuation pause.
-  void process_discovered_references();
+  void process_discovered_references(uint no_of_gc_workers);

   // Enqueue any remaining discovered references
   // after processing.
-  void enqueue_discovered_references();
+  void enqueue_discovered_references(uint no_of_gc_workers);

 public:

   G1MonitoringSupport* g1mm() {
     assert(_g1mm != NULL, "should have been initialized");
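Editor's note: this hunk shows the evacuation pause threading the active GC worker count through explicitly instead of each callee re-querying the work gang. A hypothetical call-site sketch follows; the accessor and variable names are assumptions for illustration, not identifiers from this changeset.

    // Hypothetical sketch only -- not HotSpot source from this changeset.
    uint n_workers = workers()->active_workers();  // assumed work-gang accessor
    process_discovered_references(n_workers);      // reference processing sized to the gang
    enqueue_discovered_references(n_workers);      // enqueueing reuses the same count
    release_gc_alloc_regions(n_workers);           // per-worker GC alloc regions retired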
@@ -803,21 +805,21 @@
   // statistics or updating free lists.
   void abandon_collection_set(HeapRegion* cs_head);

   // Applies "scan_non_heap_roots" to roots outside the heap,
   // "scan_rs" to roots inside the heap (having done "set_region" to
-  // indicate the region in which the root resides), and does "scan_perm"
-  // (setting the generation to the perm generation.) If "scan_rs" is
+  // indicate the region in which the root resides),
+  // and does "scan_metadata" If "scan_rs" is
   // NULL, then this step is skipped. The "worker_i"
   // param is for use with parallel roots processing, and should be
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
-  void g1_process_strong_roots(bool collecting_perm_gen,
+  void g1_process_strong_roots(bool is_scavenging,
                                ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
-                               OopsInGenClosure* scan_perm,
+                               G1KlassScanClosure* scan_klasses,
                                int worker_i);

   // Apply "blk" to all the weak roots of the system. These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
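Editor's note: with class metadata moving out of the Java heap, the perm-gen closure slot of g1_process_strong_roots becomes a klass-scanning closure and the leading flag is renamed. A hedged before/after caller sketch follows; the closure variables are placeholders invented for illustration.

    // Hypothetical caller sketch only -- closure instances are placeholders.
    //
    // Before the merge (perm gen scanned as a generation):
    //   g1_process_strong_roots(false /* collecting_perm_gen */, so,
    //                           &scan_roots_cl, &scan_rs_cl, &scan_perm_cl, worker_i);
    //
    // After the merge (class metadata lives in Metaspace and is reached
    // through a klass-scanning closure):
    //   g1_process_strong_roots(true /* is_scavenging */, so,
    //                           &scan_roots_cl, &scan_rs_cl, &scan_klasses_cl, worker_i);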
@@ -1069,11 +1071,11 @@
   // Must call the initialize method afterwards.
   // May not return if something goes wrong.
   G1CollectedHeap(G1CollectorPolicy* policy);

   // Initialize the G1CollectedHeap to have the initial and
-  // maximum sizes, permanent generation, and remembered and barrier sets
+  // maximum sizes and remembered and barrier sets
   // specified by the policy object.
   jint initialize();

   // Initialize weak reference processing.
   virtual void ref_processing_init();
@@ -1097,10 +1099,12 @@
     return CollectedHeap::G1CollectedHeap;
   }

   // The current policy object for the collector.
   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
+
+  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }

   // Adaptive size policy. No such thing for g1.
   virtual AdaptiveSizePolicy* size_policy() { return NULL; }

   // The rem set and barrier set.
1275 // "CollectedHeap" supports. 1279 // "CollectedHeap" supports.
1276 virtual void collect(GCCause::Cause cause); 1280 virtual void collect(GCCause::Cause cause);
1277 1281
1278 // The same as above but assume that the caller holds the Heap_lock. 1282 // The same as above but assume that the caller holds the Heap_lock.
1279 void collect_locked(GCCause::Cause cause); 1283 void collect_locked(GCCause::Cause cause);
1280
1281 // This interface assumes that it's being called by the
1282 // vm thread. It collects the heap assuming that the
1283 // heap lock is already held and that we are executing in
1284 // the context of the vm thread.
1285 virtual void collect_as_vm_thread(GCCause::Cause cause);
1286 1284
1287 // True iff a evacuation has failed in the most-recent collection. 1285 // True iff a evacuation has failed in the most-recent collection.
1288 bool evacuation_failed() { return _evacuation_failed; } 1286 bool evacuation_failed() { return _evacuation_failed; }
1289 1287
1290 // It will free a region if it has allocated objects in it that are 1288 // It will free a region if it has allocated objects in it that are
@@ -1315,11 +1313,11 @@
   // Return "TRUE" iff the given object address is within the collection
   // set.
   inline bool obj_in_cs(oop obj);

   // Return "TRUE" iff the given object address is in the reserved
-  // region of g1 (excluding the permanent generation).
+  // region of g1.
   bool is_in_g1_reserved(const void* p) const {
     return _g1_reserved.contains(p);
   }

   // Returns a MemRegion that corresponds to the space that has been
@@ -1342,29 +1340,21 @@

   // Iteration functions.

   // Iterate over all the ref-containing fields of all objects, calling
   // "cl.do_oop" on each.
-  virtual void oop_iterate(OopClosure* cl) {
-    oop_iterate(cl, true);
-  }
-  void oop_iterate(OopClosure* cl, bool do_perm);
+  virtual void oop_iterate(ExtendedOopClosure* cl);

   // Same as above, restricted to a memory region.
-  virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
-    oop_iterate(mr, cl, true);
-  }
-  void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
+  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);

   // Iterate over all objects, calling "cl.do_object" on each.
-  virtual void object_iterate(ObjectClosure* cl) {
-    object_iterate(cl, true);
-  }
+  virtual void object_iterate(ObjectClosure* cl);
+
   virtual void safe_object_iterate(ObjectClosure* cl) {
-    object_iterate(cl, true);
+    object_iterate(cl);
   }
-  void object_iterate(ObjectClosure* cl, bool do_perm);

   // Iterate over all objects allocated since the last collection, calling
   // "cl.do_object" on each. The heap must have been initialized properly
   // to support this function, or else this call will fail.
   virtual void object_iterate_since_last_GC(ObjectClosure* cl);
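Editor's note: the do_perm overloads of oop_iterate/object_iterate disappear along with the permanent generation; callers now supply an ExtendedOopClosure. A minimal sketch of such a closure follows, assuming a HotSpot build environment and the usual pair of do_oop overloads; none of this code is part of the changeset.

    #include "memory/iterator.hpp"   // assumed location of ExtendedOopClosure

    // Counts every reference-bearing field the heap iteration visits.
    class CountOopFieldsClosure : public ExtendedOopClosure {
      size_t _count;
     public:
      CountOopFieldsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }   // wide oop field
      virtual void do_oop(narrowOop* p) { _count++; }   // compressed oop field
      size_t count() const { return _count; }
    };

    // Possible usage: CountOopFieldsClosure cl; G1CollectedHeap::heap()->oop_iterate(&cl);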
@@ -1522,19 +1512,10 @@
   // information for young gen objects.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     return is_in_young(new_obj);
   }

-  // Can a compiler elide a store barrier when it writes
-  // a permanent oop into the heap? Applies when the compiler
-  // is storing x to the heap, where x->is_perm() is true.
-  virtual bool can_elide_permanent_oop_store_barriers() const {
-    // At least until perm gen collection is also G1-ified, at
-    // which point this should return false.
-    return true;
-  }
-
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
     // are capped at the humongous thresold and we want to
     // ensure that we don't try to allocate a TLAB as
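Editor's note: the strictly greater-than comparison in isHumongous is easier to see with concrete numbers. Below is a standalone sketch with assumed values (1 MB regions, half-region humongous threshold); neither constant is taken from the changeset.

    #include <cstddef>
    #include <cassert>

    static const size_t kGrainWords = 131072;                 // assumed: 1 MB region, 8-byte words
    static const size_t kHumongousThresholdWords = kGrainWords / 2;

    static bool is_humongous(size_t word_size) {
      return word_size > kHumongousThresholdWords;            // strictly greater-than
    }

    int main() {
      // TLABs are capped at the threshold, so a maximally sized TLAB
      // request must not be classified as humongous.
      assert(!is_humongous(kHumongousThresholdWords));
      assert(is_humongous(kHumongousThresholdWords + 1));     // one word over: humongous
      return 0;
    }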
@@ -1655,30 +1636,25 @@

   // Determine if an object is dead, given only the object itself.
   // This will find the region to which the object belongs and
   // then call the region version of the same function.

-  // Added if it is in permanent gen it isn't dead.
   // Added if it is NULL it isn't dead.

   bool is_obj_dead(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
-      if (Universe::heap()->is_in_permanent(obj))
-        return false;
-      else if (obj == NULL) return false;
+      if (obj == NULL) return false;
       else return true;
     }
     else return is_obj_dead(obj, hr);
   }

   bool is_obj_ill(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
-      if (Universe::heap()->is_in_permanent(obj))
-        return false;
-      else if (obj == NULL) return false;
+      if (obj == NULL) return false;
       else return true;
     }
     else return is_obj_ill(obj, hr);
   }

@@ -1873,13 +1849,11 @@
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
-                                        false /* end_of_gc */,
-                                        false /* retain */);
+      alloc_buf->retire(false /* end_of_gc */, false /* retain */);

      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise.
      alloc_buf->set_word_size(gclab_word_size);
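Editor's note: the waste-percentage test above decides whether a request is small enough to justify retiring the current GCLAB and allocating a fresh one. Below is a standalone worked example with assumed values for the desired GCLAB size and ParallelGCBufferWastePct; neither value comes from the changeset.

    #include <cstddef>
    #include <cassert>

    static const size_t kGclabWordSize = 4096;  // assumed desired_plab_sz(purpose)
    static const size_t kWastePct      = 10;    // assumed ParallelGCBufferWastePct

    // Mirrors the refill test: retire and refill only for small requests.
    static bool refill_gclab_for(size_t word_sz) {
      return word_sz * 100 < kGclabWordSize * kWastePct;
    }

    int main() {
      assert(refill_gclab_for(400));    // 40000 < 40960: retire the buffer, grab a new GCLAB
      assert(!refill_gclab_for(500));   // 50000 >= 40960: too big, allocate it directly
      return 0;
    }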