comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 4ca6dc0799b6 a45a4f5a9609
children 7848fc12602b
comparing 17606:45d7b2c7029d with 18041:52b4284cb496
@@ -1 +1 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
@@ -32 +32 @@
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
-#include "gc_implementation/g1/heapRegionSets.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/sharedHeap.hpp"
@@ -207 +207 @@
 friend class MutatorAllocRegion;
 friend class SurvivorGCAllocRegion;
 friend class OldGCAllocRegion;

 // Closures used in implementation.
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
 friend class G1ParCopyClosure;
 friend class G1IsAliveClosure;
 friend class G1EvacuateFollowersClosure;
 friend class G1ParScanThreadState;
 friend class G1ParScanClosureSuper;
@@ -241 +241 @@

 // The part of _g1_storage that is currently committed.
 MemRegion _g1_committed;

 // The master free list. It will satisfy all new region allocations.
-MasterFreeRegionList _free_list;
+FreeRegionList _free_list;

 // The secondary free list which contains regions that have been
 // freed up during the cleanup process. This will be appended to the
 // master free list when appropriate.
-SecondaryFreeRegionList _secondary_free_list;
+FreeRegionList _secondary_free_list;

 // It keeps track of the old regions.
-MasterOldRegionSet _old_set;
+HeapRegionSet _old_set;

 // It keeps track of the humongous regions.
-MasterHumongousRegionSet _humongous_set;
+HeapRegionSet _humongous_set;

 // The number of regions we could create by expansion.
 uint _expansion_regions;

 // The block offset table for the G1 heap.
@@ -495 +495 @@
 // This is the second level of trying to allocate a new region. If
 // new_region() didn't find a region on the free_list, this call will
 // check whether there's anything available on the
 // secondary_free_list and/or wait for more regions to appear on
 // that list, if _free_regions_coming is set.
-HeapRegion* new_region_try_secondary_free_list();
+HeapRegion* new_region_try_secondary_free_list(bool is_old);

 // Try to allocate a single non-humongous HeapRegion sufficient for
 // an allocation of the given word_size. If do_expand is true,
 // attempt to expand the heap if necessary to satisfy the allocation
-// request.
-HeapRegion* new_region(size_t word_size, bool do_expand);
+// request. If the region is to be used as an old region or for a
+// humongous object, set is_old to true. If not, to false.
+HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);

 // Attempt to satisfy a humongous allocation request of the given
 // size by finding a contiguous set of free regions of num_regions
 // length and remove them from the master free list. Return the
 // index of the first region or G1_NULL_HRS_INDEX if the search
@@ -604 +605 @@
 // allocation region, either by picking one or expanding the
 // heap, and then allocate a block of the given size. The block
 // may not be a humongous - it must fit into a single heap region.
 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

+HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
+HeapRegion* alloc_region,
+bool par,
+size_t word_size);
+
 // Ensure that no further allocations can happen in "r", bearing in mind
 // that parallel threads might be attempting allocations.
 void par_allocate_remaining_space(HeapRegion* r);

 // Allocation attempt during GC for a survivor object / PLAB.
@@ -701 +707 @@
 assert(!_in_cset_fast_test_base[index], "invariant");
 _in_cset_fast_test_base[index] = true;
 }

 // This is a fast test on whether a reference points into the
-// collection set or not. It does not assume that the reference
-// points into the heap; if it doesn't, it will return false.
-bool in_cset_fast_test(oop obj) {
-assert(_in_cset_fast_test != NULL, "sanity");
-if (_g1_committed.contains((HeapWord*) obj)) {
-// no need to subtract the bottom of the heap from obj,
-// _in_cset_fast_test is biased
-uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-bool ret = _in_cset_fast_test[index];
-// let's make sure the result is consistent with what the slower
-// test returns
-assert( ret || !obj_in_cs(obj), "sanity");
-assert(!ret || obj_in_cs(obj), "sanity");
-return ret;
-} else {
-return false;
-}
-}
+// collection set or not. Assume that the reference
+// points into the heap.
+inline bool in_cset_fast_test(oop obj);

 void clear_cset_fast_test() {
 assert(_in_cset_fast_test_base != NULL, "sanity");
 memset(_in_cset_fast_test_base, false,
 (size_t) _in_cset_fast_test_length * sizeof(bool));
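The body removed above shows the idea behind the fast test: a per-region boolean table indexed by the object's address shifted right by the region-size log, with the table pointer pre-biased so the heap bottom never has to be subtracted on a query. A minimal standalone sketch of that biasing trick (all names and sizes here are made up for illustration, this is not HotSpot code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Biased per-region flag table: (addr >> log_region_bytes) indexes it
    // directly, as the removed comment "_in_cset_fast_test is biased" hints.
    struct InCSetFastTest {
      bool*     biased_base;
      uintptr_t heap_bottom;
      int       log_region_bytes;

      InCSetFastTest(bool* storage, uintptr_t bottom, int log_bytes)
        : heap_bottom(bottom), log_region_bytes(log_bytes) {
        // Shift the base pointer back by the heap bottom's region index once,
        // so lookups need no subtraction.
        biased_base = storage - (bottom >> log_bytes);
      }

      bool contains(uintptr_t addr) const {
        return biased_base[addr >> log_region_bytes];
      }
    };

    int main() {
      const int log_bytes = 20;                 // pretend 1 MB regions
      bool storage[8];
      std::memset(storage, 0, sizeof(storage));
      uintptr_t bottom = uintptr_t(64) << 20;   // pretend heap bottom

      InCSetFastTest fast(storage, bottom, log_bytes);
      storage[3] = true;                        // region 3 is "in the collection set"

      assert(fast.contains(bottom + 3 * (uintptr_t(1) << log_bytes) + 128));
      assert(!fast.contains(bottom + 5 * (uintptr_t(1) << log_bytes)));
      return 0;
    }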
@@ -758 +749 @@

 G1YCType yc_type();

 G1HRPrinter* hr_printer() { return &_hr_printer; }

+// Frees a non-humongous region by initializing its contents and
+// adding it to the free list that's passed as a parameter (this is
+// usually a local list which will be appended to the master free
+// list later). The used bytes of freed regions are accumulated in
+// pre_used. If par is true, the region's RSet will not be freed
+// up. The assumption is that this will be done later.
+// The locked parameter indicates if the caller has already taken
+// care of proper synchronization. This may allow some optimizations.
+void free_region(HeapRegion* hr,
+FreeRegionList* free_list,
+bool par,
+bool locked = false);
+
+// Frees a humongous region by collapsing it into individual regions
+// and calling free_region() for each of them. The freed regions
+// will be added to the free list that's passed as a parameter (this
+// is usually a local list which will be appended to the master free
+// list later). The used bytes of freed regions are accumulated in
+// pre_used. If par is true, the region's RSet will not be freed
+// up. The assumption is that this will be done later.
+void free_humongous_region(HeapRegion* hr,
+FreeRegionList* free_list,
+bool par);
 protected:

 // Shrink the garbage-first heap by at most the given size (in bytes!).
 // (Rounds down to a HeapRegion boundary.)
 virtual void shrink(size_t expand_bytes);
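The comments on these new declarations describe the intended call pattern: callers free regions onto a local FreeRegionList and only later append that whole list to the master free list, so the shared list is touched once per batch rather than once per region. A rough standalone analogue of that batching pattern, using standard containers instead of the HotSpot list types:

    #include <list>
    #include <mutex>

    // Stand-in types; the point is the "local list, splice once" shape.
    struct Region { int id; };

    std::mutex master_lock;
    std::list<Region*> master_free_list;

    void free_regions_batched(Region** regions, int n) {
      std::list<Region*> local;            // thread-local scratch list
      for (int i = 0; i < n; i++) {
        // ... reset the region's metadata here, analogous to free_region() ...
        local.push_back(regions[i]);
      }
      // One lock acquisition for the whole batch, analogous to appending the
      // local list to the master free list after cleanup.
      std::lock_guard<std::mutex> guard(master_lock);
      master_free_list.splice(master_free_list.end(), local);
    }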
@@ -834 +848 @@
 void g1_process_strong_roots(bool is_scavenging,
 ScanningOption so,
 OopClosure* scan_non_heap_roots,
 OopsInHeapRegionClosure* scan_rs,
 G1KlassScanClosure* scan_klasses,
-int worker_i);
+uint worker_i);

 // Apply "blk" to all the weak roots of the system. These include
 // JNI weak roots, the code cache, system dictionary, symbol table,
 // string table, and referents of reachable weak refs.
 void g1_process_weak_roots(OopClosure* root_closure);
-
-// Frees a non-humongous region by initializing its contents and
-// adding it to the free list that's passed as a parameter (this is
-// usually a local list which will be appended to the master free
-// list later). The used bytes of freed regions are accumulated in
-// pre_used. If par is true, the region's RSet will not be freed
-// up. The assumption is that this will be done later.
-void free_region(HeapRegion* hr,
-size_t* pre_used,
-FreeRegionList* free_list,
-bool par);
-
-// Frees a humongous region by collapsing it into individual regions
-// and calling free_region() for each of them. The freed regions
-// will be added to the free list that's passed as a parameter (this
-// is usually a local list which will be appended to the master free
-// list later). The used bytes of freed regions are accumulated in
-// pre_used. If par is true, the region's RSet will not be freed
-// up. The assumption is that this will be done later.
-void free_humongous_region(HeapRegion* hr,
-size_t* pre_used,
-FreeRegionList* free_list,
-HumongousRegionSet* humongous_proxy_set,
-bool par);

 // Notifies all the necessary spaces that the committed space has
 // been updated (either expanded or shrunk). It should be called
 // after _g1_storage is updated.
 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@@ -1095 +1085 @@
 // Initialize the G1CollectedHeap to have the initial and
 // maximum sizes and remembered and barrier sets
 // specified by the policy object.
 jint initialize();

+virtual void stop();
+
 // Return the (conservative) maximum heap alignment for any G1 heap
 static size_t conservative_max_heap_alignment();

 // Initialize weak reference processing.
 virtual void ref_processing_init();
@@ -1157 +1149 @@
 // continues humongous regions too.
 void reset_gc_time_stamps(HeapRegion* hr);

 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
 DirtyCardQueue* into_cset_dcq,
-bool concurrent, int worker_i);
+bool concurrent, uint worker_i);

 // The shared block offset table array.
 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

 // Reference Processing accessors
@@ -1243 +1235 @@

 #ifdef ASSERT
 bool is_on_master_free_list(HeapRegion* hr) {
 return hr->containing_set() == &_free_list;
 }
-
-bool is_in_humongous_set(HeapRegion* hr) {
-return hr->containing_set() == &_humongous_set;
-}
 #endif // ASSERT

 // Wrapper for the region list operations that can be called from
 // methods outside this class.

-void secondary_free_list_add_as_tail(FreeRegionList* list) {
-_secondary_free_list.add_as_tail(list);
+void secondary_free_list_add(FreeRegionList* list) {
+_secondary_free_list.add_ordered(list);
 }

 void append_secondary_free_list() {
-_free_list.add_as_head(&_secondary_free_list);
+_free_list.add_ordered(&_secondary_free_list);
 }

 void append_secondary_free_list_if_not_empty_with_lock() {
 // If the secondary free list looks empty there's no reason to
 // take the lock and then try to append it.
@@ -1269 +1257 @@
 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 append_secondary_free_list();
 }
 }

-void old_set_remove(HeapRegion* hr) {
-_old_set.remove(hr);
-}
+inline void old_set_remove(HeapRegion* hr);

 size_t non_young_capacity_bytes() {
 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
 }

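The comment in append_secondary_free_list_if_not_empty_with_lock() spells out a small optimization: peek at the list without the lock, and only take SecondaryFreeList_lock when there may actually be something to move. A standalone sketch of the same idea, using an atomic length so the unlocked peek is well defined (all names are stand-ins, not the HotSpot types):

    #include <atomic>
    #include <list>
    #include <mutex>

    std::mutex          secondary_lock;
    std::list<int>      secondary_list;       // guarded by secondary_lock
    std::atomic<size_t> secondary_length{0};
    std::list<int>      master_list;

    void add_to_secondary(int region_id) {
      std::lock_guard<std::mutex> guard(secondary_lock);
      secondary_list.push_back(region_id);
      secondary_length.fetch_add(1, std::memory_order_relaxed);
    }

    void append_secondary_if_not_empty() {
      // Cheap unlocked peek: in the common "nothing freed" case the lock is
      // never taken at all.
      if (secondary_length.load(std::memory_order_relaxed) == 0) {
        return;
      }
      std::lock_guard<std::mutex> guard(secondary_lock);
      master_list.splice(master_list.end(), secondary_list);
      secondary_length.store(0, std::memory_order_relaxed);
    }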
@@ -1299 +1285 @@
 void collect_locked(GCCause::Cause cause);

 // True iff an evacuation has failed in the most-recent collection.
 bool evacuation_failed() { return _evacuation_failed; }

-// It will free a region if it has allocated objects in it that are
-// all dead. It calls either free_region() or
-// free_humongous_region() depending on the type of the region that
-// is passed to it.
-void free_region_if_empty(HeapRegion* hr,
-size_t* pre_used,
-FreeRegionList* free_list,
-OldRegionSet* old_proxy_set,
-HumongousRegionSet* humongous_proxy_set,
-HRRSCleanupTask* hrrs_cleanup_task,
-bool par);
-
-// It appends the free list to the master free list and updates the
-// master humongous list according to the contents of the proxy
-// list. It also adjusts the total used bytes according to pre_used
-// (if par is true, it will do so by taking the ParGCRareEvent_lock).
-void update_sets_after_freeing_regions(size_t pre_used,
-FreeRegionList* free_list,
-OldRegionSet* old_proxy_set,
-HumongousRegionSet* humongous_proxy_set,
-bool par);
+void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
+void prepend_to_freelist(FreeRegionList* list);
+void decrement_summary_bytes(size_t bytes);

 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 virtual bool is_in(const void* p) const;

 // Return "TRUE" iff the given object address is within the collection
@@ -1380 +1348 @@
 // Iterate over heap regions, in address order, terminating the
 // iteration early if the "doHeapRegion" method returns "true".
 void heap_region_iterate(HeapRegionClosure* blk) const;

 // Return the region with the given index. It assumes the index is valid.
-HeapRegion* region_at(uint index) const { return _hrs.at(index); }
+inline HeapRegion* region_at(uint index) const;

 // Divide the heap region sequence into "chunks" of some size (the number
 // of regions divided by the number of parallel threads times some
 // overpartition factor, currently 4). Assumes that this will be called
 // in parallel by ParallelGCThreads worker threads with discinct worker
@@ -1421 +1389 @@
 // the time stamps. Called when we reset the GC time stamp.
 void clear_cset_start_regions();

 // Given the id of a worker, obtain or calculate a suitable
 // starting region for iterating over the current collection set.
-HeapRegion* start_cset_region_for_worker(int worker_i);
+HeapRegion* start_cset_region_for_worker(uint worker_i);

 // This is a convenience method that is used by the
 // HeapRegionIterator classes to calculate the starting region for
 // each worker so that they do not all start from the same region.
 HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
@@ -1482 +1450 @@
 virtual bool supports_heap_inspection() const { return true; }

 // Section on thread-local allocation buffers (TLABs)
 // See CollectedHeap for semantics.

-virtual bool supports_tlab_allocation() const;
-virtual size_t tlab_capacity(Thread* thr) const;
-virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+bool supports_tlab_allocation() const;
+size_t tlab_capacity(Thread* ignored) const;
+size_t tlab_used(Thread* ignored) const;
+size_t max_tlab_size() const;
+size_t unsafe_max_tlab_alloc(Thread* ignored) const;

 // Can a compiler initialize a new object without store barriers?
 // This permission only extends from the creation of a new object
 // via a TLAB up to the first subsequent safepoint. If such permission
 // is granted for this heap type, the compiler promises to call
@@ -1507 +1477 @@

 virtual bool card_mark_must_follow_store() const {
 return true;
 }

-bool is_in_young(const oop obj) {
-HeapRegion* hr = heap_region_containing(obj);
-return hr != NULL && hr->is_young();
-}
+inline bool is_in_young(const oop obj);

 #ifdef ASSERT
 virtual bool is_in_partial_collection(const void* p);
 #endif

@@ -1523 +1490 @@
 // We don't need barriers for initializing stores to objects
 // in the young gen: for the SATB pre-barrier, there is no
 // pre-value that needs to be remembered; for the remembered-set
 // update logging post-barrier, we don't maintain remembered set
 // information for young gen objects.
-virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-return is_in_young(new_obj);
-}
+virtual inline bool can_elide_initializing_store_barrier(oop new_obj);

 // Returns "true" iff the given word_size is "very large".
 static bool isHumongous(size_t word_size) {
 // Note this has to be strictly greater-than as the TLABs
 // are capped at the humongous thresold and we want to
@@ -1569 +1534 @@
 static G1CollectedHeap* heap();

 void set_region_short_lived_locked(HeapRegion* hr);
 // add appropriate methods for any other surv rate groups

-YoungList* young_list() { return _young_list; }
+YoungList* young_list() const { return _young_list; }

 // debugging
 bool check_young_list_well_formed() {
 return _young_list->check_list_well_formed();
 }
@@ -1619 +1584 @@
 // This will find the region to which the object belongs and
 // then call the region version of the same function.

 // Added if it is NULL it isn't dead.

-bool is_obj_dead(const oop obj) const {
-const HeapRegion* hr = heap_region_containing(obj);
-if (hr == NULL) {
-if (obj == NULL) return false;
-else return true;
-}
-else return is_obj_dead(obj, hr);
-}
-
-bool is_obj_ill(const oop obj) const {
-const HeapRegion* hr = heap_region_containing(obj);
-if (hr == NULL) {
-if (obj == NULL) return false;
-else return true;
-}
-else return is_obj_ill(obj, hr);
-}
+inline bool is_obj_dead(const oop obj) const;
+
+inline bool is_obj_ill(const oop obj) const;

 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
 bool is_marked(oop obj, VerifyOption vo);
 const char* top_at_mark_start_str(VerifyOption vo);
@@ -1672 +1623 @@
 // in the collection set to regions in to-space. In the event
 // of an evacuation failure, nmethods that reference objects
 // that were not successfullly evacuated are not migrated.
 void migrate_strong_code_roots();

+// Free up superfluous code root memory.
+void purge_code_root_memory();
+
 // During an initial mark pause, mark all the code roots that
 // point into regions *not* in the collection set.
 void mark_strong_code_roots(uint worker_id);

 // Rebuild the stong code root lists for each region
 // after a full GC
 void rebuild_strong_code_roots();

+// Delete entries for dead interned string and clean up unreferenced symbols
+// in symbol table, possibly in parallel.
+void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
+
+// Redirty logged cards in the refinement queue.
+void redirty_logged_cards();
 // Verification

 // The following is just to alert the verification code
 // that a full collection has occurred and that the
 // remembered sets are no longer up to date.
@@ -1720 +1680 @@
 // parameter. The values for that parameter, and their meanings,
 // are the same as those above.

 bool is_obj_dead_cond(const oop obj,
 const HeapRegion* hr,
-const VerifyOption vo) const {
-switch (vo) {
-case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
-case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
-default: ShouldNotReachHere();
-}
-return false; // keep some compilers happy
-}
+const VerifyOption vo) const;

 bool is_obj_dead_cond(const oop obj,
-const VerifyOption vo) const {
-switch (vo) {
-case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
-case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
-default: ShouldNotReachHere();
-}
-return false; // keep some compilers happy
-}
+const VerifyOption vo) const;

 // Printing

 virtual void print_on(outputStream* st) const;
 virtual void print_extended_on(outputStream* st) const;
@@ -1758 +1702 @@
 // The following two methods are helpful for debugging RSet issues.
 void print_cset_rsets() PRODUCT_RETURN;
 void print_all_rsets() PRODUCT_RETURN;

 public:
-void stop_conc_gc_threads();
-
 size_t pending_card_num();
 size_t cards_scanned();

 protected:
 size_t _max_heap_capacity;
@@ -1784 +1726 @@
 void retire(bool end_of_gc, bool retain) {
 if (_retired)
 return;
 ParGCAllocBuffer::retire(end_of_gc, retain);
 _retired = true;
-}
-
-bool is_retired() {
-return _retired;
-}
-};
-
-class G1ParGCAllocBufferContainer {
-protected:
-static int const _priority_max = 2;
-G1ParGCAllocBuffer* _priority_buffer[_priority_max];
-
-public:
-G1ParGCAllocBufferContainer(size_t gclab_word_size) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-_priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
-}
-}
-
-~G1ParGCAllocBufferContainer() {
-for (int pr = 0; pr < _priority_max; ++pr) {
-assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
-delete _priority_buffer[pr];
-}
-}
-
-HeapWord* allocate(size_t word_sz) {
-HeapWord* obj;
-for (int pr = 0; pr < _priority_max; ++pr) {
-obj = _priority_buffer[pr]->allocate(word_sz);
-if (obj != NULL) return obj;
-}
-return obj;
-}
-
-bool contains(void* addr) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-if (_priority_buffer[pr]->contains(addr)) return true;
-}
-return false;
-}
-
-void undo_allocation(HeapWord* obj, size_t word_sz) {
-bool finish_undo;
-for (int pr = 0; pr < _priority_max; ++pr) {
-if (_priority_buffer[pr]->contains(obj)) {
-_priority_buffer[pr]->undo_allocation(obj, word_sz);
-finish_undo = true;
-}
-}
-if (!finish_undo) ShouldNotReachHere();
-}
-
-size_t words_remaining() {
-size_t result = 0;
-for (int pr = 0; pr < _priority_max; ++pr) {
-result += _priority_buffer[pr]->words_remaining();
-}
-return result;
-}
-
-size_t words_remaining_in_retired_buffer() {
-G1ParGCAllocBuffer* retired = _priority_buffer[0];
-return retired->words_remaining();
-}
-
-void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-_priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
-}
-}
-
-void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
-G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-retired_and_set->retire(end_of_gc, retain);
-retired_and_set->set_buf(buf);
-retired_and_set->set_word_size(word_sz);
-adjust_priority_order();
-}
-
-private:
-void adjust_priority_order() {
-G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-
-int last = _priority_max - 1;
-for (int pr = 0; pr < last; ++pr) {
-_priority_buffer[pr] = _priority_buffer[pr + 1];
-}
-_priority_buffer[last] = retired_and_set;
 }
 };

 class G1ParScanThreadState : public StackObj {
 protected:
@@ -1884 +1737 @@
 RefToScanQueue* _refs;
 DirtyCardQueue _dcq;
 G1SATBCardTableModRefBS* _ct_bs;
 G1RemSet* _g1_rem;

-G1ParGCAllocBufferContainer _surviving_alloc_buffer;
-G1ParGCAllocBufferContainer _tenured_alloc_buffer;
-G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
+G1ParGCAllocBuffer _surviving_alloc_buffer;
+G1ParGCAllocBuffer _tenured_alloc_buffer;
+G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
 ageTable _age_table;
+
+G1ParScanClosure _scanner;

 size_t _alloc_buffer_waste;
 size_t _undo_waste;

 OopsInHeapRegionClosure* _evac_failure_cl;
-G1ParScanHeapEvacClosure* _evac_cl;
-G1ParScanPartialArrayClosure* _partial_scan_cl;

 int _hash_seed;
 uint _queue_num;

 size_t _term_attempts;
@@ -1922 +1775 @@
 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

 DirtyCardQueue& dirty_card_queue() { return _dcq; }
 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

-template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
-if (!from->is_survivor()) {
-_g1_rem->par_write_ref(from, p, tid);
-}
-}
+template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);

 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
 // If the new value of the field points to the same region or
 // is the to-space, we don't need to include it in the Rset updates.
 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
@@ -1941 +1790 @@
 }
 }
 }

 public:
-G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
+G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);

 ~G1ParScanThreadState() {
 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
 }

 RefToScanQueue* refs() { return _refs; }
 ageTable* age_table() { return &_age_table; }

-G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
+G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
 return _alloc_buffers[purpose];
 }

 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
 size_t undo_waste() const { return _undo_waste; }
@@ -1968 +1817 @@
 template <class T> void push_on_queue(T* ref) {
 assert(verify_ref(ref), "sanity");
 refs()->push(ref);
 }

-template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
-if (G1DeferredRSUpdate) {
-deferred_rs_update(from, p, tid);
-} else {
-immediate_rs_update(from, p, tid);
-}
-}
+template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);

 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 HeapWord* obj = NULL;
 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
+G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+alloc_buf->retire(false /* end_of_gc */, false /* retain */);

 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
 if (buf == NULL) return NULL; // Let caller handle allocation failure.
-
-add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
-alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
+// Otherwise.
+alloc_buf->set_word_size(gclab_word_size);
+alloc_buf->set_buf(buf);

 obj = alloc_buf->allocate(word_sz);
 assert(obj != NULL, "buffer was definitely big enough...");
 } else {
 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
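allocate_slow() above encodes the PLAB refill policy: a request that is small relative to the PLAB size (the ParallelGCBufferWastePct check) retires the current buffer and refills it, while a large request is allocated on its own so it does not burn PLAB space. A standalone sketch of that decision, with made-up sizes and a toy bump-pointer buffer (not the HotSpot ParGCAllocBuffer):

    #include <cstdint>
    #include <vector>

    struct Plab {
      std::vector<uint64_t> storage;
      size_t top;
      explicit Plab(size_t words) : storage(words), top(0) {}
      uint64_t* allocate(size_t words) {
        if (top + words > storage.size()) return nullptr;  // buffer exhausted
        uint64_t* p = &storage[top];
        top += words;
        return p;
      }
    };

    static const size_t plab_words = 1024;
    static const size_t waste_pct  = 10;  // plays the role of ParallelGCBufferWastePct

    uint64_t* allocate_slow(Plab& plab, size_t word_sz) {
      if (word_sz * 100 < plab_words * waste_pct) {
        // Small object: accept wasting whatever is left in the old buffer,
        // start a fresh one and bump-allocate from it (mirrors retire + set_buf).
        plab = Plab(plab_words);
        return plab.allocate(word_sz);
      }
      // Large object: allocate it directly, outside the PLAB.
      return new uint64_t[word_sz];
    }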
@@ -2020 +1865 @@
 }
 OopsInHeapRegionClosure* evac_failure_closure() {
 return _evac_failure_cl;
 }

-void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
-_evac_cl = evac_cl;
-}
-
-void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
-_partial_scan_cl = partial_scan_cl;
-}
-
 int* hash_seed() { return &_hash_seed; }
 uint queue_num() { return _queue_num; }

 size_t term_attempts() const { return _term_attempts; }
 void note_term_attempt() { _term_attempts++; }
@@ -2075 +1912 @@
 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
 true /* end_of_gc */,
 false /* retain */);
 }
 }
-
-template <class T> void deal_with_reference(T* ref_to_scan) {
-if (has_partial_array_mask(ref_to_scan)) {
-_partial_scan_cl->do_oop_nv(ref_to_scan);
-} else {
-// Note: we can use "raw" versions of "region_containing" because
-// "obj_to_scan" is definitely in the heap, and is not in a
-// humongous region.
-HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-_evac_cl->set_region(r);
-_evac_cl->do_oop_nv(ref_to_scan);
-}
-}
-
-void deal_with_reference(StarTask ref) {
-assert(verify_task(ref), "sanity");
-if (ref.is_narrow()) {
-deal_with_reference((narrowOop*)ref);
-} else {
-deal_with_reference((oop*)ref);
-}
-}
-
+private:
+#define G1_PARTIAL_ARRAY_MASK 0x2
+
+inline bool has_partial_array_mask(oop* ref) const {
+return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+}
+
+// We never encode partial array oops as narrowOop*, so return false immediately.
+// This allows the compiler to create optimized code when popping references from
+// the work queue.
+inline bool has_partial_array_mask(narrowOop* ref) const {
+assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+return false;
+}
+
+// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+// We always encode partial arrays as regular oop, to allow the
+// specialization for has_partial_array_mask() for narrowOops above.
+// This means that unintentional use of this method with narrowOops are caught
+// by the compiler.
+inline oop* set_partial_array_mask(oop obj) const {
+assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+}
+
+inline oop clear_partial_array_mask(oop* ref) const {
+return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+}
+
+inline void do_oop_partial_array(oop* p);
+
+// This method is applied to the fields of the objects that have just been copied.
+template <class T> void do_oop_evac(T* p, HeapRegion* from) {
+assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+"Reference should not be NULL here as such are never pushed to the task queue.");
+oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+// Although we never intentionally push references outside of the collection
+// set, due to (benign) races in the claim mechanism during RSet scanning more
+// than one thread might claim the same card. So the same card may be
+// processed multiple times. So redo this check.
+if (_g1h->in_cset_fast_test(obj)) {
+oop forwardee;
+if (obj->is_forwarded()) {
+forwardee = obj->forwardee();
+} else {
+forwardee = copy_to_survivor_space(obj);
+}
+assert(forwardee != NULL, "forwardee should not be NULL");
+oopDesc::encode_store_heap_oop(p, forwardee);
+}
+
+assert(obj != NULL, "Must be");
+update_rs(from, p, queue_num());
+}
+public:
+
+oop copy_to_survivor_space(oop const obj);
+
+template <class T> inline void deal_with_reference(T* ref_to_scan);
+
+inline void deal_with_reference(StarTask ref);
+
+public:
 void trim_queue();
 };

 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
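The new private helpers in the last hunk stash a flag in bit 1 of an oop*: because object addresses are word aligned, that bit is always zero in a real reference, so a task-queue entry can be tagged as "continue scanning this partially processed object array" without any extra storage. A standalone sketch of that tag round trip, using plain pointers instead of oop/narrowOop (illustrative only):

    #include <cassert>
    #include <cstdint>

    // Same value as G1_PARTIAL_ARRAY_MASK above.
    static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

    void** set_partial_array_mask(void* obj) {
      // Bit 1 must be clear in an aligned pointer, otherwise tagging loses data.
      assert((reinterpret_cast<uintptr_t>(obj) & PARTIAL_ARRAY_MASK) == 0 && "information loss");
      return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(obj) | PARTIAL_ARRAY_MASK);
    }

    bool has_partial_array_mask(void** ref) {
      return (reinterpret_cast<uintptr_t>(ref) & PARTIAL_ARRAY_MASK) != 0;
    }

    void* clear_partial_array_mask(void** ref) {
      return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ref) & ~PARTIAL_ARRAY_MASK);
    }

    int main() {
      alignas(8) static int dummy_object = 0;   // stands in for an objArrayOop
      void** tagged = set_partial_array_mask(&dummy_object);
      assert(has_partial_array_mask(tagged));
      assert(clear_partial_array_mask(tagged) == &dummy_object);
      return 0;
    }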