comparison src/share/vm/gc_implementation/g1/heapRegion.hpp @ 2152:0fa27f37d4d4

6977804: G1: remove the zero-filling thread Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (before we'd have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification. Reviewed-by: jcoomes, johnc
author tonyp
date Wed, 19 Jan 2011 19:30:42 -0500
parents b158bed62ef5
children 1216415d8e35
comparison
equal deleted inserted replaced
2151:cb913d743d09 2152:0fa27f37d4d4
48 class CompactibleSpace; 48 class CompactibleSpace;
49 class ContiguousSpace; 49 class ContiguousSpace;
50 class HeapRegionRemSet; 50 class HeapRegionRemSet;
51 class HeapRegionRemSetIterator; 51 class HeapRegionRemSetIterator;
52 class HeapRegion; 52 class HeapRegion;
53 class HeapRegionSetBase;
54
55 #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
56 #define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
57 (__hr)->top(), (__hr)->end()
53 58
54 // A dirty card to oop closure for heap regions. It 59 // A dirty card to oop closure for heap regions. It
55 // knows how to get the G1 heap and how to use the bitmap 60 // knows how to get the G1 heap and how to use the bitmap
56 // in the concurrent marker used by G1 to filter remembered 61 // in the concurrent marker used by G1 to filter remembered
57 // sets. 62 // sets.
225 HeapWord* _orig_end; 230 HeapWord* _orig_end;
226 231
227 // True iff the region is in current collection_set. 232 // True iff the region is in current collection_set.
228 bool _in_collection_set; 233 bool _in_collection_set;
229 234
230 // True iff the region is on the unclean list, waiting to be zero filled.
231 bool _is_on_unclean_list;
232
233 // True iff the region is on the free list, ready for allocation.
234 bool _is_on_free_list;
235
236 // Is this or has it been an allocation region in the current collection 235 // Is this or has it been an allocation region in the current collection
237 // pause. 236 // pause.
238 bool _is_gc_alloc_region; 237 bool _is_gc_alloc_region;
239 238
240 // True iff an attempt to evacuate an object in the region failed. 239 // True iff an attempt to evacuate an object in the region failed.
251 // next region in the young "generation" region set 250 // next region in the young "generation" region set
252 HeapRegion* _next_young_region; 251 HeapRegion* _next_young_region;
253 252
254 // Next region whose cards need cleaning 253 // Next region whose cards need cleaning
255 HeapRegion* _next_dirty_cards_region; 254 HeapRegion* _next_dirty_cards_region;
255
256 // Fields used by the HeapRegionSetBase class and subclasses.
257 HeapRegion* _next;
258 #ifdef ASSERT
259 HeapRegionSetBase* _containing_set;
260 #endif // ASSERT
261 bool _pending_removal;
256 262
257 // For parallel heapRegion traversal. 263 // For parallel heapRegion traversal.
258 jint _claimed; 264 jint _claimed;
259 265
260 // We use concurrent marking to determine the amount of live data 266 // We use concurrent marking to determine the amount of live data
303 _prev_top_at_mark_start = bot; 309 _prev_top_at_mark_start = bot;
304 _next_top_at_mark_start = bot; 310 _next_top_at_mark_start = bot;
305 _top_at_conc_mark_count = bot; 311 _top_at_conc_mark_count = bot;
306 } 312 }
307 313
308 jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
309 Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
310 // made it so.
311
312 void set_young_type(YoungType new_type) { 314 void set_young_type(YoungType new_type) {
313 //assert(_young_type != new_type, "setting the same type" ); 315 //assert(_young_type != new_type, "setting the same type" );
314 // TODO: add more assertions here 316 // TODO: add more assertions here
315 _young_type = new_type; 317 _young_type = new_type;
316 } 318 }
358 FinalCountClaimValue = 1, 360 FinalCountClaimValue = 1,
359 NoteEndClaimValue = 2, 361 NoteEndClaimValue = 2,
360 ScrubRemSetClaimValue = 3, 362 ScrubRemSetClaimValue = 3,
361 ParVerifyClaimValue = 4, 363 ParVerifyClaimValue = 4,
362 RebuildRSClaimValue = 5 364 RebuildRSClaimValue = 5
363 };
364
365 // Concurrent refinement requires contiguous heap regions (in which TLABs
366 // might be allocated) to be zero-filled. Each region therefore has a
367 // zero-fill-state.
368 enum ZeroFillState {
369 NotZeroFilled,
370 ZeroFilling,
371 ZeroFilled,
372 Allocated
373 }; 365 };
374 366
375 inline HeapWord* par_allocate_no_bot_updates(size_t word_size) { 367 inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
376 assert(is_young(), "we can only skip BOT updates on young regions"); 368 assert(is_young(), "we can only skip BOT updates on young regions");
377 return ContiguousSpace::par_allocate(word_size); 369 return ContiguousSpace::par_allocate(word_size);
454 // Makes the current region be a "continues humongous" 446 // Makes the current region be a "continues humongous"
455 // region. first_hr is the "start humongous" region of the series 447 // region. first_hr is the "start humongous" region of the series
456 // which this region will be part of. 448 // which this region will be part of.
457 void set_continuesHumongous(HeapRegion* first_hr); 449 void set_continuesHumongous(HeapRegion* first_hr);
458 450
451 // Unsets the humongous-related fields on the region.
452 void set_notHumongous();
453
459 // If the region has a remembered set, return a pointer to it. 454 // If the region has a remembered set, return a pointer to it.
460 HeapRegionRemSet* rem_set() const { 455 HeapRegionRemSet* rem_set() const {
461 return _rem_set; 456 return _rem_set;
462 } 457 }
463 458
500 assert(is_gc_alloc_region(), "should only invoke on member of CS."); 495 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
501 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS."); 496 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
502 _next_in_special_set = r; 497 _next_in_special_set = r;
503 } 498 }
504 499
505 bool is_on_free_list() { 500 // Methods used by the HeapRegionSetBase class and subclasses.
506 return _is_on_free_list; 501
507 } 502 // Getter and setter for the next field used to link regions into
508 503 // linked lists.
509 void set_on_free_list(bool b) { 504 HeapRegion* next() { return _next; }
510 _is_on_free_list = b; 505
511 } 506 void set_next(HeapRegion* next) { _next = next; }
512 507
513 HeapRegion* next_from_free_list() { 508 // Every region added to a set is tagged with a reference to that
514 assert(is_on_free_list(), 509 // set. This is used for doing consistency checking to make sure that
515 "Should only invoke on free space."); 510 // the contents of a set are as they should be and it's only
516 assert(_next_in_special_set == NULL || 511 // available in non-product builds.
517 _next_in_special_set->is_on_free_list(), 512 #ifdef ASSERT
518 "Malformed Free List."); 513 void set_containing_set(HeapRegionSetBase* containing_set) {
519 return _next_in_special_set; 514 assert((containing_set == NULL && _containing_set != NULL) ||
520 } 515 (containing_set != NULL && _containing_set == NULL),
521 516 err_msg("containing_set: "PTR_FORMAT" "
522 void set_next_on_free_list(HeapRegion* r) { 517 "_containing_set: "PTR_FORMAT,
523 assert(r == NULL || r->is_on_free_list(), "Malformed free list."); 518 containing_set, _containing_set));
524 _next_in_special_set = r; 519
525 } 520 _containing_set = containing_set;
526 521 }
527 bool is_on_unclean_list() { 522
528 return _is_on_unclean_list; 523 HeapRegionSetBase* containing_set() { return _containing_set; }
529 } 524 #else // ASSERT
530 525 void set_containing_set(HeapRegionSetBase* containing_set) { }
531 void set_on_unclean_list(bool b); 526
532 527 // containing_set() is only used in asserts so there's no reason
533 HeapRegion* next_from_unclean_list() { 528 // to provide a dummy version of it.
534 assert(is_on_unclean_list(), 529 #endif // ASSERT
535 "Should only invoke on unclean space."); 530
536 assert(_next_in_special_set == NULL || 531 // If we want to remove regions from a list in bulk we can simply tag
537 _next_in_special_set->is_on_unclean_list(), 532 // them with the pending_removal tag and call the
538 "Malformed unclean List."); 533 // remove_all_pending() method on the list.
539 return _next_in_special_set; 534
540 } 535 bool pending_removal() { return _pending_removal; }
541 536
542 void set_next_on_unclean_list(HeapRegion* r); 537 void set_pending_removal(bool pending_removal) {
538 // We can only set pending_removal to true, if it's false and the
539 // region belongs to a set.
540 assert(!pending_removal ||
541 (!_pending_removal && containing_set() != NULL), "pre-condition");
542 // We can only set pending_removal to false, if it's true and the
543 // region does not belong to a set.
544 assert( pending_removal ||
545 ( _pending_removal && containing_set() == NULL), "pre-condition");
546
547 _pending_removal = pending_removal;
548 }
543 549
544 HeapRegion* get_next_young_region() { return _next_young_region; } 550 HeapRegion* get_next_young_region() { return _next_young_region; }
545 void set_next_young_region(HeapRegion* hr) { 551 void set_next_young_region(HeapRegion* hr) {
546 _next_young_region = hr; 552 _next_young_region = hr;
547 } 553 }
556 562
557 // Reset HR stuff to default values. 563 // Reset HR stuff to default values.
558 void hr_clear(bool par, bool clear_space); 564 void hr_clear(bool par, bool clear_space);
559 565
560 void initialize(MemRegion mr, bool clear_space, bool mangle_space); 566 void initialize(MemRegion mr, bool clear_space, bool mangle_space);
561
562 // Ensure that "this" is zero-filled.
563 void ensure_zero_filled();
564 // This one requires that the calling thread holds ZF_mon.
565 void ensure_zero_filled_locked();
566 567
567 // Get the start of the unmarked area in this region. 568 // Get the start of the unmarked area in this region.
568 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; } 569 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
569 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; } 570 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
570 571
796 // Requires that "addr" is within the region. Returns the start of the 797 // Requires that "addr" is within the region. Returns the start of the
797 // first ("careful") block that starts at or after "addr", or else the 798 // first ("careful") block that starts at or after "addr", or else the
798 // "end" of the region if there is no such block. 799 // "end" of the region if there is no such block.
799 HeapWord* next_block_start_careful(HeapWord* addr); 800 HeapWord* next_block_start_careful(HeapWord* addr);
800 801
801 // Returns the zero-fill-state of the current region.
802 ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
803 bool zero_fill_is_allocated() { return _zfs == Allocated; }
804 Thread* zero_filler() { return _zero_filler; }
805
806 // Indicate that the contents of the region are unknown, and therefore
807 // might require zero-filling.
808 void set_zero_fill_needed() {
809 set_zero_fill_state_work(NotZeroFilled);
810 }
811 void set_zero_fill_in_progress(Thread* t) {
812 set_zero_fill_state_work(ZeroFilling);
813 _zero_filler = t;
814 }
815 void set_zero_fill_complete();
816 void set_zero_fill_allocated() {
817 set_zero_fill_state_work(Allocated);
818 }
819
820 void set_zero_fill_state_work(ZeroFillState zfs);
821
822 // This is called when a full collection shrinks the heap.
823 // We want to set the heap region to a value which says
824 // it is no longer part of the heap. For now, we'll let "NotZF" fill
825 // that role.
826 void reset_zero_fill() {
827 set_zero_fill_state_work(NotZeroFilled);
828 _zero_filler = NULL;
829 }
830
831 size_t recorded_rs_length() const { return _recorded_rs_length; } 802 size_t recorded_rs_length() const { return _recorded_rs_length; }
832 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; } 803 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
833 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; } 804 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
834 805
835 void set_recorded_rs_length(size_t rs_length) { 806 void set_recorded_rs_length(size_t rs_length) {
864 // the "next" marking information at the end of remark. 835 // the "next" marking information at the end of remark.
865 void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const; 836 void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
866 837
867 // Override; it uses the "prev" marking information 838 // Override; it uses the "prev" marking information
868 virtual void verify(bool allow_dirty) const; 839 virtual void verify(bool allow_dirty) const;
869
870 #ifdef DEBUG
871 HeapWord* allocate(size_t size);
872 #endif
873 }; 840 };
874 841
875 // HeapRegionClosure is used for iterating over regions. 842 // HeapRegionClosure is used for iterating over regions.
876 // Terminates the iteration when the "doHeapRegion" method returns "true". 843 // Terminates the iteration when the "doHeapRegion" method returns "true".
877 class HeapRegionClosure : public StackObj { 844 class HeapRegionClosure : public StackObj {
890 // True after iteration if the closure was applied to all heap regions 857 // True after iteration if the closure was applied to all heap regions
891 // and returned "false" in all cases. 858 // and returned "false" in all cases.
892 bool complete() { return _complete; } 859 bool complete() { return _complete; }
893 }; 860 };
894 861
895 // A linked lists of heap regions. It leaves the "next" field
896 // unspecified; that's up to subtypes.
897 class RegionList VALUE_OBJ_CLASS_SPEC {
898 protected:
899 virtual HeapRegion* get_next(HeapRegion* chr) = 0;
900 virtual void set_next(HeapRegion* chr,
901 HeapRegion* new_next) = 0;
902
903 HeapRegion* _hd;
904 HeapRegion* _tl;
905 size_t _sz;
906
907 // Protected constructor because this type is only meaningful
908 // when the _get/_set next functions are defined.
909 RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
910 public:
911 void reset() {
912 _hd = NULL;
913 _tl = NULL;
914 _sz = 0;
915 }
916 HeapRegion* hd() { return _hd; }
917 HeapRegion* tl() { return _tl; }
918 size_t sz() { return _sz; }
919 size_t length();
920
921 bool well_formed() {
922 return
923 ((hd() == NULL && tl() == NULL && sz() == 0)
924 || (hd() != NULL && tl() != NULL && sz() > 0))
925 && (sz() == length());
926 }
927 virtual void insert_before_head(HeapRegion* r);
928 void prepend_list(RegionList* new_list);
929 virtual HeapRegion* pop();
930 void dec_sz() { _sz--; }
931 // Requires that "r" is an element of the list, and is not the tail.
932 void delete_after(HeapRegion* r);
933 };
934
935 class EmptyNonHRegionList: public RegionList {
936 protected:
937 // Protected constructor because this type is only meaningful
938 // when the _get/_set next functions are defined.
939 EmptyNonHRegionList() : RegionList() {}
940
941 public:
942 void insert_before_head(HeapRegion* r) {
943 // assert(r->is_empty(), "Better be empty");
944 assert(!r->isHumongous(), "Better not be humongous.");
945 RegionList::insert_before_head(r);
946 }
947 void prepend_list(EmptyNonHRegionList* new_list) {
948 // assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
949 // "Better be empty");
950 assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
951 "Better not be humongous.");
952 // assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
953 // "Better be empty");
954 assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
955 "Better not be humongous.");
956 RegionList::prepend_list(new_list);
957 }
958 };
959
960 class UncleanRegionList: public EmptyNonHRegionList {
961 public:
962 HeapRegion* get_next(HeapRegion* hr) {
963 return hr->next_from_unclean_list();
964 }
965 void set_next(HeapRegion* hr, HeapRegion* new_next) {
966 hr->set_next_on_unclean_list(new_next);
967 }
968
969 UncleanRegionList() : EmptyNonHRegionList() {}
970
971 void insert_before_head(HeapRegion* r) {
972 assert(!r->is_on_free_list(),
973 "Better not already be on free list");
974 assert(!r->is_on_unclean_list(),
975 "Better not already be on unclean list");
976 r->set_zero_fill_needed();
977 r->set_on_unclean_list(true);
978 EmptyNonHRegionList::insert_before_head(r);
979 }
980 void prepend_list(UncleanRegionList* new_list) {
981 assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
982 "Better not already be on free list");
983 assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
984 "Better already be marked as on unclean list");
985 assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
986 "Better not already be on free list");
987 assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
988 "Better already be marked as on unclean list");
989 EmptyNonHRegionList::prepend_list(new_list);
990 }
991 HeapRegion* pop() {
992 HeapRegion* res = RegionList::pop();
993 if (res != NULL) res->set_on_unclean_list(false);
994 return res;
995 }
996 };
997
998 // Local Variables: ***
999 // c-indentation-style: gnu ***
1000 // End: ***
1001
1002 #endif // SERIALGC 862 #endif // SERIALGC
1003 863
1004 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP 864 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP