comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 20276:1526a938e670

8047818: G1 HeapRegions can no longer be ContiguousSpaces Summary: Change parent of G1OffsetTableContigSpace to CompactibleSpace, reimplement missing functionality Reviewed-by: stefank, jmasa, tschatzl
author mgerdin
date Thu, 26 Jun 2014 11:36:58 +0200
parents a8137787acfe
children 2c6ef90f030a
comparison
equal deleted inserted replaced
20275:e5035defa3c4 20276:1526a938e670
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
30 #include "gc_implementation/g1/heapRegion.inline.hpp" 30 #include "gc_implementation/g1/heapRegion.inline.hpp"
31 #include "gc_implementation/g1/heapRegionRemSet.hpp" 31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
33 #include "gc_implementation/shared/liveRange.hpp"
33 #include "memory/genOopClosures.inline.hpp" 34 #include "memory/genOopClosures.inline.hpp"
34 #include "memory/iterator.hpp" 35 #include "memory/iterator.hpp"
35 #include "memory/space.inline.hpp" 36 #include "memory/space.inline.hpp"
36 #include "oops/oop.inline.hpp" 37 #include "oops/oop.inline.hpp"
37 #include "runtime/orderAccess.inline.hpp" 38 #include "runtime/orderAccess.inline.hpp"
// Walks the objects in [cur, top) within region hr, applying cl to each
// object not known to be dead.  Steps with hr->block_size(cur) rather than
// oop::size() — NOTE(review): presumably so dead/unparseable blocks are
// skipped correctly after the CompactibleSpace rework (JDK-8047818);
// confirm against HeapRegion::block_size().
// Returns the address of the last block examined, i.e. the one whose end
// reaches or crosses top; the caller is responsible for handling it.
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  size_t oop_size = hr->block_size(cur);
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = hr->block_size(cur);
    next_obj = cur + oop_size;
  }
  return cur;
}
79 80
80 void HeapRegionDCTOC::walk_mem_region(MemRegion mr, 81 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
81 HeapWord* bottom, 82 HeapWord* bottom,
82 HeapWord* top) { 83 HeapWord* top) {
83 G1CollectedHeap* g1h = _g1; 84 G1CollectedHeap* g1h = _g1;
84 int oop_size; 85 size_t oop_size;
85 ExtendedOopClosure* cl2 = NULL; 86 ExtendedOopClosure* cl2 = NULL;
86 87
87 FilterIntoCSClosure intoCSFilt(this, g1h, _cl); 88 FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
88 FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl); 89 FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
89 90
99 // or it was allocated after marking finished, then we add it. Otherwise 100 // or it was allocated after marking finished, then we add it. Otherwise
100 // we can safely ignore the object. 101 // we can safely ignore the object.
101 if (!g1h->is_obj_dead(oop(bottom), _hr)) { 102 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
102 oop_size = oop(bottom)->oop_iterate(cl2, mr); 103 oop_size = oop(bottom)->oop_iterate(cl2, mr);
103 } else { 104 } else {
104 oop_size = oop(bottom)->size(); 105 oop_size = _hr->block_size(bottom);
105 } 106 }
106 107
107 bottom += oop_size; 108 bottom += oop_size;
108 109
109 if (bottom < top) { 110 if (bottom < top) {
450 cl->do_object(obj); 451 cl->do_object(obj);
451 } 452 }
452 if (cl->abort()) return cur; 453 if (cl->abort()) return cur;
453 // The check above must occur before the operation below, since an 454 // The check above must occur before the operation below, since an
454 // abort might invalidate the "size" operation. 455 // abort might invalidate the "size" operation.
455 cur += obj->size(); 456 cur += block_size(cur);
456 } 457 }
457 return NULL; 458 return NULL;
458 } 459 }
459 460
460 HeapWord* 461 HeapWord*
522 if (obj->klass_or_null() == NULL) { 523 if (obj->klass_or_null() == NULL) {
523 // Ran into an unparseable point. 524 // Ran into an unparseable point.
524 return cur; 525 return cur;
525 } 526 }
526 // Otherwise... 527 // Otherwise...
527 next = (cur + obj->size()); 528 next = cur + block_size(cur);
528 } 529 }
529 530
530 // If we finish the above loop...We have a parseable object that 531 // If we finish the above loop...We have a parseable object that
531 // begins on or before the start of the memory region, and ends 532 // begins on or before the start of the memory region, and ends
532 // inside or spans the entire region. 533 // inside or spans the entire region.
533 534
534 assert(obj == oop(cur), "sanity"); 535 assert(obj == oop(cur), "sanity");
535 assert(cur <= start && 536 assert(cur <= start, "Loop postcondition");
536 obj->klass_or_null() != NULL && 537 assert(obj->klass_or_null() != NULL, "Loop postcondition");
537 (cur + obj->size()) > start, 538 assert((cur + block_size(cur)) > start, "Loop postcondition");
538 "Loop postcondition");
539 539
540 if (!g1h->is_obj_dead(obj)) { 540 if (!g1h->is_obj_dead(obj)) {
541 obj->oop_iterate(cl, mr); 541 obj->oop_iterate(cl, mr);
542 } 542 }
543 543
547 // Ran into an unparseable point. 547 // Ran into an unparseable point.
548 return cur; 548 return cur;
549 }; 549 };
550 550
551 // Otherwise: 551 // Otherwise:
552 next = (cur + obj->size()); 552 next = cur + block_size(cur);
553 553
554 if (!g1h->is_obj_dead(obj)) { 554 if (!g1h->is_obj_dead(obj)) {
555 if (next < end || !obj->is_objArray()) { 555 if (next < end || !obj->is_objArray()) {
556 // This object either does not span the MemRegion 556 // This object either does not span the MemRegion
557 // boundary, or if it does it's not an array. 557 // boundary, or if it does it's not an array.
902 bool is_humongous = isHumongous(); 902 bool is_humongous = isHumongous();
903 bool do_bot_verify = !is_young(); 903 bool do_bot_verify = !is_young();
904 size_t object_num = 0; 904 size_t object_num = 0;
905 while (p < top()) { 905 while (p < top()) {
906 oop obj = oop(p); 906 oop obj = oop(p);
907 size_t obj_size = obj->size(); 907 size_t obj_size = block_size(p);
908 object_num += 1; 908 object_num += 1;
909 909
910 if (is_humongous != g1->isHumongous(obj_size)) { 910 if (is_humongous != g1->isHumongous(obj_size)) {
911 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size (" 911 gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
912 SIZE_FORMAT" words) in a %shumongous region", 912 SIZE_FORMAT" words) in a %shumongous region",
1038 1038
1039 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go 1039 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
1040 // away eventually. 1040 // away eventually.
1041 1041
// Empties the space: top and the saved mark word are reset to bottom(),
// the parent CompactibleSpace state is cleared (the mangle request is
// forwarded), and the block offset table is re-initialized so future
// allocations start from a clean slate.  set_top/set_saved_mark_word must
// run before CompactibleSpace::clear — NOTE(review): ContiguousSpace used
// to do this itself; after the reparenting to CompactibleSpace this class
// owns _top, so the resets are done explicitly here.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark_word(bottom());
  CompactibleSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
1047 1049
1048 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { 1050 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
1076 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1078 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1077 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); 1079 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
1078 if (_gc_time_stamp < g1h->get_gc_time_stamp()) 1080 if (_gc_time_stamp < g1h->get_gc_time_stamp())
1079 return top(); 1081 return top();
1080 else 1082 else
1081 return ContiguousSpace::saved_mark_word(); 1083 return Space::saved_mark_word();
1082 } 1084 }
1083 1085
1084 void G1OffsetTableContigSpace::record_top_and_timestamp() { 1086 void G1OffsetTableContigSpace::record_top_and_timestamp() {
1085 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1087 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1086 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp(); 1088 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
1091 // set_saved_mark and before _gc_time_stamp = ..., then the latter 1093 // set_saved_mark and before _gc_time_stamp = ..., then the latter
1092 // will be false, and it will pick up top() as the high water mark 1094 // will be false, and it will pick up top() as the high water mark
1093 // of region. If it does so after _gc_time_stamp = ..., then it 1095 // of region. If it does so after _gc_time_stamp = ..., then it
1094 // will pick up the right saved_mark_word() as the high water mark 1096 // will pick up the right saved_mark_word() as the high water mark
1095 // of the region. Either way, the behaviour will be correct. 1097 // of the region. Either way, the behaviour will be correct.
1096 ContiguousSpace::set_saved_mark(); 1098 Space::set_saved_mark_word(top());
1097 OrderAccess::storestore(); 1099 OrderAccess::storestore();
1098 _gc_time_stamp = curr_gc_time_stamp; 1100 _gc_time_stamp = curr_gc_time_stamp;
1099 // No need to do another barrier to flush the writes above. If 1101 // No need to do another barrier to flush the writes above. If
1100 // this is called in parallel with other threads trying to 1102 // this is called in parallel with other threads trying to
1101 // allocate into the region, the caller should call this while 1103 // allocate into the region, the caller should call this while
1102 // holding a lock and when the lock is released the writes will be 1104 // holding a lock and when the lock is released the writes will be
1103 // flushed. 1105 // flushed.
1104 } 1106 }
1105 } 1107 }
1108
// "Safe" variant of object iteration.  For this space plain
// object_iterate() already skips non-object blocks via block_is_obj(),
// so the two are equivalent and we simply delegate.
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}
1112
1113 void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
1114 HeapWord* p = bottom();
1115 while (p < top()) {
1116 if (block_is_obj(p)) {
1117 blk->do_object(oop(p));
1118 }
1119 p += block_size(p);
1120 }
1121 }
1122
// Hooks this space into the shared mark-compact machinery.
// SCAN_AND_FORWARD (from space.hpp) walks [bottom(), top()) computing
// forwarding pointers into cp.  The block_is_always_obj shim makes the
// macro treat every block as an object — NOTE(review): presumably valid
// here because G1 regions keep dead-but-parseable blocks that must still
// be forwarded over; confirm against the SCAN_AND_FORWARD contract.
#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj
1106 1128
// Constructs a space covering mr, backed by the shared block offset
// array sharedOffsetArray.  The offset table is bound to this space
// before CompactibleSpace::initialize runs; _top is then set to bottom()
// (empty space) and the offset-table bookkeeping reset.  The GC time
// stamp starts at 0, i.e. older than any collection.
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
  // _top is owned by this class now that the parent is CompactibleSpace
  // (not ContiguousSpace), so initialize it explicitly.
  _top = bottom();
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}