comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 e8bf410d5e23
children dd9cc155639c
comparing 20184:84105dcdb05b with 20804:7848fc12602b
@@ -26 +26 @@
 #include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
+#include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"

 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

 int HeapRegion::LogOfHRGrainBytes = 0;
 int HeapRegion::LogOfHRGrainWords = 0;
@@ -44 +48 @@

 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                  HeapRegion* hr, ExtendedOopClosure* cl,
                                  CardTableModRefBS::PrecisionStyle precision,
                                  FilterKind fk) :
-  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
+  DirtyCardToOopClosure(hr, cl, precision, NULL),
   _hr(hr), _fk(fk), _g1(g1) { }

 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
@@ -56 +60 @@
 template<class ClosureType>
 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                                HeapRegion* hr,
                                HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
     if (!g1h->is_obj_dead(cur_oop, hr)) {
       // Bottom lies entirely below top, so we can call the
       // non-memRegion version of oop_iterate below.
       cur_oop->oop_iterate(cl);
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
 }

-void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
-                                              HeapWord* bottom,
-                                              HeapWord* top,
-                                              ExtendedOopClosure* cl) {
+void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
+                                      HeapWord* bottom,
+                                      HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;

-  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
-  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
+  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
+  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);

   switch (_fk) {
-  case NoFilterKind: cl2 = cl; break;
+  case NoFilterKind: cl2 = _cl; break;
   case IntoCSFilterKind: cl2 = &intoCSFilt; break;
   case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
   default: ShouldNotReachHere();
   }

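Note on the int-to-size_t changes above: oop(cur)->size() needs a parseable object header at cur, but on the jdk8u40 side a region may contain runs of dead, unparseable words (eager reclaim, class unloading), so HeapRegion::block_size() becomes the safe query. A minimal sketch of the contract, assuming hypothetical helpers is_marked_dead() and next_live_word() (this is not the HotSpot implementation):

    // Sketch only: block_size() must return a walkable size for any
    // address in [bottom, top), even when p starts a run of dead words.
    size_t HeapRegionSketch::block_size(const HeapWord* p) const {
      if (!is_marked_dead(p)) {
        return oop(p)->size();                   // live object: header is valid
      }
      // One "block" can span several dead objects: skip ahead to the
      // next live word recorded by the previous marking.
      return pointer_delta(next_live_word(p), p);
    }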
@@ -98 +101 @@
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
-    oop_size = oop(bottom)->size();
+    oop_size = _hr->block_size(bottom);
   }

   bottom += oop_size;

   if (bottom < top) {
     // We replicate the loop below for several kinds of possible filters.
     switch (_fk) {
     case NoFilterKind:
-      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
+      bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
       break;

     case IntoCSFilterKind: {
-      FilterIntoCSClosure filt(this, g1h, cl);
+      FilterIntoCSClosure filt(this, g1h, _cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }

     case OutOfRegionFilterKind: {
-      FilterOutOfRegionClosure filt(_hr, cl);
+      FilterOutOfRegionClosure filt(_hr, _cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }

     default:
@@ -133 +136 @@
       oop(bottom)->oop_iterate(cl2, mr);
     }
   }
 }

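The _fk switch above picks a wrapping closure once rather than branching per field. For reference, a hedged sketch of what the out-of-region filter can look like (the real closure lives in g1OopClosures.inline.hpp; this free function is illustrative only):

    // Sketch: apply 'oc' only to fields whose referent lies outside
    // [r_bottom, r_end); only cross-region pointers are interesting here.
    template <class T>
    inline void filter_out_of_region_sketch(T* p, HeapWord* r_bottom,
                                            HeapWord* r_end, OopClosure* oc) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        if ((HeapWord*)obj < r_bottom || (HeapWord*)obj >= r_end) {
          oc->do_oop(p);   // forward to the wrapped closure
        }
      }
    }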
@@ -138 +141 @@
-// Minimum region size; we won't go lower than that.
-// We might want to decrease this in the future, to deal with small
-// heaps a bit more efficiently.
-#define MIN_REGION_SIZE  (      1024 * 1024 )
-
-// Maximum region size; we don't go higher than that. There's a good
-// reason for having an upper bound. We don't want regions to get too
-// large, otherwise cleanup's effectiveness would decrease as there
-// will be fewer opportunities to find totally empty regions after
-// marking.
-#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )
-
-// The automatic region size calculation will try to have around this
-// many regions in the heap (based on the min heap size).
-#define TARGET_REGION_NUMBER          2048
-
 size_t HeapRegion::max_region_size() {
-  return (size_t)MAX_REGION_SIZE;
+  return HeapRegionBounds::max_size();
 }

 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
-                       (uintx) MIN_REGION_SIZE);
+    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+                       (uintx) HeapRegionBounds::min_size());
   }

   int region_size_log = log2_long((jlong) region_size);
   // Recalculate the region size to make sure it's a power of
   // 2. This means that region_size is the largest power of 2 that's
   // <= what we've calculated so far.
   region_size = ((uintx)1 << region_size_log);

   // Now make sure that we don't go over or under our limits.
-  if (region_size < MIN_REGION_SIZE) {
-    region_size = MIN_REGION_SIZE;
-  } else if (region_size > MAX_REGION_SIZE) {
-    region_size = MAX_REGION_SIZE;
+  if (region_size < HeapRegionBounds::min_size()) {
+    region_size = HeapRegionBounds::min_size();
+  } else if (region_size > HeapRegionBounds::max_size()) {
+    region_size = HeapRegionBounds::max_size();
   }

   // And recalculate the log.
   region_size_log = log2_long((jlong) region_size);

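To make the sizing rule concrete, here is a self-contained sketch using the fixed bounds the left side hard-codes (1 MB minimum, 32 MB maximum, about 2048 target regions); the right side reads the same values from HeapRegionBounds. For a hypothetical -Xms2g -Xmx8g: average = 5 GB, 5 GB / 2048 is about 2.5 MB, rounded down to a power of two gives 2 MB regions, inside [1 MB, 32 MB].

    #include <cstdint>

    // Standalone illustration of setup_heap_region_size(), old bounds.
    uint64_t region_size_for(uint64_t initial_heap, uint64_t max_heap) {
      const uint64_t kMin = 1024 * 1024;          // MIN_REGION_SIZE
      const uint64_t kMax = 32 * 1024 * 1024;     // MAX_REGION_SIZE
      const uint64_t kTarget = 2048;              // TARGET_REGION_NUMBER
      uint64_t size = (initial_heap + max_heap) / 2 / kTarget;
      if (size < kMin) size = kMin;
      uint64_t pow2 = kMin;
      while (pow2 * 2 <= size) pow2 *= 2;         // largest power of two <= size
      return pow2 > kMax ? kMax : pow2;
    }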
@@ -206 +193 @@
   zero_marked_bytes();
   init_top_at_mark_start();
 }

 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
-  assert(_humongous_type == NotHumongous,
-         "we should have already filtered out humongous regions");
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
   assert(_end == _orig_end,
          "we should have already filtered out humongous regions");

   _in_collection_set = false;

+  set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
-  set_young_type(NotYoung);
+  set_free();
   reset_pre_dummy_top();

   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
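set_young_type(NotYoung) becoming set_free() reflects a wider refactoring visible throughout this changeset: the separate _young_type and _humongous_type fields are folded into a single region-type tag (compare _type.set_starts_humongous() and get_short_type_str() in the hunks below). Roughly, as a hedged sketch rather than the actual HeapRegionType class:

    // Sketch of a unified region-type tag.
    class HeapRegionTypeSketch {
      enum Tag { Free, Eden, Survivor, StartsHumongous, ContinuesHumongous };
      Tag _tag;
    public:
      HeapRegionTypeSketch() : _tag(Free) { }
      void set_free()                { _tag = Free; }
      void set_starts_humongous()    { _tag = StartsHumongous; }
      void set_continues_humongous() { _tag = ContinuesHumongous; }
      bool is_humongous() const {
        return _tag == StartsHumongous || _tag == ContinuesHumongous;
      }
      const char* get_short_str() const {
        switch (_tag) {
          case Free:               return "F";
          case Eden:               return "E";
          case Survivor:           return "S";
          case StartsHumongous:    return "HS";
          case ContinuesHumongous: return "HC";
        }
        return "?";
      }
    };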
@@ -268 +254 @@
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

-  _humongous_type = StartsHumongous;
+  _type.set_starts_humongous();
   _humongous_start_region = this;

   set_end(new_end);
   _offsets.set_for_starts_humongous(new_top);
 }
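For orientation: with 1 MB regions, a hypothetical 2.5 MB humongous object claims three consecutive regions. Only the first becomes starts-humongous; set_end(new_end) stretches its end() past the usual GrainBytes boundary to cover the whole object, and the followers (next hunk) point back at it. A hedged driver sketch, assuming the caller has already reserved n consecutive regions:

    // Illustration only: wiring up one humongous allocation.
    void wire_humongous_sketch(HeapRegion** rs, size_t n,
                               HeapWord* obj_top, HeapWord* obj_end) {
      rs[0]->set_startsHumongous(obj_top, obj_end);  // stretches rs[0]->end()
      for (size_t i = 1; i < n; i++) {
        rs[i]->set_continuesHumongous(rs[0]);        // all point at the first
      }
    }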
@@ -282 +268 @@
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(first_hr->startsHumongous(), "pre-condition");

-  _humongous_type = ContinuesHumongous;
+  _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }

-void HeapRegion::set_notHumongous() {
+void HeapRegion::clear_humongous() {
   assert(isHumongous(), "pre-condition");

   if (startsHumongous()) {
     assert(top() <= end(), "pre-condition");
     set_end(_orig_end);
@@ -302 +288 @@
     // continues humongous
     assert(end() == _orig_end, "sanity");
   }

   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
-  _humongous_type = NotHumongous;
   _humongous_start_region = NULL;
 }

 bool HeapRegion::claimHeapRegion(jint claimValue) {
   jint current = _claimed;
@@ -317 +302 @@
     }
   }
   return false;
 }

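The middle of claimHeapRegion() is elided between the two hunks above; it performs the actual compare-and-swap. The protocol, as a minimal sketch: a worker claims a region by CAS-ing the claim word from the value it just read to claimValue, and exactly one thread sees that old value returned and wins.

    // Sketch of the claim protocol whose tail is shown above.
    bool claim_sketch(volatile jint* claimed, jint claim_value) {
      jint current = *claimed;
      if (current != claim_value) {
        jint res = Atomic::cmpxchg(claim_value, claimed, current);
        return res == current;     // true only for the thread whose CAS won
      }
      return false;                // already claimed with this value
    }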
@@ -322 +307 @@
-HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
-  HeapWord* low = addr;
-  HeapWord* high = end();
-  while (low < high) {
-    size_t diff = pointer_delta(high, low);
-    // Must add one below to bias toward the high amount. Otherwise, if
-    // "high" were at the desired value, and "low" were one less, we
-    // would not converge on "high". This is not symmetric, because
-    // we set "high" to a block start, which might be the right one,
-    // which we don't do for "low".
-    HeapWord* middle = low + (diff+1)/2;
-    if (middle == high) return high;
-    HeapWord* mid_bs = block_start_careful(middle);
-    if (mid_bs < addr) {
-      low = middle;
-    } else {
-      high = mid_bs;
-    }
-  }
-  assert(low == high && low >= addr, "Didn't work.");
-  return low;
-}
-
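The removed search is worth a note: with the usual midpoint low + diff/2, if high held the answer and low == high - 1, middle would equal low and the loop could spin forever. Rounding the midpoint up guarantees middle > low, so the interval always shrinks. The same idiom in isolation:

    // Upper-biased binary search: returns the greatest x in [low, high]
    // with pred(x) true, assuming pred is monotone (true, then false)
    // and pred(low) holds on entry.
    int upper_biased_search(int low, int high, bool (*pred)(int)) {
      while (low < high) {
        int middle = low + (high - low + 1) / 2;  // rounds up: middle > low
        if (pred(middle)) {
          low = middle;
        } else {
          high = middle - 1;
        }
      }
      return low;
    }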
@@ -345 +307 @@
-#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif // _MSC_VER
-
-
-HeapRegion::HeapRegion(uint hrs_index,
+HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
-    _hrs_index(hrs_index),
-    _humongous_type(NotHumongous), _humongous_start_region(NULL),
+    _hrm_index(hrm_index),
+    _allocation_context(AllocationContext::system()),
+    _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
-    _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
+    _next_young_region(NULL),
+    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
 #endif // ASSERT
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
     _predicted_bytes_to_copy(0)
 {
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
+  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+
+  initialize(mr);
+}
+
+void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  assert(_rem_set->is_empty(), "Remembered set must be empty");
+
+  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
+
   _orig_end = mr.end();
-  // Note that initialize() will set the start of the unmarked area of the
-  // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  set_saved_mark();
-
-  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+  record_top_and_timestamp();
 }

 CompactibleSpace* HeapRegion::next_compaction_space() const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint index = hrs_index() + 1;
-  while (index < g1h->n_regions()) {
-    HeapRegion* hr = g1h->region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-    index += 1;
-  }
-  return NULL;
-}
-
-void HeapRegion::save_marks() {
-  set_saved_mark();
-}
-
-void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  HeapWord* p = mr.start();
-  HeapWord* e = mr.end();
-  oop obj;
-  while (p < e) {
-    obj = oop(p);
-    p += obj->oop_iterate(cl);
-  }
-  assert(p == e, "bad memregion: doesn't end on obj boundary");
-}
-
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
-void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
-  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
-}
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
-
-
-void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
-  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
+  return G1CollectedHeap::heap()->next_compaction_region(this);
 }

 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                     bool during_conc_mark) {
   // We always recreate the prev marking info and we'll explicitly
   // mark all objects we find to be self-forwarded on the prev
   // bitmap. So all objects need to be below PTAMS.
-  _prev_top_at_mark_start = top();
   _prev_marked_bytes = 0;

   if (during_initial_mark) {
     // During initial-mark, we'll also explicitly mark all objects
     // we find to be self-forwarded on the next bitmap. So all
@@ -445 +371 @@
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
   assert(0 <= marked_bytes && marked_bytes <= used(),
          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                  marked_bytes, used()));
+  _prev_top_at_mark_start = top();
   _prev_marked_bytes = marked_bytes;
 }

 HeapWord*
 HeapRegion::object_iterate_mem_careful(MemRegion mr,
454 HeapRegion::object_iterate_mem_careful(MemRegion mr, 381 HeapRegion::object_iterate_mem_careful(MemRegion mr,
475 cl->do_object(obj); 402 cl->do_object(obj);
476 } 403 }
477 if (cl->abort()) return cur; 404 if (cl->abort()) return cur;
478 // The check above must occur before the operation below, since an 405 // The check above must occur before the operation below, since an
479 // abort might invalidate the "size" operation. 406 // abort might invalidate the "size" operation.
480 cur += obj->size(); 407 cur += block_size(cur);
481 } 408 }
482 return NULL; 409 return NULL;
483 } 410 }
484 411
485 HeapWord* 412 HeapWord*
@@ -547 +474 @@
     if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }

   // If we finish the above loop...We have a parseable object that
   // begins on or before the start of the memory region, and ends
   // inside or spans the entire region.

   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");

   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
   }

@@ -572 +498 @@
     // Ran into an unparseable point.
     return cur;
   };

   // Otherwise:
-  next = (cur + obj->size());
+  next = cur + block_size(cur);

   if (!g1h->is_obj_dead(obj)) {
     if (next < end || !obj->is_objArray()) {
       // This object either does not span the MemRegion
       // boundary, or if it does it's not an array.
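Both hunks above guard on klass_or_null() == NULL: during concurrent phases an object may be allocated but its klass word not yet published, so the scan must treat it as an unparseable point and report how far it got. The idiom in isolation, as a sketch:

    // Scan [cur, end), bailing out at the first unparseable point.
    HeapWord* scan_carefully_sketch(HeapWord* cur, HeapWord* end,
                                    ObjectClosure* cl) {
      while (cur < end) {
        oop obj = oop(cur);
        if (obj->klass_or_null() == NULL) {
          return cur;               // header not yet published; stop here
        }
        cl->do_object(obj);
        cur += obj->size();         // safe: the header was readable above
      }
      return NULL;                  // scanned the whole range
    }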
@@ -598 +524 @@
 void HeapRegion::add_strong_code_root(nmethod* nm) {
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->add_strong_code_root(nm);
 }

+void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->add_strong_code_root_locked(nm);
+}
+
 void HeapRegion::remove_strong_code_root(nmethod* nm) {
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->remove_strong_code_root(nm);
-}
-
-void HeapRegion::migrate_strong_code_roots() {
-  assert(in_collection_set(), "only collection set regions");
-  assert(!isHumongous(),
-         err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
-                 HR_FORMAT_PARAMS(this)));
-
-  HeapRegionRemSet* hrrs = rem_set();
-  hrrs->migrate_strong_code_roots();
 }

 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->strong_code_roots_do(blk);
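Context for the hunk above: each region's remembered set also tracks the nmethods containing embedded oops that point into the region, so strong_code_roots_do() can treat compiled code as roots when the region is evacuated; the new _locked variant just asserts that the CodeCache_lock is already held. A self-contained sketch of such a set (std::vector stands in for the real HotSpot structure):

    #include <algorithm>
    #include <vector>

    struct CodeRootSetSketch {
      std::vector<void*> nmethods;   // nmethods with pointers into the region

      void add(void* nm) {           // idempotent insert
        if (std::find(nmethods.begin(), nmethods.end(), nm) == nmethods.end())
          nmethods.push_back(nm);
      }
      void remove(void* nm) {
        nmethods.erase(std::remove(nmethods.begin(), nmethods.end(), nm),
                       nmethods.end());
      }
      template <class Fn> void do_all(Fn fn) {
        for (void* nm : nmethods) fn(nm);   // visit each root once
      }
    };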
@@ -748 +670 @@
   }
 }

 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
-  if (isHumongous()) {
-    if (startsHumongous())
-      st->print(" HS");
-    else
-      st->print(" HC");
-  } else {
-    st->print("   ");
-  }
+  st->print("AC%4u", allocation_context());
+  st->print(" %2s", get_short_type_str());
   if (in_collection_set())
     st->print(" CS");
   else
     st->print("   ");
-  if (is_young())
-    st->print(is_survivor() ? " SU" : " Y ");
-  else
-    st->print("   ");
-  if (is_empty())
-    st->print(" F");
-  else
-    st->print("  ");
   st->print(" TS %5d", _gc_time_stamp);
   st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
             prev_top_at_mark_start(), next_top_at_mark_start());
   G1OffsetTableContigSpace::print_on(st);
 }
@@ -927 +835 @@
   bool is_humongous = isHumongous();
   bool do_bot_verify = !is_young();
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;

-    if (is_humongous != g1->isHumongous(obj_size)) {
+    if (is_humongous != g1->isHumongous(obj_size) &&
+        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
                              p, g1->isHumongous(obj_size) ? "" : "non-",
                              obj_size, is_humongous ? "" : "non-");
       *failures = true;
       return;
     }

     // If it returns false, verify_for_object() will output the
-    // appropriate messasge.
-    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
+    // appropriate message.
+    if (do_bot_verify &&
+        !g1->is_obj_dead(obj, this) &&
+        !_offsets.verify_for_object(p, obj_size)) {
       *failures = true;
       return;
     }

     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         Klass* klass = obj->klass();
-        if (!klass->is_metaspace_object()) {
+        bool is_metaspace_object = Metaspace::contains(klass) ||
+            (vo == VerifyOption_G1UsePrevMarking &&
+             ClassLoaderDataGraph::unload_list_contains(klass));
+        if (!is_metaspace_object) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                  "not metadata", klass, (void *)obj);
           *failures = true;
           return;
         } else if (!klass->is_klass()) {
@@ -1063 +977 @@

 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
 // away eventually.

 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  set_top(bottom());
+  set_saved_mark_word(bottom());
+  CompactibleSpace::clear(mangle_space);
+  reset_bot();
 }

 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
   Space::set_bottom(new_bottom);
   _offsets.set_bottom(new_bottom);
@@ -1098 +1013 @@
 }

 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
-  if (_gc_time_stamp < g1h->get_gc_time_stamp())
-    return top();
-  else
-    return ContiguousSpace::saved_mark_word();
-}
-
-void G1OffsetTableContigSpace::set_saved_mark() {
+  HeapWord* local_top = top();
+  OrderAccess::loadload();
+  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+    return local_top;
+  } else {
+    return Space::saved_mark_word();
+  }
+}
+
+void G1OffsetTableContigSpace::record_top_and_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

   if (_gc_time_stamp < curr_gc_time_stamp) {
     // The order of these is important, as another thread might be
     // in the middle of allocating. If it calls top() between our
     // set_saved_mark and before _gc_time_stamp = ..., then the latter
     // will be false, and it will pick up top() as the high water mark
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
     // of the region. Either way, the behaviour will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
     // No need to do another barrier to flush the writes above. If
     // this is called in parallel with other threads trying to
     // allocate into the region, the caller should call this while
     // holding a lock and when the lock is released the writes will be
     // flushed.
   }
 }
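The new loadload() in saved_mark_word() pairs with the storestore() in record_top_and_timestamp(): the writer publishes the saved mark before the timestamp, and the reader samples top() before the timestamp. A reader that sees the new stamp is therefore guaranteed to also see the mark written before it; a reader that sees the old stamp safely falls back to its own top() sample. A C++11 analogue of the protocol (HotSpot uses OrderAccess fences rather than these atomics):

    #include <atomic>

    static std::atomic<unsigned> gc_time_stamp{0};
    static char* saved_mark;             // published via the timestamp
    static char* volatile current_top;   // bumped concurrently by allocators

    void record_top_and_timestamp_sketch(unsigned curr_stamp) {
      if (gc_time_stamp.load(std::memory_order_relaxed) < curr_stamp) {
        saved_mark = current_top;                      // 1. publish the mark...
        gc_time_stamp.store(curr_stamp,
                            std::memory_order_release); // 2. ...then the stamp
      }
    }

    char* saved_mark_word_sketch(unsigned curr_stamp) {
      char* local_top = current_top;                    // 1. sample top first
      if (gc_time_stamp.load(std::memory_order_acquire) < curr_stamp) {
        return local_top;   // stale stamp: top is the high-water mark
      }
      return saved_mark;    // fresh stamp: the mark written before it is visible
    }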
@@ -1131 +1049 @@
+
+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  while (p < top()) {
+    if (block_is_obj(p)) {
+      blk->do_object(oop(p));
+    }
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj

 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
   _offsets(sharedOffsetArray, mr),
   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
-  // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
 }
+
+void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  CompactibleSpace::initialize(mr, clear_space, mangle_space);
+  _top = bottom();
+  reset_bot();
+}