comparison src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp @ 4787:2ace1c4ee8da

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso
author tonyp
date Tue, 10 Jan 2012 18:58:13 -0500
parents 23d434c6290d
children 2e966d967c5c
compared revisions: 4786:1d6185f732aa -> 4787:2ace1c4ee8da
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -151,6 +151,48 @@
       }
     }
   }
 }
 
+inline void ConcurrentMark::markPrev(oop p) {
+  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
+  // Note we are overriding the read-only view of the prev map here, via
+  // the cast.
+  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
+}
+
+inline void ConcurrentMark::markNext(oop p) {
+  assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
+  _nextMarkBitMap->mark((HeapWord*) p);
+}
+
+inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
+  HeapWord* addr = (HeapWord*) obj;
+
+  // Currently we don't do anything with word_size but we will use it
+  // in the very near future in the liveness calculation piggy-backing
+  // changes.
+
+#ifdef ASSERT
+  HeapRegion* hr = _g1h->heap_region_containing(addr);
+  assert(hr != NULL, "sanity");
+  assert(!hr->is_survivor(), "should not allocate survivors during IM");
+  assert(addr < hr->next_top_at_mark_start(),
+         err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
+                 addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
+  // We cannot assert that word_size == obj->size() given that obj
+  // might not be in a consistent state (another thread might be in
+  // the process of copying it). So the best thing we can do is to
+  // assert that word_size is under an upper bound which is its
+  // containing region's capacity.
+  assert(word_size * HeapWordSize <= hr->capacity(),
+         err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
+                 word_size * HeapWordSize, hr->capacity(),
+                 HR_FORMAT_PARAMS(hr)));
+#endif // ASSERT
+
+  if (!_nextMarkBitMap->isMarked(addr)) {
+    _nextMarkBitMap->parMark(addr);
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
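
For readers skimming the change, here is a minimal standalone sketch, not HotSpot code, with hypothetical names throughout (ToyMarkBitmap, copy_with_propagation, copy_without_propagation, gray_root), of the idea stated in the changeset summary: if a GC never has to copy an explicitly marked object, the evacuation copy path can drop its mark-propagation step, and initial-mark roots are simply grayed on the next marking bitmap in the same shape as the grayRoot() added above.

#include <cstdint>
#include <cstdio>
#include <unordered_set>

// Toy stand-in for a marking bitmap: just a set of "marked" addresses.
class ToyMarkBitmap {
  std::unordered_set<std::uintptr_t> _bits;
public:
  bool is_marked(std::uintptr_t addr) const { return _bits.count(addr) != 0; }
  void mark(std::uintptr_t addr)            { _bits.insert(addr); }
};

// Old scheme (sketched): an evacuated object might already be explicitly
// marked (e.g. it was marked while sitting in a survivor region), so every
// copy had to check for and propagate that mark to the new location.
std::uintptr_t copy_with_propagation(std::uintptr_t from, std::uintptr_t to,
                                     ToyMarkBitmap& next_bitmap) {
  if (next_bitmap.is_marked(from)) {
    next_bitmap.mark(to);  // extra work on every evacuation
  }
  return to;
}

// New scheme (sketched): survivor regions are not used for allocation during
// the initial-mark pause, so no copied object is explicitly marked and the
// copy path has nothing to propagate.
std::uintptr_t copy_without_propagation(std::uintptr_t /*from*/,
                                        std::uintptr_t to) {
  return to;
}

// Roots found during the initial-mark pause are grayed directly on the
// "next" bitmap, mirroring the shape of ConcurrentMark::grayRoot() above.
void gray_root(std::uintptr_t addr, ToyMarkBitmap& next_bitmap) {
  if (!next_bitmap.is_marked(addr)) {
    next_bitmap.mark(addr);
  }
}

int main() {
  ToyMarkBitmap next_bitmap;

  // Old-style path: the mark has to travel with the object on every copy.
  next_bitmap.mark(0x2000);
  copy_with_propagation(0x2000, 0x2800, next_bitmap);

  // New-style path: nothing to propagate; roots are grayed directly.
  gray_root(0x1000, next_bitmap);
  copy_without_propagation(0x3000, 0x3800);

  std::printf("0x2800 marked: %d, 0x1000 marked: %d\n",
              (int)next_bitmap.is_marked(0x2800),
              (int)next_bitmap.is_marked(0x1000));
  return 0;
}

The asymmetry is the point of the sketch: graying a root once at the start of marking is cheap (and done with parMark() in the real code so multiple workers can race safely), whereas propagating marks on every copy adds work to every evacuation pause.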