Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 4787:2ace1c4ee8da
6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso
author | tonyp |
---|---|
date | Tue, 10 Jan 2012 18:58:13 -0500 |
parents | 3b2b58fb1425 |
children | b4ebad3520bb |
comparison legend: equal | deleted | inserted | replaced
4786:1d6185f732aa | 4787:2ace1c4ee8da |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
573 | 573 |
574 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) { | 574 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) { |
575 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); | 575 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); |
576 } | 576 } |
577 | 577 |
578 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, | |
579 bool during_conc_mark) { | |
580 // We always recreate the prev marking info and we'll explicitly | |
581 // mark all objects we find to be self-forwarded on the prev | |
582 // bitmap. So all objects need to be below PTAMS. | |
583 _prev_top_at_mark_start = top(); | |
584 _prev_marked_bytes = 0; | |
585 | |
586 if (during_initial_mark) { | |
587 // During initial-mark, we'll also explicitly mark all objects | |
588 // we find to be self-forwarded on the next bitmap. So all | |
589 // objects need to be below NTAMS. | |
590 _next_top_at_mark_start = top(); | |
591 set_top_at_conc_mark_count(bottom()); | |
592 _next_marked_bytes = 0; | |
593 } else if (during_conc_mark) { | |
594 // During concurrent mark, all objects in the CSet (including | |
595 // the ones we find to be self-forwarded) are implicitly live. | |
596 // So all objects need to be above NTAMS. | |
597 _next_top_at_mark_start = bottom(); | |
598 set_top_at_conc_mark_count(bottom()); | |
599 _next_marked_bytes = 0; | |
600 } | |
601 } | |
602 | |
603 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark, | |
604 bool during_conc_mark, | |
605 size_t marked_bytes) { | |
606 assert(0 <= marked_bytes && marked_bytes <= used(), | |
607 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, | |
608 marked_bytes, used())); | |
609 _prev_marked_bytes = marked_bytes; | |
610 } | |
611 | |
578 HeapWord* | 612 HeapWord* |
579 HeapRegion::object_iterate_mem_careful(MemRegion mr, | 613 HeapRegion::object_iterate_mem_careful(MemRegion mr, |
580 ObjectClosure* cl) { | 614 ObjectClosure* cl) { |
581 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | 615 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
582 // We used to use "block_start_careful" here. But we're actually happy | 616 // We used to use "block_start_careful" here. But we're actually happy |