comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 2181:d25d4ca69222

Merge.
author Thomas Wuerthinger <wuerthinger@ssw.jku.at>
date Wed, 16 Feb 2011 13:47:20 +0100
parents 3582bf76420e
children 1216415d8e35
comparing base 2108:50b45e2d9725 with 2181:d25d4ca69222
1 /* 1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
31 #include "gc_implementation/g1/g1RemSet.hpp" 31 #include "gc_implementation/g1/g1RemSet.hpp"
32 #include "gc_implementation/g1/heapRegionRemSet.hpp" 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
33 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 33 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
34 #include "gc_implementation/shared/vmGCOperations.hpp"
34 #include "memory/genOopClosures.inline.hpp" 35 #include "memory/genOopClosures.inline.hpp"
35 #include "memory/referencePolicy.hpp" 36 #include "memory/referencePolicy.hpp"
36 #include "memory/resourceArea.hpp" 37 #include "memory/resourceArea.hpp"
37 #include "oops/oop.inline.hpp" 38 #include "oops/oop.inline.hpp"
38 #include "runtime/handles.inline.hpp" 39 #include "runtime/handles.inline.hpp"
455 _parallel_marking_threads(0), 456 _parallel_marking_threads(0),
456 _sleep_factor(0.0), 457 _sleep_factor(0.0),
457 _marking_task_overhead(1.0), 458 _marking_task_overhead(1.0),
458 _cleanup_sleep_factor(0.0), 459 _cleanup_sleep_factor(0.0),
459 _cleanup_task_overhead(1.0), 460 _cleanup_task_overhead(1.0),
461 _cleanup_list("Cleanup List"),
460 _region_bm(max_regions, false /* in_resource_area*/), 462 _region_bm(max_regions, false /* in_resource_area*/),
461 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> 463 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
462 CardTableModRefBS::card_shift, 464 CardTableModRefBS::card_shift,
463 false /* in_resource_area*/), 465 false /* in_resource_area*/),
464 _prevMarkBitMap(&_markBitMap1), 466 _prevMarkBitMap(&_markBitMap1),
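
The _card_bm sizing above is a rounded-up division: one bit per card, where CardTableModRefBS defines a 512-byte card (card_shift == 9). A quick worked example with an illustrative 1 GiB reservation (the concrete size is made up for the arithmetic):

    // bits = (rs.size() + card_size - 1) >> card_shift  (round up to whole cards)
    size_t rs_size = 1024 * 1024 * 1024;        // bytes reserved (example value)
    size_t bits    = (rs_size + 512 - 1) >> 9;  // == 2097152: one bit per 512-byte card
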
517 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); 519 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
518 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); 520 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
519 521
520 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 522 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
521 satb_qs.set_buffer_size(G1SATBBufferSize); 523 satb_qs.set_buffer_size(G1SATBBufferSize);
522
523 int size = (int) MAX2(ParallelGCThreads, (size_t)1);
524 _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
525 for (int i = 0 ; i < size; i++) {
526 _par_cleanup_thread_state[i] = new ParCleanupThreadState;
527 }
528 524
529 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); 525 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
530 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); 526 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
531 527
532 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail 528 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
708 _active_tasks = 0; 704 _active_tasks = 0;
709 clear_concurrent_marking_in_progress(); 705 clear_concurrent_marking_in_progress();
710 } 706 }
711 707
712 ConcurrentMark::~ConcurrentMark() { 708 ConcurrentMark::~ConcurrentMark() {
713 int size = (int) MAX2(ParallelGCThreads, (size_t)1);
714 for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
715 FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
716 _par_cleanup_thread_state);
717
718 for (int i = 0; i < (int) _max_task_num; ++i) { 709 for (int i = 0; i < (int) _max_task_num; ++i) {
719 delete _task_queues->queue(i); 710 delete _task_queues->queue(i);
720 delete _tasks[i]; 711 delete _tasks[i];
721 } 712 }
722 delete _task_queues; 713 delete _task_queues;
1062 the_task->record_start_time(); 1053 the_task->record_start_time();
1063 if (!_cm->has_aborted()) { 1054 if (!_cm->has_aborted()) {
1064 do { 1055 do {
1065 double start_vtime_sec = os::elapsedVTime(); 1056 double start_vtime_sec = os::elapsedVTime();
1066 double start_time_sec = os::elapsedTime(); 1057 double start_time_sec = os::elapsedTime();
1067 the_task->do_marking_step(10.0); 1058 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1059
1060 the_task->do_marking_step(mark_step_duration_ms,
1061 true /* do_stealing */,
1062 true /* do_termination */);
1063
1068 double end_time_sec = os::elapsedTime(); 1064 double end_time_sec = os::elapsedTime();
1069 double end_vtime_sec = os::elapsedVTime(); 1065 double end_vtime_sec = os::elapsedVTime();
1070 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; 1066 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1071 double elapsed_time_sec = end_time_sec - start_time_sec; 1067 double elapsed_time_sec = end_time_sec - start_time_sec;
1072 _cm->clear_has_overflown(); 1068 _cm->clear_has_overflown();
1118 // a safepoint is indeed in progress as a younger generation 1114 // a safepoint is indeed in progress as a younger generation
1119 // stop-the-world GC happens even as we mark in this generation. 1115 // stop-the-world GC happens even as we mark in this generation.
1120 1116
1121 _restart_for_overflow = false; 1117 _restart_for_overflow = false;
1122 1118
1123 set_phase(MAX2((size_t) 1, parallel_marking_threads()), true); 1119 size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
1120 set_phase(active_workers, true /* concurrent */);
1124 1121
1125 CMConcurrentMarkingTask markingTask(this, cmThread()); 1122 CMConcurrentMarkingTask markingTask(this, cmThread());
1126 if (parallel_marking_threads() > 0) 1123 if (parallel_marking_threads() > 0)
1127 _parallel_workers->run_task(&markingTask); 1124 _parallel_workers->run_task(&markingTask);
1128 else 1125 else
1139 // If a full collection has happened, we shouldn't do this. 1136 // If a full collection has happened, we shouldn't do this.
1140 if (has_aborted()) { 1137 if (has_aborted()) {
1141 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1138 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1142 return; 1139 return;
1143 } 1140 }
1141
1142 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1144 1143
1145 if (VerifyDuringGC) { 1144 if (VerifyDuringGC) {
1146 HandleMark hm; // handle scope 1145 HandleMark hm; // handle scope
1147 gclog_or_tty->print(" VerifyDuringGC:(before)"); 1146 gclog_or_tty->print(" VerifyDuringGC:(before)");
1148 Universe::heap()->prepare_for_verify(); 1147 Universe::heap()->prepare_for_verify();
1166 // Clear the flag. We do not need it any more. 1165 // Clear the flag. We do not need it any more.
1167 clear_has_overflown(); 1166 clear_has_overflown();
1168 if (G1TraceMarkStackOverflow) 1167 if (G1TraceMarkStackOverflow)
1169 gclog_or_tty->print_cr("\nRemark led to restart for overflow."); 1168 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1170 } else { 1169 } else {
1170 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1171 // We're done with marking. 1171 // We're done with marking.
1172 // This is the end of the marking cycle; we expect all 1172 // This is the end of the marking cycle; we expect all
1173 // threads to have SATB queues with active set to true. 1173 // threads to have SATB queues with active set to true.
1174 JavaThread::satb_mark_queue_set().set_active_all_threads( 1174 satb_mq_set.set_active_all_threads(false, /* new active value */
1175 false, /* new active value */ 1175 true /* expected_active */);
1176 true /* expected_active */);
1177 1176
1178 if (VerifyDuringGC) { 1177 if (VerifyDuringGC) {
1179 HandleMark hm; // handle scope 1178 HandleMark hm; // handle scope
1180 gclog_or_tty->print(" VerifyDuringGC:(after)"); 1179 gclog_or_tty->print(" VerifyDuringGC:(after)");
1181 Universe::heap()->prepare_for_verify(); 1180 Universe::heap()->prepare_for_verify();
1182 Universe::heap()->verify(/* allow_dirty */ true, 1181 Universe::heap()->verify(/* allow_dirty */ true,
1183 /* silent */ false, 1182 /* silent */ false,
1184 /* use_prev_marking */ false); 1183 /* use_prev_marking */ false);
1185 } 1184 }
1185 assert(!restart_for_overflow(), "sanity");
1186 }
1187
1188 // Reset the marking state if marking completed
1189 if (!restart_for_overflow()) {
1190 set_non_marking_state();
1186 } 1191 }
1187 1192
1188 #if VERIFY_OBJS_PROCESSED 1193 #if VERIFY_OBJS_PROCESSED
1189 _scan_obj_cl.objs_processed = 0; 1194 _scan_obj_cl.objs_processed = 0;
1190 ThreadLocalObjQueue::objs_enqueued = 0; 1195 ThreadLocalObjQueue::objs_enqueued = 0;
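
(SvcGCMarker comes from vmGCOperations.hpp — hence the new include at the top of this change — and is an RAII helper that reports the start and end of the pause to serviceability agents; OTHER denotes a pause that is neither a minor nor a full collection.)
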
1505 G1CollectedHeap* _g1; 1510 G1CollectedHeap* _g1;
1506 int _worker_num; 1511 int _worker_num;
1507 size_t _max_live_bytes; 1512 size_t _max_live_bytes;
1508 size_t _regions_claimed; 1513 size_t _regions_claimed;
1509 size_t _freed_bytes; 1514 size_t _freed_bytes;
1510 size_t _cleared_h_regions; 1515 FreeRegionList* _local_cleanup_list;
1511 size_t _freed_regions; 1516 HumongousRegionSet* _humongous_proxy_set;
1512 UncleanRegionList* _unclean_region_list; 1517 HRRSCleanupTask* _hrrs_cleanup_task;
1513 double _claimed_region_time; 1518 double _claimed_region_time;
1514 double _max_region_time; 1519 double _max_region_time;
1515 1520
1516 public: 1521 public:
1517 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1522 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1518 UncleanRegionList* list, 1523 int worker_num,
1519 int worker_num); 1524 FreeRegionList* local_cleanup_list,
1525 HumongousRegionSet* humongous_proxy_set,
1526 HRRSCleanupTask* hrrs_cleanup_task);
1520 size_t freed_bytes() { return _freed_bytes; } 1527 size_t freed_bytes() { return _freed_bytes; }
1521 size_t cleared_h_regions() { return _cleared_h_regions; }
1522 size_t freed_regions() { return _freed_regions; }
1523 UncleanRegionList* unclean_region_list() {
1524 return _unclean_region_list;
1525 }
1526 1528
1527 bool doHeapRegion(HeapRegion *r); 1529 bool doHeapRegion(HeapRegion *r);
1528 1530
1529 size_t max_live_bytes() { return _max_live_bytes; } 1531 size_t max_live_bytes() { return _max_live_bytes; }
1530 size_t regions_claimed() { return _regions_claimed; } 1532 size_t regions_claimed() { return _regions_claimed; }
1532 double max_region_time_sec() { return _max_region_time; } 1534 double max_region_time_sec() { return _max_region_time; }
1533 }; 1535 };
1534 1536
1535 class G1ParNoteEndTask: public AbstractGangTask { 1537 class G1ParNoteEndTask: public AbstractGangTask {
1536 friend class G1NoteEndOfConcMarkClosure; 1538 friend class G1NoteEndOfConcMarkClosure;
1539
1537 protected: 1540 protected:
1538 G1CollectedHeap* _g1h; 1541 G1CollectedHeap* _g1h;
1539 size_t _max_live_bytes; 1542 size_t _max_live_bytes;
1540 size_t _freed_bytes; 1543 size_t _freed_bytes;
1541 ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state; 1544 FreeRegionList* _cleanup_list;
1545
1542 public: 1546 public:
1543 G1ParNoteEndTask(G1CollectedHeap* g1h, 1547 G1ParNoteEndTask(G1CollectedHeap* g1h,
1544 ConcurrentMark::ParCleanupThreadState** 1548 FreeRegionList* cleanup_list) :
1545 par_cleanup_thread_state) :
1546 AbstractGangTask("G1 note end"), _g1h(g1h), 1549 AbstractGangTask("G1 note end"), _g1h(g1h),
1547 _max_live_bytes(0), _freed_bytes(0), 1550 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1548 _par_cleanup_thread_state(par_cleanup_thread_state)
1549 {}
1550 1551
1551 void work(int i) { 1552 void work(int i) {
1552 double start = os::elapsedTime(); 1553 double start = os::elapsedTime();
1553 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, 1554 FreeRegionList local_cleanup_list("Local Cleanup List");
1554 &_par_cleanup_thread_state[i]->list, 1555 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1555 i); 1556 HRRSCleanupTask hrrs_cleanup_task;
1557 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
1558 &humongous_proxy_set,
1559 &hrrs_cleanup_task);
1556 if (G1CollectedHeap::use_parallel_gc_threads()) { 1560 if (G1CollectedHeap::use_parallel_gc_threads()) {
1557 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, 1561 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
1558 HeapRegion::NoteEndClaimValue); 1562 HeapRegion::NoteEndClaimValue);
1559 } else { 1563 } else {
1560 _g1h->heap_region_iterate(&g1_note_end); 1564 _g1h->heap_region_iterate(&g1_note_end);
1561 } 1565 }
1562 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1566 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1563 1567
1564 // Now finish up freeing the current thread's regions. 1568 // Now update the lists
1565 _g1h->finish_free_region_work(g1_note_end.freed_bytes(), 1569 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1566 g1_note_end.cleared_h_regions(), 1570 NULL /* free_list */,
1567 0, NULL); 1571 &humongous_proxy_set,
1572 true /* par */);
1568 { 1573 {
1569 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1574 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1570 _max_live_bytes += g1_note_end.max_live_bytes(); 1575 _max_live_bytes += g1_note_end.max_live_bytes();
1571 _freed_bytes += g1_note_end.freed_bytes(); 1576 _freed_bytes += g1_note_end.freed_bytes();
1577
1578 _cleanup_list->add_as_tail(&local_cleanup_list);
1579 assert(local_cleanup_list.is_empty(), "post-condition");
1580
1581 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1572 } 1582 }
1573 double end = os::elapsedTime(); 1583 double end = os::elapsedTime();
1574 if (G1PrintParCleanupStats) { 1584 if (G1PrintParCleanupStats) {
1575 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] " 1585 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
1576 "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n", 1586 "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
1607 1617
1608 }; 1618 };
1609 1619
1610 G1NoteEndOfConcMarkClosure:: 1620 G1NoteEndOfConcMarkClosure::
1611 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1621 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1612 UncleanRegionList* list, 1622 int worker_num,
1613 int worker_num) 1623 FreeRegionList* local_cleanup_list,
1624 HumongousRegionSet* humongous_proxy_set,
1625 HRRSCleanupTask* hrrs_cleanup_task)
1614 : _g1(g1), _worker_num(worker_num), 1626 : _g1(g1), _worker_num(worker_num),
1615 _max_live_bytes(0), _regions_claimed(0), 1627 _max_live_bytes(0), _regions_claimed(0),
1616 _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0), 1628 _freed_bytes(0),
1617 _claimed_region_time(0.0), _max_region_time(0.0), 1629 _claimed_region_time(0.0), _max_region_time(0.0),
1618 _unclean_region_list(list) 1630 _local_cleanup_list(local_cleanup_list),
1619 {} 1631 _humongous_proxy_set(humongous_proxy_set),
1620 1632 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1621 bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) { 1633
1634 bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
1622 // We use a claim value of zero here because all regions 1635 // We use a claim value of zero here because all regions
1623 // were claimed with value 1 in the FinalCount task. 1636 // were claimed with value 1 in the FinalCount task.
1624 r->reset_gc_time_stamp(); 1637 hr->reset_gc_time_stamp();
1625 if (!r->continuesHumongous()) { 1638 if (!hr->continuesHumongous()) {
1626 double start = os::elapsedTime(); 1639 double start = os::elapsedTime();
1627 _regions_claimed++; 1640 _regions_claimed++;
1628 r->note_end_of_marking(); 1641 hr->note_end_of_marking();
1629 _max_live_bytes += r->max_live_bytes(); 1642 _max_live_bytes += hr->max_live_bytes();
1630 _g1->free_region_if_totally_empty_work(r, 1643 _g1->free_region_if_empty(hr,
1631 _freed_bytes, 1644 &_freed_bytes,
1632 _cleared_h_regions, 1645 _local_cleanup_list,
1633 _freed_regions, 1646 _humongous_proxy_set,
1634 _unclean_region_list, 1647 _hrrs_cleanup_task,
1635 true /*par*/); 1648 true /* par */);
1636 double region_time = (os::elapsedTime() - start); 1649 double region_time = (os::elapsedTime() - start);
1637 _claimed_region_time += region_time; 1650 _claimed_region_time += region_time;
1638 if (region_time > _max_region_time) _max_region_time = region_time; 1651 if (region_time > _max_region_time) _max_region_time = region_time;
1639 } 1652 }
1640 return false; 1653 return false;
1649 // If a full collection has happened, we shouldn't do this. 1662 // If a full collection has happened, we shouldn't do this.
1650 if (has_aborted()) { 1663 if (has_aborted()) {
1651 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1664 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1652 return; 1665 return;
1653 } 1666 }
1667
1668 g1h->verify_region_sets_optional();
1654 1669
1655 if (VerifyDuringGC) { 1670 if (VerifyDuringGC) {
1656 HandleMark hm; // handle scope 1671 HandleMark hm; // handle scope
1657 gclog_or_tty->print(" VerifyDuringGC:(before)"); 1672 gclog_or_tty->print(" VerifyDuringGC:(before)");
1658 Universe::heap()->prepare_for_verify(); 1673 Universe::heap()->prepare_for_verify();
1663 1678
1664 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1679 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1665 g1p->record_concurrent_mark_cleanup_start(); 1680 g1p->record_concurrent_mark_cleanup_start();
1666 1681
1667 double start = os::elapsedTime(); 1682 double start = os::elapsedTime();
1683
1684 HeapRegionRemSet::reset_for_cleanup_tasks();
1668 1685
1669 // Do counting once more with the world stopped for good measure. 1686 // Do counting once more with the world stopped for good measure.
1670 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), 1687 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
1671 &_region_bm, &_card_bm); 1688 &_region_bm, &_card_bm);
1672 if (G1CollectedHeap::use_parallel_gc_threads()) { 1689 if (G1CollectedHeap::use_parallel_gc_threads()) {
1714 1731
1715 g1h->reset_gc_time_stamp(); 1732 g1h->reset_gc_time_stamp();
1716 1733
1717 // Note end of marking in all heap regions. 1734 // Note end of marking in all heap regions.
1718 double note_end_start = os::elapsedTime(); 1735 double note_end_start = os::elapsedTime();
1719 G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); 1736 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
1720 if (G1CollectedHeap::use_parallel_gc_threads()) { 1737 if (G1CollectedHeap::use_parallel_gc_threads()) {
1721 int n_workers = g1h->workers()->total_workers(); 1738 int n_workers = g1h->workers()->total_workers();
1722 g1h->set_par_threads(n_workers); 1739 g1h->set_par_threads(n_workers);
1723 g1h->workers()->run_task(&g1_par_note_end_task); 1740 g1h->workers()->run_task(&g1_par_note_end_task);
1724 g1h->set_par_threads(0); 1741 g1h->set_par_threads(0);
1726 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 1743 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
1727 "sanity check"); 1744 "sanity check");
1728 } else { 1745 } else {
1729 g1_par_note_end_task.work(0); 1746 g1_par_note_end_task.work(0);
1730 } 1747 }
1731 g1h->set_unclean_regions_coming(true); 1748
1749 if (!cleanup_list_is_empty()) {
1750 // The cleanup list is not empty, so we'll have to process it
1751 // concurrently. Notify anyone else that might be wanting free
1752 // regions that there will be more free regions coming soon.
1753 g1h->set_free_regions_coming();
1754 }
1732 double note_end_end = os::elapsedTime(); 1755 double note_end_end = os::elapsedTime();
1733 // Tell the mutators that there might be unclean regions coming...
1734 if (G1PrintParCleanupStats) { 1756 if (G1PrintParCleanupStats) {
1735 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", 1757 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
1736 (note_end_end - note_end_start)*1000.0); 1758 (note_end_end - note_end_start)*1000.0);
1737 } 1759 }
1738 1760
1794 Universe::heap()->prepare_for_verify(); 1816 Universe::heap()->prepare_for_verify();
1795 Universe::verify(/* allow dirty */ true, 1817 Universe::verify(/* allow dirty */ true,
1796 /* silent */ false, 1818 /* silent */ false,
1797 /* prev marking */ true); 1819 /* prev marking */ true);
1798 } 1820 }
1821
1822 g1h->verify_region_sets_optional();
1799 } 1823 }
1800 1824
1801 void ConcurrentMark::completeCleanup() { 1825 void ConcurrentMark::completeCleanup() {
1802 // A full collection intervened.
1803 if (has_aborted()) return; 1826 if (has_aborted()) return;
1804 1827
1805 int first = 0; 1828 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1806 int last = (int)MAX2(ParallelGCThreads, (size_t)1); 1829
1807 for (int t = 0; t < last; t++) { 1830 _cleanup_list.verify_optional();
1808 UncleanRegionList* list = &_par_cleanup_thread_state[t]->list; 1831 FreeRegionList local_free_list("Local Cleanup List");
1809 assert(list->well_formed(), "Inv"); 1832
1810 HeapRegion* hd = list->hd(); 1833 if (G1ConcRegionFreeingVerbose) {
1811 while (hd != NULL) { 1834 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1812 // Now finish up the other stuff. 1835 "cleanup list has "SIZE_FORMAT" entries",
1813 hd->rem_set()->clear(); 1836 _cleanup_list.length());
1814 HeapRegion* next_hd = hd->next_from_unclean_list(); 1837 }
1815 (void)list->pop(); 1838
1816 assert(list->hd() == next_hd, "how not?"); 1839 // No one else should be accessing the _cleanup_list at this point,
1817 _g1h->put_region_on_unclean_list(hd); 1840 // so it's not necessary to take any locks.
1818 if (!hd->isHumongous()) { 1841 while (!_cleanup_list.is_empty()) {
1819 // Add this to the _free_regions count by 1. 1842 HeapRegion* hr = _cleanup_list.remove_head();
1820 _g1h->finish_free_region_work(0, 0, 1, NULL); 1843 assert(hr != NULL, "the list was not empty");
1844 hr->rem_set()->clear();
1845 local_free_list.add_as_tail(hr);
1846
1847 // Instead of adding one region at a time to the secondary_free_list,
1848 // we accumulate them in the local list and move them a few at a
1849 // time. This also cuts down on the number of notify_all() calls
1850 // we do during this process. We'll also append the local list when
1851 // _cleanup_list is empty (which means we just removed the last
1852 // region from the _cleanup_list).
1853 if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1854 _cleanup_list.is_empty()) {
1855 if (G1ConcRegionFreeingVerbose) {
1856 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1857 "appending "SIZE_FORMAT" entries to the "
1858 "secondary_free_list, clean list still has "
1859 SIZE_FORMAT" entries",
1860 local_free_list.length(),
1861 _cleanup_list.length());
1821 } 1862 }
1822 hd = list->hd(); 1863
1823 assert(hd == next_hd, "how not?"); 1864 {
1824 } 1865 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1825 } 1866 g1h->secondary_free_list_add_as_tail(&local_free_list);
1826 } 1867 SecondaryFreeList_lock->notify_all();
1868 }
1869
1870 if (G1StressConcRegionFreeing) {
1871 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1872 os::sleep(Thread::current(), (jlong) 1, false);
1873 }
1874 }
1875 }
1876 }
1877 assert(local_free_list.is_empty(), "post-condition");
1878 }
1879
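
The loop above batches the region hand-off deliberately: regions accumulate on a lock-free local list, and the shared lock is taken (and notify_all() issued) only once per G1SecondaryFreeListAppendLength regions, plus once for the final partial batch. A minimal standalone sketch of that idiom, using std::list and hypothetical names (Region, kAppendBatchLength, g_secondary_free_list) rather than the G1 types:

    #include <condition_variable>
    #include <list>
    #include <mutex>

    struct Region { /* payload elided */ };

    std::list<Region*>      g_secondary_free_list;   // shared, lock-protected
    std::mutex              g_secondary_free_lock;
    std::condition_variable g_regions_available;
    const size_t            kAppendBatchLength = 5;  // sample value; cf. G1SecondaryFreeListAppendLength

    void drain_cleanup_list(std::list<Region*>& cleanup_list) {
      std::list<Region*> local_free_list;            // private, so no locking needed
      while (!cleanup_list.empty()) {
        // Move one region from the cleanup list onto the local list.
        local_free_list.splice(local_free_list.end(), cleanup_list, cleanup_list.begin());
        // Flush every kAppendBatchLength regions, and once more at the very end,
        // so waiters are woken at most once per batch rather than once per region.
        if (local_free_list.size() % kAppendBatchLength == 0 || cleanup_list.empty()) {
          std::lock_guard<std::mutex> x(g_secondary_free_lock);
          g_secondary_free_list.splice(g_secondary_free_list.end(), local_free_list);
          g_regions_available.notify_all();
        }
      }
    }
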
1880 // Support closures for reference processing in G1
1827 1881
1828 bool G1CMIsAliveClosure::do_object_b(oop obj) { 1882 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1829 HeapWord* addr = (HeapWord*)obj; 1883 HeapWord* addr = (HeapWord*)obj;
1830 return addr != NULL && 1884 return addr != NULL &&
1831 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 1885 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1843 1897
1844 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1898 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1845 virtual void do_oop( oop* p) { do_oop_work(p); } 1899 virtual void do_oop( oop* p) { do_oop_work(p); }
1846 1900
1847 template <class T> void do_oop_work(T* p) { 1901 template <class T> void do_oop_work(T* p) {
1848 oop thisOop = oopDesc::load_decode_heap_oop(p); 1902 oop obj = oopDesc::load_decode_heap_oop(p);
1849 HeapWord* addr = (HeapWord*)thisOop; 1903 HeapWord* addr = (HeapWord*)obj;
1850 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) { 1904
1905 if (_cm->verbose_high())
1906 gclog_or_tty->print_cr("\t[0] we're looking at location "
1907 "*"PTR_FORMAT" = "PTR_FORMAT,
1908 p, (void*) obj);
1909
1910 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
1851 _bitMap->mark(addr); 1911 _bitMap->mark(addr);
1852 _cm->mark_stack_push(thisOop); 1912 _cm->mark_stack_push(obj);
1853 } 1913 }
1854 } 1914 }
1855 }; 1915 };
1856 1916
1857 class G1CMDrainMarkingStackClosure: public VoidClosure { 1917 class G1CMDrainMarkingStackClosure: public VoidClosure {
1869 void do_void() { 1929 void do_void() {
1870 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); 1930 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
1871 } 1931 }
1872 }; 1932 };
1873 1933
1934 // 'Keep Alive' closure used by parallel reference processing.
1935 // An instance of this closure is used in the parallel reference processing
1936 // code rather than an instance of G1CMKeepAliveClosure. We could have used
1937 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
1938 // placed onto discovered ref lists once so we can mark and push with no
1939 // need to check whether the object has already been marked. Using the
1940 // G1CMKeepAliveClosure would mean, however, having all the worker threads
1941 // operating on the global mark stack. This means that an individual
1942 // worker would be doing lock-free pushes while it processes its own
1943 // discovered ref list followed by drain call. If the discovered ref lists
1944 // are unbalanced then this could cause interference with the other
1945 // workers. Using a CMTask (and its embedded local data structures)
1946 // avoids that potential interference.
1947 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
1948 ConcurrentMark* _cm;
1949 CMTask* _task;
1950 CMBitMap* _bitMap;
1951 int _ref_counter_limit;
1952 int _ref_counter;
1953 public:
1954 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
1955 CMTask* task,
1956 CMBitMap* bitMap) :
1957 _cm(cm), _task(task), _bitMap(bitMap),
1958 _ref_counter_limit(G1RefProcDrainInterval)
1959 {
1960 assert(_ref_counter_limit > 0, "sanity");
1961 _ref_counter = _ref_counter_limit;
1962 }
1963
1964 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1965 virtual void do_oop( oop* p) { do_oop_work(p); }
1966
1967 template <class T> void do_oop_work(T* p) {
1968 if (!_cm->has_overflown()) {
1969 oop obj = oopDesc::load_decode_heap_oop(p);
1970 if (_cm->verbose_high())
1971 gclog_or_tty->print_cr("\t[%d] we're looking at location "
1972 "*"PTR_FORMAT" = "PTR_FORMAT,
1973 _task->task_id(), p, (void*) obj);
1974
1975 _task->deal_with_reference(obj);
1976 _ref_counter--;
1977
1978 if (_ref_counter == 0) {
1979 // We have dealt with _ref_counter_limit references, pushing them and objects
1980 // reachable from them onto the local stack (and possibly the global stack).
1981 // Call do_marking_step() to process these entries. We call the routine in a
1982 // loop, which we'll exit if there's nothing more to do (i.e. we're done
1983 // with the entries that we've pushed as a result of the deal_with_reference
1984 // calls above) or we overflow.
1985 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
1986 // while there may still be some work to do. (See the comment at the
1987 // beginning of CMTask::do_marking_step() for those conditions - one of which
1988 // is reaching the specified time target.) It is only when
1989 // CMTask::do_marking_step() returns without setting the has_aborted() flag
1990 // that the marking has completed.
1991 do {
1992 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1993 _task->do_marking_step(mark_step_duration_ms,
1994 false /* do_stealing */,
1995 false /* do_termination */);
1996 } while (_task->has_aborted() && !_cm->has_overflown());
1997 _ref_counter = _ref_counter_limit;
1998 }
1999 } else {
2000 if (_cm->verbose_high())
2001 gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
2002 }
2003 }
2004 };
2005
2006 class G1CMParDrainMarkingStackClosure: public VoidClosure {
2007 ConcurrentMark* _cm;
2008 CMTask* _task;
2009 public:
2010 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
2011 _cm(cm), _task(task)
2012 {}
2013
2014 void do_void() {
2015 do {
2016 if (_cm->verbose_high())
2017 gclog_or_tty->print_cr("\t[%d] Drain: Calling do_marking_step", _task->task_id());
2018
2019 // We call CMTask::do_marking_step() to completely drain the local and
2020 // global marking stacks. The routine is called in a loop, which we'll
2021 // exit if there's nothing more to do (i.e. we've completely drained the
2022 // entries that were pushed as a result of applying the
2023 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
2024 // lists above) or we overflow the global marking stack.
2025 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
2026 // while there may still be some work to do. (See the comment at the
2027 // beginning of CMTask::do_marking_step() for those conditions - one of which
2028 // is reaching the specified time target.) It is only when
2029 // CMTask::do_marking_step() returns without setting the has_aborted() flag
2030 // that the marking has completed.
2031
2032 _task->do_marking_step(1000000000.0 /* something very large */,
2033 true /* do_stealing */,
2034 true /* do_termination */);
2035 } while (_task->has_aborted() && !_cm->has_overflown());
2036 }
2037 };
2038
2039 // Implementation of AbstractRefProcTaskExecutor for G1
2040 class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2041 private:
2042 G1CollectedHeap* _g1h;
2043 ConcurrentMark* _cm;
2044 CMBitMap* _bitmap;
2045 WorkGang* _workers;
2046 int _active_workers;
2047
2048 public:
2049 G1RefProcTaskExecutor(G1CollectedHeap* g1h,
2050 ConcurrentMark* cm,
2051 CMBitMap* bitmap,
2052 WorkGang* workers,
2053 int n_workers) :
2054 _g1h(g1h), _cm(cm), _bitmap(bitmap),
2055 _workers(workers), _active_workers(n_workers)
2056 { }
2057
2058 // Executes the given task using concurrent marking worker threads.
2059 virtual void execute(ProcessTask& task);
2060 virtual void execute(EnqueueTask& task);
2061 };
2062
2063 class G1RefProcTaskProxy: public AbstractGangTask {
2064 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2065 ProcessTask& _proc_task;
2066 G1CollectedHeap* _g1h;
2067 ConcurrentMark* _cm;
2068 CMBitMap* _bitmap;
2069
2070 public:
2071 G1RefProcTaskProxy(ProcessTask& proc_task,
2072 G1CollectedHeap* g1h,
2073 ConcurrentMark* cm,
2074 CMBitMap* bitmap) :
2075 AbstractGangTask("Process reference objects in parallel"),
2076 _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
2077 {}
2078
2079 virtual void work(int i) {
2080 CMTask* marking_task = _cm->task(i);
2081 G1CMIsAliveClosure g1_is_alive(_g1h);
2082 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
2083 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
2084
2085 _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2086 }
2087 };
2088
2089 void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
2090 assert(_workers != NULL, "Need parallel worker threads.");
2091
2092 G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
2093
2094 // We need to reset the phase for each task execution so that
2095 // the termination protocol of CMTask::do_marking_step works.
2096 _cm->set_phase(_active_workers, false /* concurrent */);
2097 _g1h->set_par_threads(_active_workers);
2098 _workers->run_task(&proc_task_proxy);
2099 _g1h->set_par_threads(0);
2100 }
2101
2102 class G1RefEnqueueTaskProxy: public AbstractGangTask {
2103 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2104 EnqueueTask& _enq_task;
2105
2106 public:
2107 G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
2108 AbstractGangTask("Enqueue reference objects in parallel"),
2109 _enq_task(enq_task)
2110 { }
2111
2112 virtual void work(int i) {
2113 _enq_task.work(i);
2114 }
2115 };
2116
2117 void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2118 assert(_workers != NULL, "Need parallel worker threads.");
2119
2120 G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
2121
2122 _g1h->set_par_threads(_active_workers);
2123 _workers->run_task(&enq_task_proxy);
2124 _g1h->set_par_threads(0);
2125 }
2126
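
Both proxy classes above follow the same shape: adapt a reference-processing task to the AbstractGangTask interface so the existing work gang can run it. Stripped to its skeleton (a sketch only — the template and its names are illustrative, not HotSpot API; the real G1RefProcTaskProxy also builds the per-worker closures in work()):

    // Adapt any TaskT exposing work(int) to AbstractGangTask::work(int).
    template <typename TaskT>
    class GangTaskProxy : public AbstractGangTask {
      TaskT& _task;
    public:
      GangTaskProxy(const char* name, TaskT& task)
        : AbstractGangTask(name), _task(task) { }
      virtual void work(int worker_id) { _task.work(worker_id); }
    };
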
1874 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2127 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1875 ResourceMark rm; 2128 ResourceMark rm;
1876 HandleMark hm; 2129 HandleMark hm;
1877 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2130 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1878 ReferenceProcessor* rp = g1h->ref_processor(); 2131 ReferenceProcessor* rp = g1h->ref_processor();
1887 G1CMIsAliveClosure g1_is_alive(g1h); 2140 G1CMIsAliveClosure g1_is_alive(g1h);
1888 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); 2141 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
1889 G1CMDrainMarkingStackClosure 2142 G1CMDrainMarkingStackClosure
1890 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); 2143 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
1891 2144
1892 // XXXYYY Also: copy the parallel ref processing code from CMS. 2145 // We use the work gang from the G1CollectedHeap and we utilize all
1893 rp->process_discovered_references(&g1_is_alive, 2146 // the worker threads.
1894 &g1_keep_alive, 2147 int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1);
1895 &g1_drain_mark_stack, 2148
1896 NULL); 2149 G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
2150 g1h->workers(), active_workers);
2151
2152 if (rp->processing_is_mt()) {
2153 // Set the degree of MT here. If the discovery is done MT, there
2154 // may have been a different number of threads doing the discovery
2155 // and a different number of discovered lists may have Ref objects.
2156 // That is OK as long as the Reference lists are balanced (see
2157 // balance_all_queues() and balance_queues()).
2158 rp->set_mt_degree(active_workers);
2159
2160 rp->process_discovered_references(&g1_is_alive,
2161 &g1_keep_alive,
2162 &g1_drain_mark_stack,
2163 &par_task_executor);
2164
2165 // The work routines of the parallel keep_alive and drain_marking_stack
2166 // will set the has_overflown flag if we overflow the global marking
2167 // stack.
2168 } else {
2169 rp->process_discovered_references(&g1_is_alive,
2170 &g1_keep_alive,
2171 &g1_drain_mark_stack,
2172 NULL);
2173
2174 }
2175
1897 assert(_markStack.overflow() || _markStack.isEmpty(), 2176 assert(_markStack.overflow() || _markStack.isEmpty(),
1898 "mark stack should be empty (unless it overflowed)"); 2177 "mark stack should be empty (unless it overflowed)");
1899 if (_markStack.overflow()) { 2178 if (_markStack.overflow()) {
2179 // Should have been done already when we tried to push an
2180 // entry on to the global mark stack. But let's do it again.
1900 set_has_overflown(); 2181 set_has_overflown();
1901 } 2182 }
1902 2183
1903 rp->enqueue_discovered_references(); 2184 if (rp->processing_is_mt()) {
2185 assert(rp->num_q() == active_workers, "why not");
2186 rp->enqueue_discovered_references(&par_task_executor);
2187 } else {
2188 rp->enqueue_discovered_references();
2189 }
2190
1904 rp->verify_no_references_recorded(); 2191 rp->verify_no_references_recorded();
1905 assert(!rp->discovery_enabled(), "should have been disabled"); 2192 assert(!rp->discovery_enabled(), "should have been disabled");
1906 2193
1907 // Now clean up stale oops in SymbolTable and StringTable 2194 // Now clean up stale oops in StringTable
1908 SymbolTable::unlink(&g1_is_alive);
1909 StringTable::unlink(&g1_is_alive); 2195 StringTable::unlink(&g1_is_alive);
2196 // Clean up unreferenced symbols in symbol table.
2197 SymbolTable::unlink();
1910 } 2198 }
1911 2199
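
The active_workers clamp in weakRefsWork() reads: never more workers than there are CMTasks, never fewer than one. Illustrative numbers (made up; MAX2/MIN2 are the HotSpot macros used above):

    // active_workers = MAX2(MIN2(total_workers, (int)_max_task_num), 1)
    int total_workers  = 6;  // g1h->workers()->total_workers()
    int max_task_num   = 8;  // one CMTask per marking task
    int active_workers = MAX2(MIN2(total_workers, max_task_num), 1);  // == 6
    // With no parallel gang (total_workers == 0) the floor keeps this at 1.
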
1912 void ConcurrentMark::swapMarkBitMaps() { 2200 void ConcurrentMark::swapMarkBitMaps() {
1913 CMBitMapRO* temp = _prevMarkBitMap; 2201 CMBitMapRO* temp = _prevMarkBitMap;
1914 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2202 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
1925 // only proceed if we're supposed to be active. 2213 // only proceed if we're supposed to be active.
1926 if ((size_t)worker_i < _cm->active_tasks()) { 2214 if ((size_t)worker_i < _cm->active_tasks()) {
1927 CMTask* task = _cm->task(worker_i); 2215 CMTask* task = _cm->task(worker_i);
1928 task->record_start_time(); 2216 task->record_start_time();
1929 do { 2217 do {
1930 task->do_marking_step(1000000000.0 /* something very large */); 2218 task->do_marking_step(1000000000.0 /* something very large */,
2219 true /* do_stealing */,
2220 true /* do_termination */);
1931 } while (task->has_aborted() && !_cm->has_overflown()); 2221 } while (task->has_aborted() && !_cm->has_overflown());
1932 // If we overflow, then we do not want to restart. We instead 2222 // If we overflow, then we do not want to restart. We instead
1933 // want to abort remark and do concurrent marking again. 2223 // want to abort remark and do concurrent marking again.
1934 task->record_end_time(); 2224 task->record_end_time();
1935 } 2225 }
1948 2238
1949 if (G1CollectedHeap::use_parallel_gc_threads()) { 2239 if (G1CollectedHeap::use_parallel_gc_threads()) {
1950 G1CollectedHeap::StrongRootsScope srs(g1h); 2240 G1CollectedHeap::StrongRootsScope srs(g1h);
1951 // this is remark, so we'll use up all available threads 2241 // this is remark, so we'll use up all available threads
1952 int active_workers = ParallelGCThreads; 2242 int active_workers = ParallelGCThreads;
1953 set_phase(active_workers, false); 2243 set_phase(active_workers, false /* concurrent */);
1954 2244
1955 CMRemarkTask remarkTask(this); 2245 CMRemarkTask remarkTask(this);
1956 // We will start all available threads, even if we decide that the 2246 // We will start all available threads, even if we decide that the
1957 // active_workers will be fewer. The extra ones will just bail out 2247 // active_workers will be fewer. The extra ones will just bail out
1958 // immediately. 2248 // immediately.
1962 g1h->set_par_threads(0); 2252 g1h->set_par_threads(0);
1963 } else { 2253 } else {
1964 G1CollectedHeap::StrongRootsScope srs(g1h); 2254 G1CollectedHeap::StrongRootsScope srs(g1h);
1965 // this is remark, so we'll use up all available threads 2255 // this is remark, so we'll use up all available threads
1966 int active_workers = 1; 2256 int active_workers = 1;
1967 set_phase(active_workers, false); 2257 set_phase(active_workers, false /* concurrent */);
1968 2258
1969 CMRemarkTask remarkTask(this); 2259 CMRemarkTask remarkTask(this);
1970 // We will start all available threads, even if we decide that the 2260 // We will start all available threads, even if we decide that the
1971 // active_workers will be fewer. The extra ones will just bail out 2261 // active_workers will be fewer. The extra ones will just bail out
1972 // immediately. 2262 // immediately.
1974 } 2264 }
1975 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2265 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1976 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); 2266 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
1977 2267
1978 print_stats(); 2268 print_stats();
1979
1980 if (!restart_for_overflow())
1981 set_non_marking_state();
1982 2269
1983 #if VERIFY_OBJS_PROCESSED 2270 #if VERIFY_OBJS_PROCESSED
1984 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { 2271 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
1985 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", 2272 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
1986 _scan_obj_cl.objs_processed, 2273 _scan_obj_cl.objs_processed,
2892 public: 3179 public:
2893 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 3180 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2894 virtual void do_oop( oop* p) { do_oop_work(p); } 3181 virtual void do_oop( oop* p) { do_oop_work(p); }
2895 3182
2896 template <class T> void do_oop_work(T* p) { 3183 template <class T> void do_oop_work(T* p) {
2897 assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); 3184 assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
2898 assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), 3185 assert(!_g1h->is_on_free_list(
2899 "invariant"); 3186 _g1h->heap_region_containing((HeapWord*) p)), "invariant");
2900 3187
2901 oop obj = oopDesc::load_decode_heap_oop(p); 3188 oop obj = oopDesc::load_decode_heap_oop(p);
2902 if (_cm->verbose_high()) 3189 if (_cm->verbose_high())
2903 gclog_or_tty->print_cr("[%d] we're looking at location " 3190 gclog_or_tty->print_cr("[%d] we're looking at location "
2904 "*"PTR_FORMAT" = "PTR_FORMAT, 3191 "*"PTR_FORMAT" = "PTR_FORMAT,
3094 push(obj); 3381 push(obj);
3095 } else { 3382 } else {
3096 // do nothing 3383 // do nothing
3097 } 3384 }
3098 #else // _CHECK_BOTH_FINGERS_ 3385 #else // _CHECK_BOTH_FINGERS_
3099 // we will only check the global finger 3386 // we will only check the global finger
3100 3387
3101 if (objAddr < global_finger) { 3388 if (objAddr < global_finger) {
3102 // see long comment above 3389 // see long comment above
3103 3390
3104 if (_cm->verbose_high()) 3391 if (_cm->verbose_high())
3114 } 3401 }
3115 3402
3116 void CMTask::push(oop obj) { 3403 void CMTask::push(oop obj) {
3117 HeapWord* objAddr = (HeapWord*) obj; 3404 HeapWord* objAddr = (HeapWord*) obj;
3118 assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); 3405 assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
3119 assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(), 3406 assert(!_g1h->is_on_free_list(
3120 "invariant"); 3407 _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
3121 assert(!_g1h->is_obj_ill(obj), "invariant"); 3408 assert(!_g1h->is_obj_ill(obj), "invariant");
3122 assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); 3409 assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
3123 3410
3124 if (_cm->verbose_high()) 3411 if (_cm->verbose_high())
3125 gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj); 3412 gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
3219 // (5) We check whether we've reached our time quota. If we have, 3506 // (5) We check whether we've reached our time quota. If we have,
3220 // then we abort. 3507 // then we abort.
3221 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3508 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3222 if (elapsed_time_ms > _time_target_ms) { 3509 if (elapsed_time_ms > _time_target_ms) {
3223 set_has_aborted(); 3510 set_has_aborted();
3224 _has_aborted_timed_out = true; 3511 _has_timed_out = true;
3225 statsOnly( ++_aborted_timed_out ); 3512 statsOnly( ++_aborted_timed_out );
3226 return; 3513 return;
3227 } 3514 }
3228 3515
3229 // (6) Finally, we check whether there are enough completed SATB 3516 // (6) Finally, we check whether there are enough completed SATB
3360 if (_cm->verbose_high()) 3647 if (_cm->verbose_high())
3361 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, 3648 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
3362 (void*) obj); 3649 (void*) obj);
3363 3650
3364 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3651 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3365 assert(!_g1h->heap_region_containing(obj)->is_on_free_list(), 3652 assert(!_g1h->is_on_free_list(
3366 "invariant"); 3653 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3367 3654
3368 scan_object(obj); 3655 scan_object(obj);
3369 3656
3370 if (_task_queue->size() <= target_size || has_aborted()) 3657 if (_task_queue->size() <= target_size || has_aborted())
3371 ret = false; 3658 ret = false;
3724 place, it was natural to piggy-back all the other conditions on it 4011 place, it was natural to piggy-back all the other conditions on it
3725 too and not constantly check them throughout the code. 4012 too and not constantly check them throughout the code.
3726 4013
3727 *****************************************************************************/ 4014 *****************************************************************************/
3728 4015
3729 void CMTask::do_marking_step(double time_target_ms) { 4016 void CMTask::do_marking_step(double time_target_ms,
4017 bool do_stealing,
4018 bool do_termination) {
3730 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 4019 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3731 assert(concurrent() == _cm->concurrent(), "they should be the same"); 4020 assert(concurrent() == _cm->concurrent(), "they should be the same");
3732 4021
3733 assert(concurrent() || _cm->region_stack_empty(), 4022 assert(concurrent() || _cm->region_stack_empty(),
3734 "the region stack should have been cleared before remark"); 4023 "the region stack should have been cleared before remark");
3764 _refs_reached = 0; 4053 _refs_reached = 0;
3765 recalculate_limits(); 4054 recalculate_limits();
3766 4055
3767 // clear all flags 4056 // clear all flags
3768 clear_has_aborted(); 4057 clear_has_aborted();
3769 _has_aborted_timed_out = false; 4058 _has_timed_out = false;
3770 _draining_satb_buffers = false; 4059 _draining_satb_buffers = false;
3771 4060
3772 ++_calls; 4061 ++_calls;
3773 4062
3774 if (_cm->verbose_low()) 4063 if (_cm->verbose_low())
3940 // local queue and global stack. 4229 // local queue and global stack.
3941 drain_local_queue(false); 4230 drain_local_queue(false);
3942 drain_global_stack(false); 4231 drain_global_stack(false);
3943 4232
3944 // Attempt at work stealing from other task's queues. 4233 // Attempt at work stealing from other task's queues.
3945 if (!has_aborted()) { 4234 if (do_stealing && !has_aborted()) {
3946 // We have not aborted. This means that we have finished all that 4235 // We have not aborted. This means that we have finished all that
3947 // we could. Let's try to do some stealing... 4236 // we could. Let's try to do some stealing...
3948 4237
3949 // We cannot check whether the global stack is empty, since other 4238 // We cannot check whether the global stack is empty, since other
3950 // tasks might be pushing objects to it concurrently. We also cannot 4239 // tasks might be pushing objects to it concurrently. We also cannot
3981 } 4270 }
3982 } 4271 }
3983 4272
3984 // We still haven't aborted. Now, let's try to get into the 4273 // We still haven't aborted. Now, let's try to get into the
3985 // termination protocol. 4274 // termination protocol.
3986 if (!has_aborted()) { 4275 if (do_termination && !has_aborted()) {
3987 // We cannot check whether the global stack is empty, since other 4276 // We cannot check whether the global stack is empty, since other
3988 // tasks might be concurrently pushing objects on it. We also cannot 4277 // tasks might be concurrently pushing objects on it. We also cannot
3989 // check if the region stack is empty because if a thread is aborting 4278 // check if the region stack is empty because if a thread is aborting
3990 // it can push a partially done region back. 4279 // it can push a partially done region back.
3991 // Separated the asserts so that we know which one fires. 4280 // Separated the asserts so that we know which one fires.
4057 if (has_aborted()) { 4346 if (has_aborted()) {
4058 // The task was aborted for some reason. 4347 // The task was aborted for some reason.
4059 4348
4060 statsOnly( ++_aborted ); 4349 statsOnly( ++_aborted );
4061 4350
4062 if (_has_aborted_timed_out) { 4351 if (_has_timed_out) {
4063 double diff_ms = elapsed_time_ms - _time_target_ms; 4352 double diff_ms = elapsed_time_ms - _time_target_ms;
4064 // Keep statistics of how well we did with respect to hitting 4353 // Keep statistics of how well we did with respect to hitting
4065 // our target only if we actually timed out (if we aborted for 4354 // our target only if we actually timed out (if we aborted for
4066 // other reasons, then the results might get skewed). 4355 // other reasons, then the results might get skewed).
4067 _marking_step_diffs_ms.add(diff_ms); 4356 _marking_step_diffs_ms.add(diff_ms);