comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1111:44f61c24ddab

6862387: tune concurrent refinement further
Summary: Reworked concurrent refinement: refinement thread activation, feedback-based threshold adjustment, and other miscellaneous fixes.
Reviewed-by: apetrusenko, tonyp
author iveresov
date Wed, 16 Dec 2009 15:12:51 -0800
parents ed52bcc32739
children 7b0e9cba0307
comparing 1104:27f9477e879b (parent) with 1111:44f61c24ddab
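The summary mentions refinement thread activation and feedback-based threshold adjustment. As a rough illustration of that idea (not the HotSpot code: the class name, fields, and zone ratios below are made up for this sketch), the number of completed dirty-card buffers can be mapped to a number of active refinement workers through green/yellow/red thresholds, and the thresholds themselves can be nudged after each pause based on how large a backlog the pause had to clean up:

#include <algorithm>
#include <cstddef>

// Illustrative sketch only: a feedback controller that decides how many
// refinement worker threads should run, based on the number of completed
// dirty-card buffers. The zone names mirror the changeset's terminology;
// the class, fields, and ratios are hypothetical.
class RefinementControl {
  std::size_t _green_zone;   // below this: leave buffers for the next GC pause
  std::size_t _yellow_zone;  // between green and yellow: wake workers gradually
  std::size_t _red_zone;     // above this: mutators must help process buffers
  std::size_t _worker_count; // refinement worker threads available

public:
  RefinementControl(std::size_t green, std::size_t yellow,
                    std::size_t red, std::size_t workers)
    : _green_zone(green), _yellow_zone(yellow), _red_zone(red),
      _worker_count(workers) {}

  // How many workers should be active for a given backlog of completed
  // buffers: none in the green zone, all of them at or above yellow.
  std::size_t workers_wanted(std::size_t completed_buffers) const {
    if (completed_buffers <= _green_zone)  return 0;
    if (completed_buffers >= _yellow_zone) return _worker_count;
    std::size_t span = _yellow_zone - _green_zone;
    return ((completed_buffers - _green_zone) * _worker_count + span - 1) / span;
  }

  // Above the red zone the queue set stops buffering more work and the
  // mutator thread processes its own buffer instead.
  bool mutator_should_process(std::size_t completed_buffers) const {
    return completed_buffers >= _red_zone;
  }

  // Feedback step, e.g. once per pause: if refinement could not keep up,
  // widen the zones; if it had idle capacity, shrink them.
  void adjust(std::size_t buffers_left_for_gc, std::size_t target_left_for_gc) {
    if (buffers_left_for_gc > target_left_for_gc) {
      _green_zone += std::max<std::size_t>(1, _green_zone / 2);
    } else if (_green_zone > 1) {
      _green_zone -= std::max<std::size_t>(1, _green_zone / 4);
    }
    _yellow_zone = _green_zone * 3;  // ratios chosen arbitrarily for the sketch
    _red_zone    = _green_zone * 6;
  }
};

The hunks below wire two of these thresholds, yellow_zone() and red_zone(), into the initialization of the JavaThread dirty card queue set.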
@@ -1373,10 +1373,11 @@

 
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   SharedHeap(policy_),
   _g1_policy(policy_),
+  _dirty_card_queue_set(false),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
@@ -1458,12 +1459,10 @@

   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

-  // We allocate this in any case, but only do no work if the command line
-  // param is off.
   _cg1r = new ConcurrentG1Refine();

   // Reserve the maximum.
   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
   // Includes the perm-gen.
@@ -1592,22 +1591,24 @@

                                          concurrent_g1_refine());
   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);

   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                SATB_Q_FL_lock,
-                                               0,
+                                               G1SATBProcessCompletedThreshold,
                                                Shared_SATB_Q_lock);

   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
-                                                G1UpdateBufferQueueMaxLength,
+                                                concurrent_g1_refine()->yellow_zone(),
+                                                concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock);

   if (G1DeferredRSUpdate) {
     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                       DirtyCardQ_FL_lock,
-                                      0,
+                                      -1, // never trigger processing
+                                      -1, // no limit on length
                                       Shared_DirtyCardQ_lock,
                                       &JavaThread::dirty_card_queue_set());
   }
   // In case we're keeping closure specialization stats, initialize those
   // counts and that mechanism.
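In the hunk above, the JavaThread dirty card queue set is now initialized with concurrent_g1_refine()->yellow_zone() as the threshold that triggers processing and ->red_zone() as the cap on completed buffers, while the deferred-update set passes -1 for both, i.e. it is never processed concurrently and never limits its length (the inline comments in the diff say as much). A minimal sketch of how a queue set might consume two such parameters; ToyDirtyCardQueueSet and its methods are hypothetical, not the PtrQueueSet API, and the "mutator refines in place" behavior above the cap is an assumption of the sketch:

#include <deque>
#include <vector>
#include <utility>

// Hypothetical queue set: buffers of dirty-card addresses completed by
// mutator threads, plus a "process" threshold and a hard cap.
struct CardBuffer { std::vector<void*> cards; };

class ToyDirtyCardQueueSet {
  std::deque<CardBuffer> _completed;  // buffers waiting for refinement
  long _process_threshold;            // -1 means "never notify"
  long _max_completed;                // -1 means "no limit"

public:
  ToyDirtyCardQueueSet(long process_threshold, long max_completed)
    : _process_threshold(process_threshold), _max_completed(max_completed) {}

  // Called by a mutator when its local buffer fills up. Returns true if the
  // buffer was queued, false if the caller must process it itself because
  // the backlog has hit the cap (the red zone).
  bool enqueue_completed(CardBuffer&& buf) {
    if (_max_completed >= 0 && (long)_completed.size() >= _max_completed) {
      return false;                   // assumed: mutator refines in place
    }
    _completed.push_back(std::move(buf));
    if (_process_threshold >= 0 &&
        (long)_completed.size() >= _process_threshold) {
      notify_refinement_threads();    // yellow zone reached: wake the workers
    }
    return true;
  }

private:
  void notify_refinement_threads() { /* signal a condition variable, etc. */ }
};

With (-1, -1), as in the G1DeferredRSUpdate branch, neither check ever fires: buffers simply accumulate until the GC drains them.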
@@ -4237,14 +4238,15 @@

   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
-    JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
+
+    DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+    dcq.merge_bufferlists(&dirty_card_queue_set());
     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
-
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }

 void G1CollectedHeap::free_region(HeapRegion* hr) {
   size_t pre_used = 0;
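The last hunk is mostly a cosmetic split of one long statement through a local DirtyCardQueueSet reference, but the call it isolates is the interesting one: after the logged cards are redirtied, the heap-local deferred queue set hands all of its completed buffers over to the global JavaThread set, which is what the assert on completed_buffers_num() == 0 relies on. A toy sketch of that hand-off; ToyQueueSet is hypothetical, and the real merge_bufferlists presumably does this on its internal buffer list under the appropriate locks:

#include <deque>
#include <cstddef>

// Hypothetical sketch of "merge the buffer lists": move every completed
// buffer from a source queue set into the destination, leaving the source
// empty (matching the assert in the hunk above).
struct Buffer { /* card addresses, index, ... */ };

class ToyQueueSet {
  std::deque<Buffer*> _completed;
public:
  void merge_bufferlists(ToyQueueSet* src) {
    // Splice rather than copy: ownership of the buffers moves to *this.
    _completed.insert(_completed.end(),
                      src->_completed.begin(), src->_completed.end());
    src->_completed.clear();
  }
  std::size_t completed_buffers_num() const { return _completed.size(); }
};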