comparison src/share/vm/memory/cardTableModRefBS.cpp @ 4095:bca17e38de00

6593758: RFE: Enhance GC ergonomics to dynamically choose ParallelGCThreads
Summary: Select number of GC threads dynamically based on heap usage and number of Java threads
Reviewed-by: johnc, ysr, jcoomes
author jmasa
date Tue, 09 Aug 2011 10:16:01 -0700
parents fc2b798ab316
children ea640b5e949a
--- src/share/vm/memory/cardTableModRefBS.cpp	(4094:3a298e04d914)
+++ src/share/vm/memory/cardTableModRefBS.cpp	(4095:bca17e38de00)
@@ -458,13 +458,47 @@
 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
                                                                  OopsInGenClosure* cl,
                                                                  CardTableRS* ct) {
   if (!mr.is_empty()) {
-    int n_threads = SharedHeap::heap()->n_par_threads();
-    if (n_threads > 0) {
+    // Caller (process_strong_roots()) claims that all GC threads
+    // execute this call. With UseDynamicNumberOfGCThreads now all
+    // active GC threads execute this call. The number of active GC
+    // threads needs to be passed to par_non_clean_card_iterate_work()
+    // to get proper partitioning and termination.
+    //
+    // This is an example of where n_par_threads() is used instead
+    // of workers()->active_workers(). n_par_threads can be set to 0 to
+    // turn off parallelism. For example, when this code is called as
+    // part of verification and SharedHeap::process_strong_roots() is being
+    // used, then n_par_threads() may have been set to 0. active_workers
+    // is not overloaded with the meaning that it is a switch to disable
+    // parallelism and so keeps the meaning of the number of
+    // active gc workers. If parallelism has not been shut off by
+    // setting n_par_threads to 0, then n_par_threads should be
+    // equal to active_workers. When a different mechanism for shutting
+    // off parallelism is used, then active_workers can be used in
+    // place of n_par_threads.
+    // This is an example of a path where n_par_threads is
+    // set to 0 to turn off parallelism:
+    //   [7]  CardTableModRefBS::non_clean_card_iterate()
+    //   [8]  CardTableRS::younger_refs_in_space_iterate()
+    //   [9]  Generation::younger_refs_in_space_iterate()
+    //   [10] OneContigSpaceCardGeneration::younger_refs_iterate()
+    //   [11] CompactingPermGenGen::younger_refs_iterate()
+    //   [12] CardTableRS::younger_refs_iterate()
+    //   [13] SharedHeap::process_strong_roots()
+    //   [14] G1CollectedHeap::verify()
+    //   [15] Universe::verify()
+    //   [16] G1CollectedHeap::do_collection_pause_at_safepoint()
+    //
+    int n_threads = SharedHeap::heap()->n_par_threads();
+    bool is_par = n_threads > 0;
+    if (is_par) {
 #ifndef SERIALGC
+      assert(SharedHeap::heap()->n_par_threads() ==
+             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else // SERIALGC
       fatal("Parallel gc not supported here.");
 #endif // SERIALGC
     } else {
@@ -487,10 +521,14 @@
 // accomplish dirty card iteration in parallel. The
 // iterator itself does not clear the dirty cards, or
 // change their values in any manner.
 void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                       MemRegionClosure* cl) {
+  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
+  assert(!is_par ||
+         (SharedHeap::heap()->n_par_threads() ==
+          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (mri.word_size() > 0) {
       jbyte* cur_entry = byte_for(mri.last());
       jbyte* limit = byte_for(mri.start());
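
The added comment and asserts pin down a simple contract: SharedHeap::heap()->n_par_threads() is either 0 (parallelism deliberately shut off, as on the verification path listed in the comment) or equal to workers()->active_workers(). A minimal standalone sketch of that contract follows; Heap, WorkGang, and iterate_cards_possibly_parallel are illustrative stand-ins, not the real SharedHeap/WorkGang API.

#include <cassert>
#include <cstdio>

struct WorkGang {
  int _active_workers;
  int active_workers() const { return _active_workers; }
};

struct Heap {
  WorkGang _workers;
  int      _n_par_threads;                 // 0 means "run this phase serially"
  WorkGang* workers()         { return &_workers; }
  int  n_par_threads()  const { return _n_par_threads; }
  void set_par_threads(int t) { _n_par_threads = t; }
};

// Mirrors the shape of non_clean_card_iterate_possibly_parallel(): go parallel
// only when the caller published a non-zero thread count, and in that case the
// count must match the gang's active workers.
void iterate_cards_possibly_parallel(Heap* heap) {
  int n_threads = heap->n_par_threads();
  bool is_par = n_threads > 0;
  if (is_par) {
    assert(n_threads == heap->workers()->active_workers() && "Mismatch");
    printf("parallel card scan with %d workers\n", n_threads);
  } else {
    printf("serial card scan\n");          // e.g. a verification pass
  }
}

int main() {
  Heap heap;
  heap._workers._active_workers = 4;

  heap.set_par_threads(heap.workers()->active_workers());  // normal GC pause
  iterate_cards_possibly_parallel(&heap);

  heap.set_par_threads(0);                                  // parallelism shut off
  iterate_cards_possibly_parallel(&heap);
  return 0;
}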
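The comment also stresses that the active thread count must reach par_non_clean_card_iterate_work() "to get proper partitioning and termination". One common way to partition a card range across n_threads workers and terminate cleanly is chunk claiming with an atomic counter, sketched below under assumed names (parallel_card_scan, scan_chunk, chunk_cards); this is an illustration of the idea, not the HotSpot implementation.

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Placeholder for the real per-chunk dirty-card work.
static void scan_chunk(size_t first_card, size_t last_card, int worker_id) {
  printf("worker %d scans cards [%zu, %zu)\n", worker_id, first_card, last_card);
}

// Cut [0, n_cards) into fixed-size chunks; each worker claims the next unscanned
// chunk with an atomic counter and stops when no chunks remain, so every worker
// terminates exactly when the whole range has been claimed.
static void parallel_card_scan(size_t n_cards, int n_threads, size_t chunk_cards) {
  const size_t n_chunks = (n_cards + chunk_cards - 1) / chunk_cards;
  std::atomic<size_t> next_chunk{0};

  auto worker = [&](int id) {
    for (;;) {
      size_t chunk = next_chunk.fetch_add(1);
      if (chunk >= n_chunks) break;                       // nothing left: terminate
      size_t first = chunk * chunk_cards;
      size_t last  = std::min(first + chunk_cards, n_cards);
      scan_chunk(first, last, id);
    }
  };

  std::vector<std::thread> gang;
  for (int i = 0; i < n_threads; i++) gang.emplace_back(worker, i);
  for (std::thread& t : gang) t.join();
}

int main() {
  parallel_card_scan(/*n_cards=*/1000, /*n_threads=*/4, /*chunk_cards=*/128);
  return 0;
}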
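The second hunk lands in non_clean_card_iterate_serial(), which walks each covered region's card bytes from byte_for(mri.last()) back toward byte_for(mri.start()). A simplified, self-contained sketch of that backward scan follows; the vector-based card table, the clean_card value, and the process_run callback are assumptions for illustration, not the CardTableModRefBS internals.

#include <cstdio>
#include <vector>

const unsigned char clean_card = 0xff;      // stand-in for HotSpot's clean-card value

// Stand-in for invoking the MemRegionClosure on the memory a dirty run covers.
static void process_run(size_t start_card, size_t end_card) {
  printf("non-clean cards [%zu, %zu]\n", start_card, end_card);
}

// Scan cards [first_card, last_card] from the high end toward the low end,
// coalescing adjacent non-clean cards into maximal runs and reporting each run once.
static void non_clean_card_iterate_serial(const std::vector<unsigned char>& cards,
                                          size_t first_card, size_t last_card) {
  size_t cur = last_card + 1;               // one past the card about to be examined
  while (cur > first_card) {
    --cur;
    if (cards[cur] != clean_card) {
      size_t run_end = cur;                 // highest card in this dirty run
      while (cur > first_card && cards[cur - 1] != clean_card) {
        --cur;                              // extend the run downward
      }
      process_run(cur, run_end);
    }
  }
}

int main() {
  std::vector<unsigned char> cards(16, clean_card);
  cards[3] = 0; cards[4] = 0; cards[10] = 0;  // dirty a few cards
  non_clean_card_iterate_serial(cards, 0, cards.size() - 1);
  return 0;
}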