comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20278:2c6ef90f030a
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
author     stefank
date       Mon, 07 Jul 2014 10:12:40 +0200
parents    a8137787acfe
children   870c03421152
20277:882004b9e7e1 | 20278:2c6ef90f030a
53 #include "gc_implementation/shared/gcHeapSummary.hpp" | 53 #include "gc_implementation/shared/gcHeapSummary.hpp" |
54 #include "gc_implementation/shared/gcTimer.hpp" | 54 #include "gc_implementation/shared/gcTimer.hpp" |
55 #include "gc_implementation/shared/gcTrace.hpp" | 55 #include "gc_implementation/shared/gcTrace.hpp" |
56 #include "gc_implementation/shared/gcTraceTime.hpp" | 56 #include "gc_implementation/shared/gcTraceTime.hpp" |
57 #include "gc_implementation/shared/isGCActiveMark.hpp" | 57 #include "gc_implementation/shared/isGCActiveMark.hpp" |
58 #include "memory/allocation.hpp" | |
58 #include "memory/gcLocker.inline.hpp" | 59 #include "memory/gcLocker.inline.hpp" |
59 #include "memory/generationSpec.hpp" | 60 #include "memory/generationSpec.hpp" |
60 #include "memory/iterator.hpp" | 61 #include "memory/iterator.hpp" |
61 #include "memory/referenceProcessor.hpp" | 62 #include "memory/referenceProcessor.hpp" |
62 #include "oops/oop.inline.hpp" | 63 #include "oops/oop.inline.hpp" |
85 // Notes on implementation of parallelism in different tasks. | 86 // Notes on implementation of parallelism in different tasks. |
86 // | 87 // |
87 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. | 88 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism. |
88 // The number of GC workers is passed to heap_region_par_iterate_chunked(). | 89 // The number of GC workers is passed to heap_region_par_iterate_chunked(). |
89 // It does use run_task() which sets _n_workers in the task. | 90 // It does use run_task() which sets _n_workers in the task. |
90 // G1ParTask executes g1_process_strong_roots() -> | 91 // G1ParTask executes g1_process_roots() -> |
91 // SharedHeap::process_strong_roots() which calls eventually to | 92 // SharedHeap::process_roots() which calls eventually to |
92 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses | 93 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses |
93 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also | 94 // SequentialSubTasksDone. SharedHeap::process_roots() also |
94 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). | 95 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). |
95 // | 96 // |
96 | 97 |
97 // Local to this file. | 98 // Local to this file. |
98 | 99 |
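The notes above name two claiming utilities, SubTasksDone and SequentialSubTasksDone. As a minimal sketch of the claiming idea (illustrative layout, not HotSpot's actual implementation, which also tracks completion for the all_tasks_completed() check):

    // Each enumerated subtask is claimed at most once across all GC
    // workers via an atomic 0 -> 1 swap on a per-task flag.
    class SubTasksDoneSketch {
      enum { MAX_TASKS = 32 };          // assumed capacity
      volatile jint _tasks[MAX_TASKS];  // 0 = unclaimed, 1 = claimed
    public:
      bool is_task_claimed(uint t) {
        // The first caller to flip the flag owns the task; later
        // callers see "already claimed" and skip the work.
        return Atomic::cmpxchg(1, &_tasks[t], 0) != 0;
      }
    };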
3389 "Expected to be executed serially by the VM thread at this point"); | 3390 "Expected to be executed serially by the VM thread at this point"); |
3390 | 3391 |
3391 if (!silent) { gclog_or_tty->print("Roots "); } | 3392 if (!silent) { gclog_or_tty->print("Roots "); } |
3392 VerifyRootsClosure rootsCl(vo); | 3393 VerifyRootsClosure rootsCl(vo); |
3393 VerifyKlassClosure klassCl(this, &rootsCl); | 3394 VerifyKlassClosure klassCl(this, &rootsCl); |
3395 CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false); | |
3394 | 3396 |
3395 // We apply the relevant closures to all the oops in the | 3397 // We apply the relevant closures to all the oops in the |
3396 // system dictionary, class loader data graph and the string table. | 3398 // system dictionary, class loader data graph, the string table |
3397 // Don't verify the code cache here, since it's verified below. | 3399 // and the nmethods in the code cache. |
3398 const int so = SO_AllClasses | SO_Strings; | |
3399 | |
3400 // Need cleared claim bits for the strong roots processing | |
3401 ClassLoaderDataGraph::clear_claimed_marks(); | |
3402 | |
3403 process_strong_roots(true, // activate StrongRootsScope | |
3404 ScanningOption(so), // roots scanning options | |
3405 &rootsCl, | |
3406 &klassCl | |
3407 ); | |
3408 | |
3409 // Verify the nmethods in the code cache. | |
3410 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); | 3400 G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); |
3411 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); | 3401 G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); |
3412 CodeCache::blobs_do(&blobsCl); | 3402 |
3403 process_all_roots(true, // activate StrongRootsScope | |
3404 SO_AllCodeCache, // roots scanning options | |
3405 &rootsCl, | |
3406 &cldCl, | |
3407 &blobsCl); | |
3413 | 3408 |
3414 bool failures = rootsCl.failures() || codeRootsCl.failures(); | 3409 bool failures = rootsCl.failures() || codeRootsCl.failures(); |
3415 | 3410 |
3416 if (vo != VerifyOption_G1UseMarkWord) { | 3411 if (vo != VerifyOption_G1UseMarkWord) { |
3417 // If we're verifying during a full GC then the region sets | 3412 // If we're verifying during a full GC then the region sets |
4337 void G1CollectedHeap::release_mutator_alloc_region() { | 4332 void G1CollectedHeap::release_mutator_alloc_region() { |
4338 _mutator_alloc_region.release(); | 4333 _mutator_alloc_region.release(); |
4339 assert(_mutator_alloc_region.get() == NULL, "post-condition"); | 4334 assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
4340 } | 4335 } |
4341 | 4336 |
4342 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { | 4337 void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) { |
4343 assert_at_safepoint(true /* should_be_vm_thread */); | |
4344 | |
4345 _survivor_gc_alloc_region.init(); | |
4346 _old_gc_alloc_region.init(); | |
4347 HeapRegion* retained_region = _retained_old_gc_alloc_region; | 4338 HeapRegion* retained_region = _retained_old_gc_alloc_region; |
4348 _retained_old_gc_alloc_region = NULL; | 4339 _retained_old_gc_alloc_region = NULL; |
4349 | 4340 |
4350 // We will discard the current GC alloc region if: | 4341 // We will discard the current GC alloc region if: |
4351 // a) it's in the collection set (it can happen!), | 4342 // a) it's in the collection set (it can happen!), |
4373 _hr_printer.reuse(retained_region); | 4364 _hr_printer.reuse(retained_region); |
4374 evacuation_info.set_alloc_regions_used_before(retained_region->used()); | 4365 evacuation_info.set_alloc_regions_used_before(retained_region->used()); |
4375 } | 4366 } |
4376 } | 4367 } |
4377 | 4368 |
4369 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { | |
4370 assert_at_safepoint(true /* should_be_vm_thread */); | |
4371 | |
4372 _survivor_gc_alloc_region.init(); | |
4373 _old_gc_alloc_region.init(); | |
4374 | |
4375 use_retained_old_gc_alloc_region(evacuation_info); | |
4376 } | |
4377 | |
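The diff context elides the body of the reuse-or-discard decision introduced at "We will discard the current GC alloc region if:". A hedged sketch of its shape; condition (a) is quoted above, while the remaining checks are assumptions about when reuse would be unsafe or pointless, not taken from this hunk:

    if (retained_region != NULL &&
        !retained_region->in_collection_set() &&   // (a) not in the CSet
        !retained_region->is_empty()) {            // assumed: worth reusing
      // Keep allocating old-gen objects into the retained region.
      _old_gc_alloc_region.set(retained_region);   // assumed G1AllocRegion API
      _hr_printer.reuse(retained_region);          // shown above at line 4364
      evacuation_info.set_alloc_regions_used_before(retained_region->used());
    }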
4378 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { | 4378 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { |
4379 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + | 4379 evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + |
4380 _old_gc_alloc_region.count()); | 4380 _old_gc_alloc_region.count()); |
4381 _survivor_gc_alloc_region.release(); | 4381 _survivor_gc_alloc_region.release(); |
4382 // If we have an old GC alloc region to release, we'll save it in | 4382 // If we have an old GC alloc region to release, we'll save it in |
4606 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { | 4606 if (_g1->heap_region_containing_raw(new_obj)->is_young()) { |
4607 _scanned_klass->record_modified_oops(); | 4607 _scanned_klass->record_modified_oops(); |
4608 } | 4608 } |
4609 } | 4609 } |
4610 | 4610 |
4611 template <G1Barrier barrier, bool do_mark_object> | 4611 template <G1Barrier barrier, G1Mark do_mark_object> |
4612 template <class T> | 4612 template <class T> |
4613 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) { | 4613 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) { |
4614 T heap_oop = oopDesc::load_heap_oop(p); | 4614 T heap_oop = oopDesc::load_heap_oop(p); |
4615 | 4615 |
4616 if (oopDesc::is_null(heap_oop)) { | 4616 if (oopDesc::is_null(heap_oop)) { |
4628 } else { | 4628 } else { |
4629 forwardee = _par_scan_state->copy_to_survivor_space(obj); | 4629 forwardee = _par_scan_state->copy_to_survivor_space(obj); |
4630 } | 4630 } |
4631 assert(forwardee != NULL, "forwardee should not be NULL"); | 4631 assert(forwardee != NULL, "forwardee should not be NULL"); |
4632 oopDesc::encode_store_heap_oop(p, forwardee); | 4632 oopDesc::encode_store_heap_oop(p, forwardee); |
4633 if (do_mark_object && forwardee != obj) { | 4633 if (do_mark_object != G1MarkNone && forwardee != obj) { |
4634 // If the object is self-forwarded we don't need to explicitly | 4634 // If the object is self-forwarded we don't need to explicitly |
4635 // mark it, the evacuation failure protocol will do so. | 4635 // mark it, the evacuation failure protocol will do so. |
4636 mark_forwarded_object(obj, forwardee); | 4636 mark_forwarded_object(obj, forwardee); |
4637 } | 4637 } |
4638 | 4638 |
4639 if (barrier == G1BarrierKlass) { | 4639 if (barrier == G1BarrierKlass) { |
4640 do_klass_barrier(p, forwardee); | 4640 do_klass_barrier(p, forwardee); |
4641 } | 4641 } |
4642 } else { | 4642 } else { |
4643 // The object is not in collection set. If we're a root scanning | 4643 // The object is not in collection set. If we're a root scanning |
4644 // closure during an initial mark pause (i.e. do_mark_object will | 4644 // closure during an initial mark pause then attempt to mark the object. |
4645 // be true) then attempt to mark the object. | 4645 if (do_mark_object == G1MarkFromRoot) { |
4646 if (do_mark_object) { | |
4647 mark_object(obj); | 4646 mark_object(obj); |
4648 } | 4647 } |
4649 } | 4648 } |
4650 | 4649 |
4651 if (barrier == G1BarrierEvac) { | 4650 if (barrier == G1BarrierEvac) { |
4652 _par_scan_state->update_rs(_from, p, _worker_id); | 4651 _par_scan_state->update_rs(_from, p, _worker_id); |
4653 } | 4652 } |
4654 } | 4653 } |
4655 | 4654 |
4656 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p); | 4655 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p); |
4657 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p); | 4656 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p); |
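Changing do_mark_object from a bool to the G1Mark non-type template parameter fixes each instantiation's marking policy at compile time, so the G1MarkNone copy path carries no marking test at runtime. A standalone sketch of the mechanism (CopySketch and process() are illustrative names):

    enum G1Mark { G1MarkNone, G1MarkFromRoot, G1MarkPromotedFromRoot };

    template <G1Mark do_mark_object>
    struct CopySketch {
      void process() {
        if (do_mark_object != G1MarkNone) {
          // do_mark_object is a compile-time constant, so in the
          // G1MarkNone instantiation this branch is provably dead
          // and the compiler removes it from the hot copy path.
        }
      }
    };

    CopySketch<G1MarkNone>     non_im_young;  // plain evacuation
    CopySketch<G1MarkFromRoot> im_strong;     // initial-mark strong roots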
4658 | 4657 |
4659 class G1ParEvacuateFollowersClosure : public VoidClosure { | 4658 class G1ParEvacuateFollowersClosure : public VoidClosure { |
4660 protected: | 4659 protected: |
4661 G1CollectedHeap* _g1h; | 4660 G1CollectedHeap* _g1h; |
4662 G1ParScanThreadState* _par_scan_state; | 4661 G1ParScanThreadState* _par_scan_state; |
4765 _g1h->set_n_termination(active_workers); | 4764 _g1h->set_n_termination(active_workers); |
4766 terminator()->reset_for_reuse(active_workers); | 4765 terminator()->reset_for_reuse(active_workers); |
4767 _n_workers = active_workers; | 4766 _n_workers = active_workers; |
4768 } | 4767 } |
4769 | 4768 |
4769 // Helps out with CLD processing. | |
4770 // | |
4771 // During InitialMark we need to: | |
4772 // 1) Scavenge all CLDs for the young GC. | |
4773 // 2) Mark all objects directly reachable from strong CLDs. | |
4774 template <G1Mark do_mark_object> | |
4775 class G1CLDClosure : public CLDClosure { | |
4776 G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure; | |
4777 G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure; | |
4778 G1KlassScanClosure _klass_in_cld_closure; | |
4779 bool _claim; | |
4780 | |
4781 public: | |
4782 G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure, | |
4783 bool only_young, bool claim) | |
4784 : _oop_closure(oop_closure), | |
4785 _oop_in_klass_closure(oop_closure->g1(), | |
4786 oop_closure->pss(), | |
4787 oop_closure->rp()), | |
4788 _klass_in_cld_closure(&_oop_in_klass_closure, only_young), | |
4789 _claim(claim) { | |
4790 | |
4791 } | |
4792 | |
4793 void do_cld(ClassLoaderData* cld) { | |
4794 cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim); | |
4795 } | |
4796 }; | |
4797 | |
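Usage sketch for G1CLDClosure: do_cld() is handed one ClassLoaderData at a time by the roots walk; oops_do() applies the oop closure to the CLD's handles and the klass closure to its classes, and when _claim is true it first claims the CLD so no other worker repeats it (these are the claim bits that ClassLoaderDataGraph::clear_claimed_marks() resets; see the evacuation hunk further down). Illustrative iteration shape, not the real graph API:

    for (ClassLoaderData* cld = first_cld; cld != NULL; cld = cld->next()) {
      cld_closure->do_cld(cld);  // claims (if requested), then scans
    }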
4798 class G1CodeBlobClosure: public CodeBlobClosure { | |
4799 OopClosure* _f; | |
4800 | |
4801 public: | |
4802 G1CodeBlobClosure(OopClosure* f) : _f(f) {} | |
4803 void do_code_blob(CodeBlob* blob) { | |
4804 nmethod* that = blob->as_nmethod_or_null(); | |
4805 if (that != NULL) { | |
4806 if (!that->test_set_oops_do_mark()) { | |
4807 that->oops_do(_f); | |
4808 that->fix_oop_relocations(); | |
4809 } | |
4810 } | |
4811 } | |
4812 }; | |
4813 | |
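G1CodeBlobClosure relies on nmethod::test_set_oops_do_mark() as a per-pause claim; the reading assumed here is that it returns true when some worker already claimed the nmethod, so only the first claimer scans its embedded oops and patches its oop relocations:

    void scan_once(nmethod* nm, OopClosure* f) {  // illustrative helper
      if (!nm->test_set_oops_do_mark()) {  // first claimer wins
        nm->oops_do(f);                    // evacuate/update embedded oops
        nm->fix_oop_relocations();         // re-patch oops compiled into code
      }
    }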
4770 void work(uint worker_id) { | 4814 void work(uint worker_id) { |
4771 if (worker_id >= _n_workers) return; // no work needed this round | 4815 if (worker_id >= _n_workers) return; // no work needed this round |
4772 | 4816 |
4773 double start_time_ms = os::elapsedTime() * 1000.0; | 4817 double start_time_ms = os::elapsedTime() * 1000.0; |
4774 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms); | 4818 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms); |
4782 G1ParScanThreadState pss(_g1h, worker_id, rp); | 4826 G1ParScanThreadState pss(_g1h, worker_id, rp); |
4783 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); | 4827 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp); |
4784 | 4828 |
4785 pss.set_evac_failure_closure(&evac_failure_cl); | 4829 pss.set_evac_failure_closure(&evac_failure_cl); |
4786 | 4830 |
4787 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp); | 4831 bool only_young = _g1h->g1_policy()->gcs_are_young(); |
4788 G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp); | 4832 |
4789 | 4833 // Non-IM young GC. |
4790 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp); | 4834 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp); |
4791 G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp); | 4835 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl, |
4792 | 4836 only_young, // Only process dirty klasses. |
4793 bool only_young = _g1h->g1_policy()->gcs_are_young(); | 4837 false); // No need to claim CLDs. |
4794 G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false); | 4838 // IM young GC. |
4795 G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young); | 4839 // Strong roots closures. |
4796 | 4840 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp); |
4797 OopClosure* scan_root_cl = &only_scan_root_cl; | 4841 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl, |
4798 G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s; | 4842 false, // Process all klasses. |
4843 true); // Need to claim CLDs. | |
4844 // Weak roots closures. | |
4845 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp); | |
4846 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl, | |
4847 false, // Process all klasses. | |
4848 true); // Need to claim CLDs. | |
4849 | |
4850 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl); | |
4851 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl); | |
4852 // IM Weak code roots are handled later. | |
4853 | |
4854 OopClosure* strong_root_cl; | |
4855 OopClosure* weak_root_cl; | |
4856 CLDClosure* strong_cld_cl; | |
4857 CLDClosure* weak_cld_cl; | |
4858 CodeBlobClosure* strong_code_cl; | |
4799 | 4859 |
4800 if (_g1h->g1_policy()->during_initial_mark_pause()) { | 4860 if (_g1h->g1_policy()->during_initial_mark_pause()) { |
4801 // We also need to mark copied objects. | 4861 // We also need to mark copied objects. |
4802 scan_root_cl = &scan_mark_root_cl; | 4862 strong_root_cl = &scan_mark_root_cl; |
4803 scan_klasses_cl = &scan_mark_klasses_cl_s; | 4863 weak_root_cl = &scan_mark_weak_root_cl; |
4864 strong_cld_cl = &scan_mark_cld_cl; | |
4865 weak_cld_cl = &scan_mark_weak_cld_cl; | |
4866 strong_code_cl = &scan_mark_code_cl; | |
4867 } else { | |
4868 strong_root_cl = &scan_only_root_cl; | |
4869 weak_root_cl = &scan_only_root_cl; | |
4870 strong_cld_cl = &scan_only_cld_cl; | |
4871 weak_cld_cl = &scan_only_cld_cl; | |
4872 strong_code_cl = &scan_only_code_cl; | |
4804 } | 4873 } |
4805 | 4874 |
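Restating the selection above as a matrix (no new behavior, just the two pause types side by side):

    // pause type     strong root/CLD closures        weak root/CLD closures
    // -----------    ------------------------        ----------------------
    // non-IM young   scan_only_* (G1MarkNone)        same objects as strong
    // IM young       scan_mark_* (G1MarkFromRoot)    scan_mark_weak_*
    //                                                (G1MarkPromotedFromRoot)
    //
    // Weak code roots during IM have no slot here on purpose; per the
    // comment above, they are handled later in the pause.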
4806 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); | 4875 |
4807 | 4876 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
4808 // Don't scan the scavengable methods in the code cache as part | |
4809 // of strong root scanning. The code roots that point into a | |
4810 // region in the collection set are scanned when we scan the | |
4811 // region's RSet. | |
4812 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings; | |
4813 | 4877 |
4814 pss.start_strong_roots(); | 4878 pss.start_strong_roots(); |
4815 _g1h->g1_process_strong_roots(/* is scavenging */ true, | 4879 _g1h->g1_process_roots(strong_root_cl, |
4816 SharedHeap::ScanningOption(so), | 4880 weak_root_cl, |
4817 scan_root_cl, | 4881 &push_heap_rs_cl, |
4818 &push_heap_rs_cl, | 4882 strong_cld_cl, |
4819 scan_klasses_cl, | 4883 weak_cld_cl, |
4820 worker_id); | 4884 strong_code_cl, |
4885 worker_id); | |
4886 | |
4821 pss.end_strong_roots(); | 4887 pss.end_strong_roots(); |
4822 | 4888 |
4823 { | 4889 { |
4824 double start = os::elapsedTime(); | 4890 double start = os::elapsedTime(); |
4825 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); | 4891 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); |
4853 | 4919 |
4854 // This method is run in a GC worker. | 4920 // This method is run in a GC worker. |
4855 | 4921 |
4856 void | 4922 void |
4857 G1CollectedHeap:: | 4923 G1CollectedHeap:: |
4858 g1_process_strong_roots(bool is_scavenging, | 4924 g1_process_roots(OopClosure* scan_non_heap_roots, |
4859 ScanningOption so, | 4925 OopClosure* scan_non_heap_weak_roots, |
4860 OopClosure* scan_non_heap_roots, | 4926 OopsInHeapRegionClosure* scan_rs, |
4861 OopsInHeapRegionClosure* scan_rs, | 4927 CLDClosure* scan_strong_clds, |
4862 G1KlassScanClosure* scan_klasses, | 4928 CLDClosure* scan_weak_clds, |
4863 uint worker_i) { | 4929 CodeBlobClosure* scan_strong_code, |
4864 | 4930 uint worker_i) { |
4865 // First scan the strong roots | 4931 |
4932 // First scan the shared roots. | |
4866 double ext_roots_start = os::elapsedTime(); | 4933 double ext_roots_start = os::elapsedTime(); |
4867 double closure_app_time_sec = 0.0; | 4934 double closure_app_time_sec = 0.0; |
4868 | 4935 |
4936 bool during_im = _g1h->g1_policy()->during_initial_mark_pause(); | |
4937 | |
4869 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); | 4938 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); |
4870 | 4939 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots); |
4871 process_strong_roots(false, // no scoping; this is parallel code | 4940 |
4872 so, | 4941 process_roots(false, // no scoping; this is parallel code |
4873 &buf_scan_non_heap_roots, | 4942 SharedHeap::SO_None, |
4874 scan_klasses | 4943 &buf_scan_non_heap_roots, |
4875 ); | 4944 &buf_scan_non_heap_weak_roots, |
4945 scan_strong_clds, | |
4946 // Initial Mark handles the weak CLDs separately. | |
4947 (during_im ? NULL : scan_weak_clds), | |
4948 scan_strong_code); | |
4876 | 4949 |
4877 // Now the CM ref_processor roots. | 4950 // Now the CM ref_processor roots. |
4878 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { | 4951 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { |
4879 // We need to treat the discovered reference lists of the | 4952 // We need to treat the discovered reference lists of the |
4880 // concurrent mark ref processor as roots and keep entries | 4953 // concurrent mark ref processor as roots and keep entries |
4881 // (which are added by the marking threads) on them live | 4954 // (which are added by the marking threads) on them live |
4882 // until they can be processed at the end of marking. | 4955 // until they can be processed at the end of marking. |
4883 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots); | 4956 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots); |
4884 } | 4957 } |
4885 | 4958 |
4959 if (during_im) { | |
4960 // Barrier to make sure all workers passed | |
4961 // the strong CLD and strong nmethods phases. | |
4962 active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads()); | |
4963 | |
4964 // Now take the complement of the strong CLDs. | |
4965 ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds); | |
4966 } | |
4967 | |
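The barrier above exists because "weak CLDs" are computed as the complement of what the strong pass claimed: if any worker were still busy claiming strong CLDs, an unclaimed strong CLD would be misread as weak. Sketch of the per-worker ordering during an initial-mark pause:

    // 1. process_roots(...)          claims + scans the strong CLDs
    // 2. wait_until_all_workers_done_with_threads(n)   -- barrier
    // 3. roots_cld_do(NULL, scan_weak_clds)
    //    visits the complement: every CLD step 1 left unclaimed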
4886 // Finish up any enqueued closure apps (attributed as object copy time). | 4968 // Finish up any enqueued closure apps (attributed as object copy time). |
4887 buf_scan_non_heap_roots.done(); | 4969 buf_scan_non_heap_roots.done(); |
4888 | 4970 buf_scan_non_heap_weak_roots.done(); |
4889 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds(); | 4971 |
4972 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds() | |
4973 + buf_scan_non_heap_weak_roots.closure_app_seconds(); | |
4890 | 4974 |
4891 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); | 4975 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); |
4892 | 4976 |
4893 double ext_root_time_ms = | 4977 double ext_root_time_ms = |
4894 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0; | 4978 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0; |
4908 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0; | 4992 satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0; |
4909 } | 4993 } |
4910 } | 4994 } |
4911 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); | 4995 g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); |
4912 | 4996 |
4913 // If this is an initial mark pause, and we're not scanning | |
4914 // the entire code cache, we need to mark the oops in the | |
4915 // strong code root lists for the regions that are not in | |
4916 // the collection set. | |
4917 // Note all threads participate in this set of root tasks. | |
4918 double mark_strong_code_roots_ms = 0.0; | |
4919 if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) { | |
4920 double mark_strong_roots_start = os::elapsedTime(); | |
4921 mark_strong_code_roots(worker_i); | |
4922 mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0; | |
4923 } | |
4924 g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms); | |
4925 | |
4926 // Now scan the complement of the collection set. | 4997 // Now scan the complement of the collection set. |
4927 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */); | 4998 MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations); |
4928 g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i); | 4999 |
5000 g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i); | |
4929 | 5001 |
4930 _process_strong_tasks->all_tasks_completed(); | 5002 _process_strong_tasks->all_tasks_completed(); |
4931 } | 5003 } |
4932 | 5004 |
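Both root closures are wrapped in BufferingOopClosure so the per-oop copying work can be timed separately from root discovery and attributed as object-copy time (lines 4972-4973 above sum the two buffers). A minimal sketch of the buffering idea; the buffer length and flush policy are assumptions:

    class BufferingSketch : public OopClosure {
      enum { BufferLen = 1024 };          // assumed capacity
      OopClosure* _oc;
      oop*        _buf[BufferLen];
      int         _n;
      double      _app_seconds;
      void flush() {
        double t = os::elapsedTime();
        for (int i = 0; i < _n; i++) _oc->do_oop(_buf[i]);
        _app_seconds += os::elapsedTime() - t;  // time only closure application
        _n = 0;
      }
    public:
      BufferingSketch(OopClosure* oc) : _oc(oc), _n(0), _app_seconds(0.0) {}
      void do_oop(oop* p) { if (_n == BufferLen) flush(); _buf[_n++] = p; }
      void do_oop(narrowOop* p) { /* narrow variant elided in this sketch */ }
      void done() { flush(); }
      double closure_app_seconds() const { return _app_seconds; }
    };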
4933 class G1StringSymbolTableUnlinkTask : public AbstractGangTask { | 5005 class G1StringSymbolTableUnlinkTask : public AbstractGangTask { |
4945 int _symbols_removed; | 5017 int _symbols_removed; |
4946 | 5018 |
4947 bool _do_in_parallel; | 5019 bool _do_in_parallel; |
4948 public: | 5020 public: |
4949 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : | 5021 G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : |
4950 AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive), | 5022 AbstractGangTask("String/Symbol Unlinking"), |
5023 _is_alive(is_alive), | |
4951 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()), | 5024 _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()), |
4952 _process_strings(process_strings), _strings_processed(0), _strings_removed(0), | 5025 _process_strings(process_strings), _strings_processed(0), _strings_removed(0), |
4953 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { | 5026 _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { |
4954 | 5027 |
4955 _initial_string_table_size = StringTable::the_table()->table_size(); | 5028 _initial_string_table_size = StringTable::the_table()->table_size(); |
4967 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT, | 5040 err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT, |
4968 StringTable::parallel_claimed_index(), _initial_string_table_size)); | 5041 StringTable::parallel_claimed_index(), _initial_string_table_size)); |
4969 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, | 5042 guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, |
4970 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT, | 5043 err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT, |
4971 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size)); | 5044 SymbolTable::parallel_claimed_index(), _initial_symbol_table_size)); |
5045 | |
5046 if (G1TraceStringSymbolTableScrubbing) { | |
5047 gclog_or_tty->print_cr("Cleaned string and symbol table, " | |
5048 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, " | |
5049 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed", | |
5050 strings_processed(), strings_removed(), | |
5051 symbols_processed(), symbols_removed()); | |
5052 } | |
4972 } | 5053 } |
4973 | 5054 |
4974 void work(uint worker_id) { | 5055 void work(uint worker_id) { |
4975 if (_do_in_parallel) { | 5056 if (_do_in_parallel) { |
4976 int strings_processed = 0; | 5057 int strings_processed = 0; |
5002 | 5083 |
5003 size_t symbols_processed() const { return (size_t)_symbols_processed; } | 5084 size_t symbols_processed() const { return (size_t)_symbols_processed; } |
5004 size_t symbols_removed() const { return (size_t)_symbols_removed; } | 5085 size_t symbols_removed() const { return (size_t)_symbols_removed; } |
5005 }; | 5086 }; |
5006 | 5087 |
5007 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, | 5088 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC { |
5008 bool process_strings, bool process_symbols) { | 5089 private: |
5090 static Monitor* _lock; | |
5091 | |
5092 BoolObjectClosure* const _is_alive; | |
5093 const bool _unloading_occurred; | |
5094 const uint _num_workers; | |
5095 | |
5096 // Variables used to claim nmethods. | |
5097 nmethod* _first_nmethod; | |
5098 volatile nmethod* _claimed_nmethod; | |
5099 | |
5100 // The list of nmethods that need to be processed by the second pass. | |
5101 volatile nmethod* _postponed_list; | |
5102 volatile uint _num_entered_barrier; | |
5103 | |
5104 public: | |
5105 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : | |
5106 _is_alive(is_alive), | |
5107 _unloading_occurred(unloading_occurred), | |
5108 _num_workers(num_workers), | |
5109 _first_nmethod(NULL), | |
5110 _claimed_nmethod(NULL), | |
5111 _postponed_list(NULL), | |
5112 _num_entered_barrier(0) | |
5113 { | |
5114 nmethod::increase_unloading_clock(); | |
5115 _first_nmethod = CodeCache::alive_nmethod(CodeCache::first()); | |
5116 _claimed_nmethod = (volatile nmethod*)_first_nmethod; | |
5117 } | |
5118 | |
5119 ~G1CodeCacheUnloadingTask() { | |
5120 CodeCache::verify_clean_inline_caches(); | |
5121 | |
5122 CodeCache::set_needs_cache_clean(false); | |
5123 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be"); | |
5124 | |
5125 CodeCache::verify_icholder_relocations(); | |
5126 } | |
5127 | |
5128 private: | |
5129 void add_to_postponed_list(nmethod* nm) { | |
5130 nmethod* old; | |
5131 do { | |
5132 old = (nmethod*)_postponed_list; | |
5133 nm->set_unloading_next(old); | |
5134 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old); | |
5135 } | |
5136 | |
5137 void clean_nmethod(nmethod* nm) { | |
5138 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); | |
5139 | |
5140 if (postponed) { | |
5141 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet. | |
5142 add_to_postponed_list(nm); | |
5143 } | |
5144 | |
5145 // Mark that this nmethod has been cleaned/unloaded. | |
5146 // After this call, it will be safe to ask if this nmethod was unloaded or not. | |
5147 nm->set_unloading_clock(nmethod::global_unloading_clock()); | |
5148 } | |
5149 | |
5150 void clean_nmethod_postponed(nmethod* nm) { | |
5151 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred); | |
5152 } | |
5153 | |
5154 static const int MaxClaimNmethods = 16; | |
5155 | |
5156 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) { | |
5157 nmethod* first; | |
5158 nmethod* last; | |
5159 | |
5160 do { | |
5161 *num_claimed_nmethods = 0; | |
5162 | |
5163 first = last = (nmethod*)_claimed_nmethod; | |
5164 | |
5165 if (first != NULL) { | |
5166 for (int i = 0; i < MaxClaimNmethods; i++) { | |
5167 last = CodeCache::alive_nmethod(CodeCache::next(last)); | |
5168 | |
5169 if (last == NULL) { | |
5170 break; | |
5171 } | |
5172 | |
5173 claimed_nmethods[i] = last; | |
5174 (*num_claimed_nmethods)++; | |
5175 } | |
5176 } | |
5177 | |
5178 } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first); | |
5179 } | |
5180 | |
5181 nmethod* claim_postponed_nmethod() { | |
5182 nmethod* claim; | |
5183 nmethod* next; | |
5184 | |
5185 do { | |
5186 claim = (nmethod*)_postponed_list; | |
5187 if (claim == NULL) { | |
5188 return NULL; | |
5189 } | |
5190 | |
5191 next = claim->unloading_next(); | |
5192 | |
5193 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim); | |
5194 | |
5195 return claim; | |
5196 } | |
5197 | |
5198 public: | |
5199 // Mark that we're done with the first pass of nmethod cleaning. | |
5200 void barrier_mark(uint worker_id) { | |
5201 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); | |
5202 _num_entered_barrier++; | |
5203 if (_num_entered_barrier == _num_workers) { | |
5204 ml.notify_all(); | |
5205 } | |
5206 } | |
5207 | |
5208 // See if we have to wait for the other workers to | |
5209 // finish their first-pass nmethod cleaning work. | |
5210 void barrier_wait(uint worker_id) { | |
5211 if (_num_entered_barrier < _num_workers) { | |
5212 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); | |
5213 while (_num_entered_barrier < _num_workers) { | |
5214 ml.wait(Mutex::_no_safepoint_check_flag, 0, false); | |
5215 } | |
5216 } | |
5217 } | |
5218 | |
5219 // Cleaning and unloading of nmethods. Some work has to be postponed | |
5220 // to the second pass, when we know which nmethods survive. | |
5221 void work_first_pass(uint worker_id) { | |
5222 // The first nmethod is claimed by the first worker. | |
5223 if (worker_id == 0 && _first_nmethod != NULL) { | |
5224 clean_nmethod(_first_nmethod); | |
5225 _first_nmethod = NULL; | |
5226 } | |
5227 | |
5228 int num_claimed_nmethods; | |
5229 nmethod* claimed_nmethods[MaxClaimNmethods]; | |
5230 | |
5231 while (true) { | |
5232 claim_nmethods(claimed_nmethods, &num_claimed_nmethods); | |
5233 | |
5234 if (num_claimed_nmethods == 0) { | |
5235 break; | |
5236 } | |
5237 | |
5238 for (int i = 0; i < num_claimed_nmethods; i++) { | |
5239 clean_nmethod(claimed_nmethods[i]); | |
5240 } | |
5241 } | |
5242 } | |
5243 | |
5244 void work_second_pass(uint worker_id) { | |
5245 nmethod* nm; | |
5246 // Take care of postponed nmethods. | |
5247 while ((nm = claim_postponed_nmethod()) != NULL) { | |
5248 clean_nmethod_postponed(nm); | |
5249 } | |
5250 } | |
5251 }; | |
5252 | |
5253 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock"); | |
5254 | |
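Both shared cursors in the task above (_claimed_nmethod and _postponed_list) use the classic compare-and-swap retry loop, and claim_nmethods() takes up to MaxClaimNmethods (16) nmethods per CAS to amortize contention on the cursor. The generic pattern, extracted as a sketch (Node is an illustrative stand-in for nmethod's unloading_next link):

    struct Node { Node* _next; void set_next(Node* n) { _next = n; } };

    // Lock-free push: retry until *list_head is still what we read.
    // Atomic::cmpxchg_ptr stores 'n' into *list_head only if it still
    // equals 'old', and returns the value it found there.
    void push(Node* volatile* list_head, Node* n) {
      Node* old;
      do {
        old = *list_head;
        n->set_next(old);            // speculative link
      } while ((Node*)Atomic::cmpxchg_ptr(n, list_head, old) != old);
    }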
5255 class G1KlassCleaningTask : public StackObj { | |
5256 BoolObjectClosure* _is_alive; | |
5257 volatile jint _clean_klass_tree_claimed; | |
5258 ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator; | |
5259 | |
5260 public: | |
5261 G1KlassCleaningTask(BoolObjectClosure* is_alive) : | |
5262 _is_alive(is_alive), | |
5263 _clean_klass_tree_claimed(0), | |
5264 _klass_iterator() { | |
5265 } | |
5266 | |
5267 private: | |
5268 bool claim_clean_klass_tree_task() { | |
5269 if (_clean_klass_tree_claimed) { | |
5270 return false; | |
5271 } | |
5272 | |
5273 return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0; | |
5274 } | |
5275 | |
5276 InstanceKlass* claim_next_klass() { | |
5277 Klass* klass; | |
5278 do { | |
5279 klass = _klass_iterator.next_klass(); | |
5280 } while (klass != NULL && !klass->oop_is_instance()); | |
5281 | |
5282 return (InstanceKlass*)klass; | |
5283 } | |
5284 | |
5285 public: | |
5286 | |
5287 void clean_klass(InstanceKlass* ik) { | |
5288 ik->clean_implementors_list(_is_alive); | |
5289 ik->clean_method_data(_is_alive); | |
5290 | |
5291 // G1 specific cleanup work that has | |
5292 // been moved here to be done in parallel. | |
5293 ik->clean_dependent_nmethods(); | |
5294 } | |
5295 | |
5296 void work() { | |
5297 ResourceMark rm; | |
5298 | |
5299 // One worker will clean the subklass/sibling klass tree. | |
5300 if (claim_clean_klass_tree_task()) { | |
5301 Klass::clean_subklass_tree(_is_alive); | |
5302 } | |
5303 | |
5304 // All workers will help clean the classes. | |
5305 InstanceKlass* klass; | |
5306 while ((klass = claim_next_klass()) != NULL) { | |
5307 clean_klass(klass); | |
5308 } | |
5309 } | |
5310 }; | |
5311 | |
5312 // To minimize the remark pause times, the tasks below are done in parallel. | |
5313 class G1ParallelCleaningTask : public AbstractGangTask { | |
5314 private: | |
5315 G1StringSymbolTableUnlinkTask _string_symbol_task; | |
5316 G1CodeCacheUnloadingTask _code_cache_task; | |
5317 G1KlassCleaningTask _klass_cleaning_task; | |
5318 | |
5319 public: | |
5320 // The constructor is run in the VMThread. | |
5321 G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) : | |
5322 AbstractGangTask("Parallel Cleaning"), | |
5323 _string_symbol_task(is_alive, process_strings, process_symbols), | |
5324 _code_cache_task(num_workers, is_alive, unloading_occurred), | |
5325 _klass_cleaning_task(is_alive) { | |
5326 } | |
5327 | |
5328 // The parallel work done by all worker threads. | |
5329 void work(uint worker_id) { | |
5330 // Do first pass of code cache cleaning. | |
5331 _code_cache_task.work_first_pass(worker_id); | |
5332 | |
5333 // Let the threads mark that the first pass is done. | |
5334 _code_cache_task.barrier_mark(worker_id); | |
5335 | |
5336 // Clean the Strings and Symbols. | |
5337 _string_symbol_task.work(worker_id); | |
5338 | |
5339 // Wait for all workers to finish the first code cache cleaning pass. | |
5340 _code_cache_task.barrier_wait(worker_id); | |
5341 | |
5342 // Do the second code cache cleaning work, which relies on | |
5343 // the liveness information gathered during the first pass. | |
5344 _code_cache_task.work_second_pass(worker_id); | |
5345 | |
5346 // Clean all klasses that were not unloaded. | |
5347 _klass_cleaning_task.work(); | |
5348 } | |
5349 }; | |
5350 | |
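The ordering inside work() looks deliberate: string/symbol unlinking sits between barrier_mark() and barrier_wait(), so a worker that finishes its share of first-pass nmethod cleaning does useful table work instead of idling until the last worker arrives, and the second pass then runs with complete first-pass liveness information. (This reading is inferred from the code above, not stated in the changeset.) Per-worker timeline:

    // work_first_pass()    clean/unload nmethods, pass 1
    // barrier_mark()       announce pass-1 done; does not block
    // string/symbol work   overlaps other workers' pass 1
    // barrier_wait()       block until every worker passed pass 1
    // work_second_pass()   postponed nmethods; needs pass-1 results
    // klass cleaning       shared atomic iterator over classes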
5351 | |
5352 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive, | |
5353 bool process_strings, | |
5354 bool process_symbols, | |
5355 bool class_unloading_occurred) { | |
5009 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? | 5356 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
5010 _g1h->workers()->active_workers() : 1); | 5357 workers()->active_workers() : 1); |
5011 | 5358 |
5012 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); | 5359 G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, |
5360 n_workers, class_unloading_occurred); | |
5013 if (G1CollectedHeap::use_parallel_gc_threads()) { | 5361 if (G1CollectedHeap::use_parallel_gc_threads()) { |
5014 set_par_threads(n_workers); | 5362 set_par_threads(n_workers); |
5015 workers()->run_task(&g1_unlink_task); | 5363 workers()->run_task(&g1_unlink_task); |
5016 set_par_threads(0); | 5364 set_par_threads(0); |
5017 } else { | 5365 } else { |
5018 g1_unlink_task.work(0); | 5366 g1_unlink_task.work(0); |
5019 } | 5367 } |
5020 if (G1TraceStringSymbolTableScrubbing) { | 5368 } |
5021 gclog_or_tty->print_cr("Cleaned string and symbol table, " | 5369 |
5022 "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, " | 5370 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, |
5023 "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed", | 5371 bool process_strings, bool process_symbols) { |
5024 g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(), | 5372 { |
5025 g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed()); | 5373 uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
5374 _g1h->workers()->active_workers() : 1); | |
5375 G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); | |
5376 if (G1CollectedHeap::use_parallel_gc_threads()) { | |
5377 set_par_threads(n_workers); | |
5378 workers()->run_task(&g1_unlink_task); | |
5379 set_par_threads(0); | |
5380 } else { | |
5381 g1_unlink_task.work(0); | |
5382 } | |
5026 } | 5383 } |
5027 | 5384 |
5028 if (G1StringDedup::is_enabled()) { | 5385 if (G1StringDedup::is_enabled()) { |
5029 G1StringDedup::unlink(is_alive); | 5386 G1StringDedup::unlink(is_alive); |
5030 } | 5387 } |
5613 double start_par_time_sec = os::elapsedTime(); | 5970 double start_par_time_sec = os::elapsedTime(); |
5614 double end_par_time_sec; | 5971 double end_par_time_sec; |
5615 | 5972 |
5616 { | 5973 { |
5617 StrongRootsScope srs(this); | 5974 StrongRootsScope srs(this); |
5975 // InitialMark needs claim bits to keep track of the marked-through CLDs. | |
5976 if (g1_policy()->during_initial_mark_pause()) { | |
5977 ClassLoaderDataGraph::clear_claimed_marks(); | |
5978 } | |
5618 | 5979 |
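clear_claimed_marks() resets the per-CLD claim bits that the strong and weak CLD closures rely on to visit each ClassLoaderData exactly once; stale bits from an earlier pause would make every CLD look already claimed, and the CLD roots would be skipped. Assumed protocol shape:

    // VM thread, before the workers start:
    //   ClassLoaderDataGraph::clear_claimed_marks();
    // each worker, per CLD (inside cld->oops_do(..., claim=true)):
    //   the first claimer atomically sets the bit and scans the CLD;
    //   later workers see the bit and skip it.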
5619 if (G1CollectedHeap::use_parallel_gc_threads()) { | 5980 if (G1CollectedHeap::use_parallel_gc_threads()) { |
5620 // The individual threads will set their evac-failure closures. | 5981 // The individual threads will set their evac-failure closures. |
5621 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); | 5982 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
5622 // These tasks use ShareHeap::_process_strong_tasks | 5983 // These tasks use ShareHeap::_process_strong_tasks |
6564 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent); | 6925 G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent); |
6565 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; | 6926 double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; |
6566 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); | 6927 g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); |
6567 } | 6928 } |
6568 | 6929 |
6569 // Mark all the code roots that point into regions *not* in the | |
6570 // collection set. | |
6571 // | |
6572 // Note we do not want to use a "marking" CodeBlobToOopClosure while | |
6573 // walking the code roots lists of regions not in the collection | |
6574 // set. Suppose we have an nmethod (M) that points to objects in two | |
6575 // separate regions - one in the collection set (R1) and one not (R2). | |
6576 // Using a "marking" CodeBlobToOopClosure here would result in "marking" | |
6577 // nmethod M when walking the code roots for R1. When we come to scan | |
6578 // the code roots for R2, we would see that M is already marked and it | |
6579 // would be skipped and the objects in R2 that are referenced from M | |
6580 // would not be evacuated. | |
6581 | |
6582 class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure { | |
6583 | |
6584 class MarkStrongCodeRootOopClosure: public OopClosure { | |
6585 ConcurrentMark* _cm; | |
6586 HeapRegion* _hr; | |
6587 uint _worker_id; | |
6588 | |
6589 template <class T> void do_oop_work(T* p) { | |
6590 T heap_oop = oopDesc::load_heap_oop(p); | |
6591 if (!oopDesc::is_null(heap_oop)) { | |
6592 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
6593 // Only mark objects in the region (which is assumed | |
6594 // to be not in the collection set). | |
6595 if (_hr->is_in(obj)) { | |
6596 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id); | |
6597 } | |
6598 } | |
6599 } | |
6600 | |
6601 public: | |
6602 MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) : | |
6603 _cm(cm), _hr(hr), _worker_id(worker_id) { | |
6604 assert(!_hr->in_collection_set(), "sanity"); | |
6605 } | |
6606 | |
6607 void do_oop(narrowOop* p) { do_oop_work(p); } | |
6608 void do_oop(oop* p) { do_oop_work(p); } | |
6609 }; | |
6610 | |
6611 MarkStrongCodeRootOopClosure _oop_cl; | |
6612 | |
6613 public: | |
6614 MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id): | |
6615 _oop_cl(cm, hr, worker_id) {} | |
6616 | |
6617 void do_code_blob(CodeBlob* cb) { | |
6618 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null(); | |
6619 if (nm != NULL) { | |
6620 nm->oops_do(&_oop_cl); | |
6621 } | |
6622 } | |
6623 }; | |
6624 | |
6625 class MarkStrongCodeRootsHRClosure: public HeapRegionClosure { | |
6626 G1CollectedHeap* _g1h; | |
6627 uint _worker_id; | |
6628 | |
6629 public: | |
6630 MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) : | |
6631 _g1h(g1h), _worker_id(worker_id) {} | |
6632 | |
6633 bool doHeapRegion(HeapRegion *hr) { | |
6634 HeapRegionRemSet* hrrs = hr->rem_set(); | |
6635 if (hr->continuesHumongous()) { | |
6636 // Code roots should never be attached to a continuation of a humongous region | |
6637 assert(hrrs->strong_code_roots_list_length() == 0, | |
6638 err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT | |
6639 " starting at "HR_FORMAT", but has "SIZE_FORMAT, | |
6640 HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()), | |
6641 hrrs->strong_code_roots_list_length())); | |
6642 return false; | |
6643 } | |
6644 | |
6645 if (hr->in_collection_set()) { | |
6646 // Don't mark code roots into regions in the collection set here. | |
6647 // They will be marked when we scan them. | |
6648 return false; | |
6649 } | |
6650 | |
6651 MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id); | |
6652 hr->strong_code_roots_do(&cb_cl); | |
6653 return false; | |
6654 } | |
6655 }; | |
6656 | |
6657 void G1CollectedHeap::mark_strong_code_roots(uint worker_id) { | |
6658 MarkStrongCodeRootsHRClosure cl(this, worker_id); | |
6659 if (G1CollectedHeap::use_parallel_gc_threads()) { | |
6660 heap_region_par_iterate_chunked(&cl, | |
6661 worker_id, | |
6662 workers()->active_workers(), | |
6663 HeapRegion::ParMarkRootClaimValue); | |
6664 } else { | |
6665 heap_region_iterate(&cl); | |
6666 } | |
6667 } | |
6668 | |
6669 class RebuildStrongCodeRootClosure: public CodeBlobClosure { | 6930 class RebuildStrongCodeRootClosure: public CodeBlobClosure { |
6670 G1CollectedHeap* _g1h; | 6931 G1CollectedHeap* _g1h; |
6671 | 6932 |
6672 public: | 6933 public: |
6673 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) : | 6934 RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) : |