Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author:   Gilles Duboscq <gilles.m.duboscq@oracle.com>
date:     Tue, 07 Apr 2015 14:58:49 +0200
parents:  52b4284cb496 2c6ef90f030a
children: (none)
comparison legend: equal | deleted | inserted | replaced
20184:84105dcdb05b | 20804:7848fc12602b |
---|---|
976 { | 976 { |
977 // Update the from & to space pointers in space_info, since they are swapped | 977 // Update the from & to space pointers in space_info, since they are swapped |
978 // at each young gen gc. Do the update unconditionally (even though a | 978 // at each young gen gc. Do the update unconditionally (even though a |
979 // promotion failure does not swap spaces) because an unknown number of minor | 979 // promotion failure does not swap spaces) because an unknown number of minor |
980 // collections will have swapped the spaces an unknown number of times. | 980 // collections will have swapped the spaces an unknown number of times. |
981 GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); | 981 GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
982 ParallelScavengeHeap* heap = gc_heap(); | 982 ParallelScavengeHeap* heap = gc_heap(); |
983 _space_info[from_space_id].set_space(heap->young_gen()->from_space()); | 983 _space_info[from_space_id].set_space(heap->young_gen()->from_space()); |
984 _space_info[to_space_id].set_space(heap->young_gen()->to_space()); | 984 _space_info[to_space_id].set_space(heap->young_gen()->to_space()); |
985 | 985 |
986 pre_gc_values->fill(heap); | 986 pre_gc_values->fill(heap); |
1019 gc_task_manager()->release_all_resources(); | 1019 gc_task_manager()->release_all_resources(); |
1020 } | 1020 } |
1021 | 1021 |
1022 void PSParallelCompact::post_compact() | 1022 void PSParallelCompact::post_compact() |
1023 { | 1023 { |
1024 GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); | 1024 GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
1025 | 1025 |
1026 for (unsigned int id = old_space_id; id < last_space_id; ++id) { | 1026 for (unsigned int id = old_space_id; id < last_space_id; ++id) { |
1027 // Clear the marking bitmap, summary data and split info. | 1027 // Clear the marking bitmap, summary data and split info. |
1028 clear_data_covering_space(SpaceId(id)); | 1028 clear_data_covering_space(SpaceId(id)); |
1029 // Update top(). Must be done after clearing the bitmap and summary data. | 1029 // Update top(). Must be done after clearing the bitmap and summary data. |
1845 #endif // #ifndef PRODUCT | 1845 #endif // #ifndef PRODUCT |
1846 | 1846 |
1847 void PSParallelCompact::summary_phase(ParCompactionManager* cm, | 1847 void PSParallelCompact::summary_phase(ParCompactionManager* cm, |
1848 bool maximum_compaction) | 1848 bool maximum_compaction) |
1849 { | 1849 { |
1850 GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); | 1850 GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
1851 // trace("2"); | 1851 // trace("2"); |
1852 | 1852 |
1853 #ifdef ASSERT | 1853 #ifdef ASSERT |
1854 if (TraceParallelOldGCMarkingPhase) { | 1854 if (TraceParallelOldGCMarkingPhase) { |
1855 tty->print_cr("add_obj_count=" SIZE_FORMAT " " | 1855 tty->print_cr("add_obj_count=" SIZE_FORMAT " " |
2054 gc_task_manager()->task_idle_workers(); | 2054 gc_task_manager()->task_idle_workers(); |
2055 heap->set_par_threads(gc_task_manager()->active_workers()); | 2055 heap->set_par_threads(gc_task_manager()->active_workers()); |
2056 | 2056 |
2057 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | 2057 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
2058 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | 2058 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
2059 GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); | 2059 GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); |
2060 TraceCollectorStats tcs(counters()); | 2060 TraceCollectorStats tcs(counters()); |
2061 TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); | 2061 TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); |
2062 | 2062 |
2063 if (TraceGen1Time) accumulated_time()->start(); | 2063 if (TraceGen1Time) accumulated_time()->start(); |
2064 | 2064 |
2349 | 2349 |
2350 void PSParallelCompact::marking_phase(ParCompactionManager* cm, | 2350 void PSParallelCompact::marking_phase(ParCompactionManager* cm, |
2351 bool maximum_heap_compaction, | 2351 bool maximum_heap_compaction, |
2352 ParallelOldTracer *gc_tracer) { | 2352 ParallelOldTracer *gc_tracer) { |
2353 // Recursively traverse all live objects and mark them | 2353 // Recursively traverse all live objects and mark them |
2354 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); | 2354 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2355 | 2355 |
2356 ParallelScavengeHeap* heap = gc_heap(); | 2356 ParallelScavengeHeap* heap = gc_heap(); |
2357 uint parallel_gc_threads = heap->gc_task_manager()->workers(); | 2357 uint parallel_gc_threads = heap->gc_task_manager()->workers(); |
2358 uint active_gc_threads = heap->gc_task_manager()->active_workers(); | 2358 uint active_gc_threads = heap->gc_task_manager()->active_workers(); |
2359 TaskQueueSetSuper* qset = ParCompactionManager::region_array(); | 2359 TaskQueueSetSuper* qset = ParCompactionManager::region_array(); |
2364 | 2364 |
2365 // Need new claim bits before marking starts. | 2365 // Need new claim bits before marking starts. |
2366 ClassLoaderDataGraph::clear_claimed_marks(); | 2366 ClassLoaderDataGraph::clear_claimed_marks(); |
2367 | 2367 |
2368 { | 2368 { |
2369 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); | 2369 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2370 | 2370 |
2371 ParallelScavengeHeap::ParStrongRootsScope psrs; | 2371 ParallelScavengeHeap::ParStrongRootsScope psrs; |
2372 | 2372 |
2373 GCTaskQueue* q = GCTaskQueue::create(); | 2373 GCTaskQueue* q = GCTaskQueue::create(); |
2374 | 2374 |
2393 gc_task_manager()->execute_and_wait(q); | 2393 gc_task_manager()->execute_and_wait(q); |
2394 } | 2394 } |
2395 | 2395 |
2396 // Process reference objects found during marking | 2396 // Process reference objects found during marking |
2397 { | 2397 { |
2398 GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); | 2398 GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2399 | 2399 |
2400 ReferenceProcessorStats stats; | 2400 ReferenceProcessorStats stats; |
2401 if (ref_processor()->processing_is_mt()) { | 2401 if (ref_processor()->processing_is_mt()) { |
2402 RefProcTaskExecutor task_executor; | 2402 RefProcTaskExecutor task_executor; |
2403 stats = ref_processor()->process_discovered_references( | 2403 stats = ref_processor()->process_discovered_references( |
2404 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, | 2404 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, |
2405 &task_executor, &_gc_timer); | 2405 &task_executor, &_gc_timer, _gc_tracer.gc_id()); |
2406 } else { | 2406 } else { |
2407 stats = ref_processor()->process_discovered_references( | 2407 stats = ref_processor()->process_discovered_references( |
2408 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, | 2408 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, |
2409 &_gc_timer); | 2409 &_gc_timer, _gc_tracer.gc_id()); |
2410 } | 2410 } |
2411 | 2411 |
2412 gc_tracer->report_gc_reference_stats(stats); | 2412 gc_tracer->report_gc_reference_stats(stats); |
2413 } | 2413 } |
2414 | 2414 |
2415 GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); | 2415 GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2416 | 2416 |
2417 // This is the point where the entire marking should have completed. | 2417 // This is the point where the entire marking should have completed. |
2418 assert(cm->marking_stacks_empty(), "Marking should have completed"); | 2418 assert(cm->marking_stacks_empty(), "Marking should have completed"); |
2419 | 2419 |
2420 // Follow system dictionary roots and unload classes. | 2420 // Follow system dictionary roots and unload classes. |
2449 }; | 2449 }; |
2450 static PSAlwaysTrueClosure always_true; | 2450 static PSAlwaysTrueClosure always_true; |
2451 | 2451 |
2452 void PSParallelCompact::adjust_roots() { | 2452 void PSParallelCompact::adjust_roots() { |
2453 // Adjust the pointers to reflect the new locations | 2453 // Adjust the pointers to reflect the new locations |
2454 GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer); | 2454 GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2455 | 2455 |
2456 // Need new claim bits when tracing through and adjusting pointers. | 2456 // Need new claim bits when tracing through and adjusting pointers. |
2457 ClassLoaderDataGraph::clear_claimed_marks(); | 2457 ClassLoaderDataGraph::clear_claimed_marks(); |
2458 | 2458 |
2459 // General strong roots. | 2459 // General strong roots. |
2463 Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); | 2463 Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); |
2464 ObjectSynchronizer::oops_do(adjust_pointer_closure()); | 2464 ObjectSynchronizer::oops_do(adjust_pointer_closure()); |
2465 FlatProfiler::oops_do(adjust_pointer_closure()); | 2465 FlatProfiler::oops_do(adjust_pointer_closure()); |
2466 Management::oops_do(adjust_pointer_closure()); | 2466 Management::oops_do(adjust_pointer_closure()); |
2467 JvmtiExport::oops_do(adjust_pointer_closure()); | 2467 JvmtiExport::oops_do(adjust_pointer_closure()); |
2468 // SO_AllClasses | |
2469 SystemDictionary::oops_do(adjust_pointer_closure()); | 2468 SystemDictionary::oops_do(adjust_pointer_closure()); |
2470 ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); | 2469 ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); |
2471 | 2470 |
2472 // Now adjust pointers in remaining weak roots. (All of which should | 2471 // Now adjust pointers in remaining weak roots. (All of which should |
2473 // have been cleared if they pointed to non-surviving objects.) | 2472 // have been cleared if they pointed to non-surviving objects.) |
2474 // Global (weak) JNI handles | 2473 // Global (weak) JNI handles |
2475 JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); | 2474 JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); |
2476 | 2475 |
2477 CodeCache::oops_do(adjust_pointer_closure()); | 2476 CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); |
2477 CodeCache::blobs_do(&adjust_from_blobs); | |
2478 StringTable::oops_do(adjust_pointer_closure()); | 2478 StringTable::oops_do(adjust_pointer_closure()); |
2479 ref_processor()->weak_oops_do(adjust_pointer_closure()); | 2479 ref_processor()->weak_oops_do(adjust_pointer_closure()); |
2480 // Roots were visited so references into the young gen in roots | 2480 // Roots were visited so references into the young gen in roots |
2481 // may have been scanned. Process them also. | 2481 // may have been scanned. Process them also. |
2482 // Should the reference processor have a span that excludes | 2482 // Should the reference processor have a span that excludes |
2485 } | 2485 } |
2486 | 2486 |
2487 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, | 2487 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, |
2488 uint parallel_gc_threads) | 2488 uint parallel_gc_threads) |
2489 { | 2489 { |
2490 GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer); | 2490 GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2491 | 2491 |
2492 // Find the threads that are active | 2492 // Find the threads that are active |
2493 unsigned int which = 0; | 2493 unsigned int which = 0; |
2494 | 2494 |
2495 const uint task_count = MAX2(parallel_gc_threads, 1U); | 2495 const uint task_count = MAX2(parallel_gc_threads, 1U); |
2559 | 2559 |
2560 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4 | 2560 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4 |
2561 | 2561 |
2562 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, | 2562 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, |
2563 uint parallel_gc_threads) { | 2563 uint parallel_gc_threads) { |
2564 GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer); | 2564 GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2565 | 2565 |
2566 ParallelCompactData& sd = PSParallelCompact::summary_data(); | 2566 ParallelCompactData& sd = PSParallelCompact::summary_data(); |
2567 | 2567 |
2568 // Iterate over all the spaces adding tasks for updating | 2568 // Iterate over all the spaces adding tasks for updating |
2569 // regions in the dense prefix. Assume that 1 gc thread | 2569 // regions in the dense prefix. Assume that 1 gc thread |
2641 | 2641 |
2642 void PSParallelCompact::enqueue_region_stealing_tasks( | 2642 void PSParallelCompact::enqueue_region_stealing_tasks( |
2643 GCTaskQueue* q, | 2643 GCTaskQueue* q, |
2644 ParallelTaskTerminator* terminator_ptr, | 2644 ParallelTaskTerminator* terminator_ptr, |
2645 uint parallel_gc_threads) { | 2645 uint parallel_gc_threads) { |
2646 GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer); | 2646 GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2647 | 2647 |
2648 // Once a thread has drained it's stack, it should try to steal regions from | 2648 // Once a thread has drained it's stack, it should try to steal regions from |
2649 // other threads. | 2649 // other threads. |
2650 if (parallel_gc_threads > 1) { | 2650 if (parallel_gc_threads > 1) { |
2651 for (uint j = 0; j < parallel_gc_threads; j++) { | 2651 for (uint j = 0; j < parallel_gc_threads; j++) { |
2689 } | 2689 } |
2690 #endif // #ifdef ASSERT | 2690 #endif // #ifdef ASSERT |
2691 | 2691 |
2692 void PSParallelCompact::compact() { | 2692 void PSParallelCompact::compact() { |
2693 // trace("5"); | 2693 // trace("5"); |
2694 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer); | 2694 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2695 | 2695 |
2696 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 2696 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
2697 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | 2697 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
2698 PSOldGen* old_gen = heap->old_gen(); | 2698 PSOldGen* old_gen = heap->old_gen(); |
2699 old_gen->start_array()->reset(); | 2699 old_gen->start_array()->reset(); |
2706 enqueue_region_draining_tasks(q, active_gc_threads); | 2706 enqueue_region_draining_tasks(q, active_gc_threads); |
2707 enqueue_dense_prefix_tasks(q, active_gc_threads); | 2707 enqueue_dense_prefix_tasks(q, active_gc_threads); |
2708 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); | 2708 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); |
2709 | 2709 |
2710 { | 2710 { |
2711 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer); | 2711 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2712 | 2712 |
2713 gc_task_manager()->execute_and_wait(q); | 2713 gc_task_manager()->execute_and_wait(q); |
2714 | 2714 |
2715 #ifdef ASSERT | 2715 #ifdef ASSERT |
2716 // Verify that all regions have been processed before the deferred updates. | 2716 // Verify that all regions have been processed before the deferred updates. |
2720 #endif | 2720 #endif |
2721 } | 2721 } |
2722 | 2722 |
2723 { | 2723 { |
2724 // Update the deferred objects, if any. Any compaction manager can be used. | 2724 // Update the deferred objects, if any. Any compaction manager can be used. |
2725 GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer); | 2725 GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); |
2726 ParCompactionManager* cm = ParCompactionManager::manager_array(0); | 2726 ParCompactionManager* cm = ParCompactionManager::manager_array(0); |
2727 for (unsigned int id = old_space_id; id < last_space_id; ++id) { | 2727 for (unsigned int id = old_space_id; id < last_space_id; ++id) { |
2728 update_deferred_objects(cm, SpaceId(id)); | 2728 update_deferred_objects(cm, SpaceId(id)); |
2729 } | 2729 } |
2730 } | 2730 } |