comparison src/share/vm/gc_implementation/g1/vm_operations_g1.cpp @ 1973:631f79e71e90

6974966: G1: unnecessary direct-to-old allocations Summary: This change revamps the slow allocation path of G1. Improvements include the following: a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions). b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated. c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab(). d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC. e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards. f) The G1 policy is more careful to set the young list target length to be the survivor number +1. g) Lots of code tidy up, removal, refactoring to make future changes easier. Reviewed-by: johnc, ysr
author tonyp
date Tue, 24 Aug 2010 17:24:33 -0400
parents f95d63e2154a
children 7246a374a9f2
comparison
equal deleted inserted replaced
1972:f95d63e2154a 1973:631f79e71e90
25 #include "precompiled.hpp" 25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
27 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 27 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
28 #include "gc_implementation/g1/vm_operations_g1.hpp" 28 #include "gc_implementation/g1/vm_operations_g1.hpp"
29 #include "gc_implementation/shared/isGCActiveMark.hpp" 29 #include "gc_implementation/shared/isGCActiveMark.hpp"
30 #include "gc_implementation/g1/vm_operations_g1.hpp"
30 #include "runtime/interfaceSupport.hpp" 31 #include "runtime/interfaceSupport.hpp"
32
33 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
34 unsigned int gc_count_before,
35 size_t word_size)
36 : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
37 guarantee(word_size > 0, "an allocation should always be requested");
38 }
31 39
32 void VM_G1CollectForAllocation::doit() { 40 void VM_G1CollectForAllocation::doit() {
33 JvmtiGCForAllocationMarker jgcm; 41 JvmtiGCForAllocationMarker jgcm;
34 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 42 G1CollectedHeap* g1h = G1CollectedHeap::heap();
35 _res = g1h->satisfy_failed_allocation(_size); 43 _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
36 assert(g1h->is_in_or_null(_res), "result not in heap"); 44 assert(_result == NULL || _pause_succeeded,
45 "if we get back a result, the pause should have succeeded");
37 } 46 }
38 47
39 void VM_G1CollectFull::doit() { 48 void VM_G1CollectFull::doit() {
40 JvmtiGCFullMarker jgcm; 49 JvmtiGCFullMarker jgcm;
41 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 50 G1CollectedHeap* g1h = G1CollectedHeap::heap();
42 GCCauseSetter x(g1h, _gc_cause); 51 GCCauseSetter x(g1h, _gc_cause);
43 g1h->do_full_collection(false /* clear_all_soft_refs */); 52 g1h->do_full_collection(false /* clear_all_soft_refs */);
44 } 53 }
45 54
55 VM_G1IncCollectionPause::VM_G1IncCollectionPause(
56 unsigned int gc_count_before,
57 size_t word_size,
58 bool should_initiate_conc_mark,
59 double target_pause_time_ms,
60 GCCause::Cause gc_cause)
61 : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
62 _should_initiate_conc_mark(should_initiate_conc_mark),
63 _target_pause_time_ms(target_pause_time_ms),
64 _full_collections_completed_before(0) {
65 guarantee(target_pause_time_ms > 0.0,
66 err_msg("target_pause_time_ms = %1.6lf should be positive",
67 target_pause_time_ms));
68 guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
69 "we can only request an allocation if the GC cause is for "
70 "an incremental GC pause");
71 _gc_cause = gc_cause;
72 }
73
46 void VM_G1IncCollectionPause::doit() { 74 void VM_G1IncCollectionPause::doit() {
47 JvmtiGCForAllocationMarker jgcm; 75 JvmtiGCForAllocationMarker jgcm;
48 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 76 G1CollectedHeap* g1h = G1CollectedHeap::heap();
49 assert(!_should_initiate_conc_mark || 77 assert(!_should_initiate_conc_mark ||
50 ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || 78 ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
51 (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)), 79 (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
52 "only a GC locker or a System.gc() induced GC should start a cycle"); 80 "only a GC locker or a System.gc() induced GC should start a cycle");
81
82 if (_word_size > 0) {
83 // An allocation has been requested. So, try to do that first.
84 _result = g1h->attempt_allocation_at_safepoint(_word_size,
85 false /* expect_null_cur_alloc_region */);
86 if (_result != NULL) {
87 // If we can successfully allocate before we actually do the
88 // pause then we will consider this pause successful.
89 _pause_succeeded = true;
90 return;
91 }
92 }
53 93
54 GCCauseSetter x(g1h, _gc_cause); 94 GCCauseSetter x(g1h, _gc_cause);
55 if (_should_initiate_conc_mark) { 95 if (_should_initiate_conc_mark) {
56 // It's safer to read full_collections_completed() here, given 96 // It's safer to read full_collections_completed() here, given
57 // that noone else will be updating it concurrently. Since we'll 97 // that noone else will be updating it concurrently. Since we'll
61 101
62 // At this point we are supposed to start a concurrent cycle. We 102 // At this point we are supposed to start a concurrent cycle. We
63 // will do so if one is not already in progress. 103 // will do so if one is not already in progress.
64 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(); 104 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
65 } 105 }
66 g1h->do_collection_pause_at_safepoint(_target_pause_time_ms); 106
107 _pause_succeeded =
108 g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
109 if (_pause_succeeded && _word_size > 0) {
110 // An allocation had been requested.
111 _result = g1h->attempt_allocation_at_safepoint(_word_size,
112 true /* expect_null_cur_alloc_region */);
113 } else {
114 assert(_result == NULL, "invariant");
115 }
67 } 116 }
68 117
69 void VM_G1IncCollectionPause::doit_epilogue() { 118 void VM_G1IncCollectionPause::doit_epilogue() {
70 VM_GC_Operation::doit_epilogue(); 119 VM_GC_Operation::doit_epilogue();
71 120