comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 2134:b158bed62ef5

6994297: G1: do first-level slow-path allocations with a CAS
Summary: First, attempt to allocate out of the current alloc region using a
CAS instead of taking the Heap_lock (the first level of G1's slow allocation
path). Only if that fails, and it is necessary to replace the current alloc
region, take the Heap_lock (the second level of G1's slow allocation path).
Reviewed-by: johnc, brutisso, ysr
author tonyp
date Wed, 12 Jan 2011 16:34:25 -0500
parents 016a3628c885
children 7e37af9d69ef
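
Editor's note: the change below swaps HeapRegion::allocate() (which assumed the Heap_lock was held) for the lock-free HeapRegion::par_allocate_no_bot_updates() on the first level of the slow path. As a rough sketch of the idea only (simplified, hypothetical names, std::atomic instead of HotSpot's atomic primitives, and no alignment handling), a CAS-based bump-pointer allocation looks like this:

// Sketch only: a CAS-based bump-pointer allocation in the spirit of
// HeapRegion::par_allocate_no_bot_updates(). The real HotSpot code performs
// a compare-and-swap on the region's top pointer and deliberately skips
// block-offset-table (BOT) updates, which young regions do not need.
#include <atomic>
#include <cstddef>

typedef size_t HeapWordSketch;            // stand-in for HotSpot's HeapWord

struct RegionSketch {
  HeapWordSketch* _bottom;
  HeapWordSketch* _end;
  std::atomic<HeapWordSketch*> _top;

  // Lock-free allocation: loop on a compare-and-swap of _top. Racing
  // mutator threads never take the Heap_lock here; the losers of a CAS
  // simply retry with the refreshed _top value.
  HeapWordSketch* par_allocate(size_t word_size) {
    HeapWordSketch* old_top = _top.load(std::memory_order_relaxed);
    do {
      if ((size_t)(_end - old_top) < word_size) {
        return NULL;                      // region full: fall back to the
      }                                   // locked second-level slow path
    } while (!_top.compare_exchange_weak(old_top, old_top + word_size));
    return old_top;                       // start of the allocated block
  }
};

Skipping the BOT updates is safe here because, as the removed comment and the retained assertions note, the current alloc region is always young and young regions do not need BOT maintenance.
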
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  (2133:2250ee17e258)
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  (2134:b158bed62ef5)
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -61,69 +61,71 @@
 
 // See the comment in the .hpp file about the locking protocol and
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+                                                size_t word_size,
+                                                bool with_heap_lock) {
+  assert_not_at_safepoint();
+  assert(with_heap_lock == Heap_lock->owned_by_self(),
+         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
   assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
   assert(cur_alloc_region->is_young(),
          "we only support young current alloc regions");
   assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
          "should not be used for humongous allocations");
   assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
 
   assert(!cur_alloc_region->is_empty(),
          err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                  cur_alloc_region->bottom(), cur_alloc_region->end()));
-  // This allocate method does BOT updates and we don't need them in
-  // the young generation. This will be fixed in the near future by
-  // CR 6994297.
-  HeapWord* result = cur_alloc_region->allocate(word_size);
+  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
   if (result != NULL) {
     assert(is_in(result), "result should be in the heap");
-    Heap_lock->unlock();
 
+    if (with_heap_lock) {
+      Heap_lock->unlock();
+    }
+    assert_heap_not_locked();
     // Do the dirtying after we release the Heap_lock.
     dirty_young_block(result, word_size);
     return result;
   }
 
-  assert_heap_locked();
+  if (with_heap_lock) {
+    assert_heap_locked();
+  } else {
+    assert_heap_not_locked();
+  }
   return NULL;
 }
 
 // See the comment in the .hpp file about the locking protocol and
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+  assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not be called "
          "for humongous allocation requests");
 
   HeapRegion* cur_alloc_region = _cur_alloc_region;
   if (cur_alloc_region != NULL) {
     HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size);
+                                                      word_size,
+                                                      false /* with_heap_lock */);
+    assert_heap_not_locked();
     if (result != NULL) {
-      assert_heap_not_locked();
       return result;
     }
-
-    assert_heap_locked();
-
-    // Since we couldn't successfully allocate into it, retire the
-    // current alloc region.
-    retire_cur_alloc_region(cur_alloc_region);
   }
 
-  // Try to get a new region and allocate out of it
-  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                           false, /* at_safepoint */
-                                                           true, /* do_dirtying */
-                                                           false /* can_expand */);
+  // Our attempt to allocate lock-free failed as the current
+  // allocation region is either NULL or full. So, we'll now take the
+  // Heap_lock and retry.
+  Heap_lock->lock();
+
+  HeapWord* result = attempt_allocation_locked(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
   }
 
@@ -141,10 +143,49 @@
 
   // The region is guaranteed to be young
   g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
   _summary_bytes_used += cur_alloc_region->used();
   _cur_alloc_region = NULL;
+}
+
+inline HeapWord*
+G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation_locked() "
+         "should not be called for humongous allocation requests");
+
+  // First, reread the current alloc region and retry the allocation
+  // in case somebody replaced it while we were waiting to get the
+  // Heap_lock.
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(
+                                                  cur_alloc_region, word_size,
+                                                  true /* with_heap_lock */);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    // We failed to allocate out of the current alloc region, so let's
+    // retire it before getting a new one.
+    retire_cur_alloc_region(cur_alloc_region);
+  }
+
+  assert_heap_locked();
+  // Try to get a new region and allocate out of it
+  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                            false, /* at_safepoint */
                                                            true, /* do_dirtying */
                                                            false /* can_expand */);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
 }
 
 // It dirties the cards that cover the block so that so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
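
Editor's note: taken together, the new attempt_allocation() / attempt_allocation_locked() pair follows a familiar optimistic pattern: try lock-free first, then take the lock, re-read the shared state, and retry before doing the expensive work of retiring and replacing the region. A compressed, generic sketch of that shape (illustrative only; the names below are not HotSpot APIs):

#include <atomic>
#include <cstddef>
#include <mutex>

// Generic shape of the two-level slow path (sketch, not HotSpot code).
// Region is any type with a lock-free par_allocate(size_t) method, such as
// the RegionSketch above.
template <typename Region, typename Word>
Word* attempt_allocation_sketch(std::atomic<Region*>& cur_region,
                                std::mutex& heap_lock,
                                size_t word_size) {
  // Level 1: CAS-only attempt against the current region, no lock held.
  if (Region* r = cur_region.load()) {
    if (Word* result = r->par_allocate(word_size)) {
      return result;
    }
  }

  // Level 2: the region was NULL or full, so take the lock, re-read the
  // current region (another thread may have installed a fresh one while we
  // waited), and retry before retiring/replacing it.
  std::lock_guard<std::mutex> guard(heap_lock);
  if (Region* r = cur_region.load()) {
    if (Word* result = r->par_allocate(word_size)) {
      return result;
    }
    // retire r and install a replacement region here (elided)
  }
  return NULL;  // caller escalates further, e.g. by attempting a GC
}

One difference from the sketch: in the patch a NULL result from attempt_allocation_locked() leaves the Heap_lock held (hence the final assert_heap_locked()), so the caller can continue the slow path under the lock, while a successful locked allocation releases the Heap_lock inside allocate_from_cur_alloc_region() before dirtying the young block, keeping the "Heap_lock not held on success" post-condition identical for both levels.
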