Mercurial > hg > graal-jvmci-8
annotate src/share/vm/gc_implementation/g1/ptrQueue.hpp @ 1718:bb847e31b836
6974928: G1: sometimes humongous objects are allocated in young regions
Summary: as the title says, sometimes we are allocating humongous objects in young regions and we shouldn't.
Reviewed-by: ysr, johnc
author | tonyp |
---|---|
date | Tue, 17 Aug 2010 14:40:00 -0400 |
parents | c18cbe5936b8 |
children | 6e0aac35bfa9 |
rev | line source |
---|---|
/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
24 | |
// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.
1111 | 30 // The definition of placement operator new(size_t, void*) in the <new>. |
31 #include <new> | |
32 | |
342 | 33 class PtrQueueSet; |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
470
diff
changeset
|
34 class PtrQueue VALUE_OBJ_CLASS_SPEC { |
342 | 35 |
36 protected: | |
37 // The ptr queue set to which this queue belongs. | |
38 PtrQueueSet* _qset; | |
39 | |
40 // Whether updates should be logged. | |
41 bool _active; | |
42 | |
43 // The buffer. | |
44 void** _buf; | |
45 // The index at which an object was last enqueued. Starts at "_sz" | |
46 // (indicating an empty buffer) and goes towards zero. | |
47 size_t _index; | |
48 | |
49 // The size of the buffer. | |
50 size_t _sz; | |
51 | |
52 // If true, the queue is permanent, and doesn't need to deallocate | |
53 // its buffer in the destructor (since that obtains a lock which may not | |
54 // be legally locked by then. | |
55 bool _perm; | |
56 | |
57 // If there is a lock associated with this buffer, this is that lock. | |
58 Mutex* _lock; | |
59 | |
60 PtrQueueSet* qset() { return _qset; } | |
61 | |
62 public: | |
63 // Initialize this queue to contain a null buffer, and be part of the | |
64 // given PtrQueueSet. | |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1111
diff
changeset
|
65 PtrQueue(PtrQueueSet*, bool perm = false, bool active = false); |
342 | 66 // Release any contained resources. |
441
da9cb4e97a5f
6770608: G1: Mutator thread can flush barrier and satb queues during safepoint
iveresov
parents:
342
diff
changeset
|
67 void flush(); |
da9cb4e97a5f
6770608: G1: Mutator thread can flush barrier and satb queues during safepoint
iveresov
parents:
342
diff
changeset
|
68 // Calls flush() when destroyed. |
da9cb4e97a5f
6770608: G1: Mutator thread can flush barrier and satb queues during safepoint
iveresov
parents:
342
diff
changeset
|
69 ~PtrQueue() { flush(); } |
342 | 70 |
71 // Associate a lock with a ptr queue. | |
72 void set_lock(Mutex* lock) { _lock = lock; } | |
73 | |
74 void reset() { if (_buf != NULL) _index = _sz; } | |
75 | |
76 // Enqueues the given "obj". | |
77 void enqueue(void* ptr) { | |
78 if (!_active) return; | |
79 else enqueue_known_active(ptr); | |
80 } | |
81 | |
1111 | 82 void handle_zero_index(); |
342 | 83 void locking_enqueue_completed_buffer(void** buf); |
84 | |
85 void enqueue_known_active(void* ptr); | |
86 | |
87 size_t size() { | |
88 assert(_sz >= _index, "Invariant."); | |
89 return _buf == NULL ? 0 : _sz - _index; | |
90 } | |
91 | |
92 // Set the "active" property of the queue to "b". An enqueue to an | |
93 // inactive thread is a no-op. Setting a queue to inactive resets its | |
94 // log to the empty state. | |
95 void set_active(bool b) { | |
96 _active = b; | |
97 if (!b && _buf != NULL) { | |
98 _index = _sz; | |
99 } else if (b && _buf != NULL) { | |
100 assert(_index == _sz, "invariant: queues are empty when activated."); | |
101 } | |
102 } | |
103 | |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1111
diff
changeset
|
104 bool is_active() { return _active; } |
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1111
diff
changeset
|
105 |
342 | 106 static int byte_index_to_index(int ind) { |
107 assert((ind % oopSize) == 0, "Invariant."); | |
108 return ind / oopSize; | |
109 } | |
110 | |
111 static int index_to_byte_index(int byte_ind) { | |
112 return byte_ind * oopSize; | |
113 } | |
114 | |
115 // To support compiler. | |
116 static ByteSize byte_offset_of_index() { | |
117 return byte_offset_of(PtrQueue, _index); | |
118 } | |
119 static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); } | |
120 | |
121 static ByteSize byte_offset_of_buf() { | |
122 return byte_offset_of(PtrQueue, _buf); | |
123 } | |
124 static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); } | |
125 | |
126 static ByteSize byte_offset_of_active() { | |
127 return byte_offset_of(PtrQueue, _active); | |
128 } | |
129 static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); } | |
130 | |
131 }; | |
132 | |
1111 | 133 class BufferNode { |
134 size_t _index; | |
135 BufferNode* _next; | |
136 public: | |
137 BufferNode() : _index(0), _next(NULL) { } | |
138 BufferNode* next() const { return _next; } | |
139 void set_next(BufferNode* n) { _next = n; } | |
140 size_t index() const { return _index; } | |
141 void set_index(size_t i) { _index = i; } | |
142 | |
143 // Align the size of the structure to the size of the pointer | |
144 static size_t aligned_size() { | |
145 static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*)); | |
146 return alignment; | |
147 } | |
148 | |
149 // BufferNode is allocated before the buffer. | |
150 // The chunk of memory that holds both of them is a block. | |
151 | |
152 // Produce a new BufferNode given a buffer. | |
153 static BufferNode* new_from_buffer(void** buf) { | |
154 return new (make_block_from_buffer(buf)) BufferNode; | |
155 } | |
156 | |
157 // The following are the required conversion routines: | |
158 static BufferNode* make_node_from_buffer(void** buf) { | |
159 return (BufferNode*)make_block_from_buffer(buf); | |
160 } | |
161 static void** make_buffer_from_node(BufferNode *node) { | |
162 return make_buffer_from_block(node); | |
163 } | |
164 static void* make_block_from_node(BufferNode *node) { | |
165 return (void*)node; | |
166 } | |
167 static void** make_buffer_from_block(void* p) { | |
168 return (void**)((char*)p + aligned_size()); | |
169 } | |
170 static void* make_block_from_buffer(void** p) { | |
171 return (void*)((char*)p - aligned_size()); | |
172 } | |
173 }; | |
174 | |
342 | 175 // A PtrQueueSet represents resources common to a set of pointer queues. |
176 // In particular, the individual queues allocate buffers from this shared | |
177 // set, and return completed buffers to the set. | |
178 // All these variables are are protected by the TLOQ_CBL_mon. XXX ??? | |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
470
diff
changeset
|
179 class PtrQueueSet VALUE_OBJ_CLASS_SPEC { |
342 | 180 protected: |
181 Monitor* _cbl_mon; // Protects the fields below. | |
1111 | 182 BufferNode* _completed_buffers_head; |
183 BufferNode* _completed_buffers_tail; | |
184 int _n_completed_buffers; | |
185 int _process_completed_threshold; | |
342 | 186 volatile bool _process_completed; |
187 | |
188 // This (and the interpretation of the first element as a "next" | |
189 // pointer) are protected by the TLOQ_FL_lock. | |
190 Mutex* _fl_lock; | |
1111 | 191 BufferNode* _buf_free_list; |
342 | 192 size_t _buf_free_list_sz; |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
193 // Queue set can share a freelist. The _fl_owner variable |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
194 // specifies the owner. It is set to "this" by default. |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
195 PtrQueueSet* _fl_owner; |
342 | 196 |
197 // The size of all buffers in the set. | |
198 size_t _sz; | |
199 | |
200 bool _all_active; | |
201 | |
202 // If true, notify_all on _cbl_mon when the threshold is reached. | |
203 bool _notify_when_complete; | |
204 | |
205 // Maximum number of elements allowed on completed queue: after that, | |
206 // enqueuer does the work itself. Zero indicates no maximum. | |
207 int _max_completed_queue; | |
1111 | 208 int _completed_queue_padding; |
342 | 209 |
210 int completed_buffers_list_length(); | |
211 void assert_completed_buffer_list_len_correct_locked(); | |
212 void assert_completed_buffer_list_len_correct(); | |
213 | |
214 protected: | |
215 // A mutator thread does the the work of processing a buffer. | |
216 // Returns "true" iff the work is complete (and the buffer may be | |
217 // deallocated). | |
218 virtual bool mut_process_buffer(void** buf) { | |
219 ShouldNotReachHere(); | |
220 return false; | |
221 } | |
222 | |
223 public: | |
224 // Create an empty ptr queue set. | |
225 PtrQueueSet(bool notify_when_complete = false); | |
226 | |
227 // Because of init-order concerns, we can't pass these as constructor | |
228 // arguments. | |
229 void initialize(Monitor* cbl_mon, Mutex* fl_lock, | |
1111 | 230 int process_completed_threshold, |
231 int max_completed_queue, | |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
232 PtrQueueSet *fl_owner = NULL) { |
342 | 233 _max_completed_queue = max_completed_queue; |
1111 | 234 _process_completed_threshold = process_completed_threshold; |
235 _completed_queue_padding = 0; | |
342 | 236 assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?"); |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
237 _cbl_mon = cbl_mon; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
238 _fl_lock = fl_lock; |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
239 _fl_owner = (fl_owner != NULL) ? fl_owner : this; |
342 | 240 } |
241 | |
242 // Return an empty oop array of size _sz (required to be non-zero). | |
243 void** allocate_buffer(); | |
244 | |
245 // Return an empty buffer to the free list. The "buf" argument is | |
246 // required to be a pointer to the head of an array of length "_sz". | |
247 void deallocate_buffer(void** buf); | |
248 | |
249 // Declares that "buf" is a complete buffer. | |
1111 | 250 void enqueue_complete_buffer(void** buf, size_t index = 0); |
251 | |
252 // To be invoked by the mutator. | |
253 bool process_or_enqueue_complete_buffer(void** buf); | |
342 | 254 |
255 bool completed_buffers_exist_dirty() { | |
256 return _n_completed_buffers > 0; | |
257 } | |
258 | |
259 bool process_completed_buffers() { return _process_completed; } | |
1111 | 260 void set_process_completed(bool x) { _process_completed = x; } |
342 | 261 |
1317
d4197f8d516a
6935821: G1: threads created during marking do not active their SATB queues
tonyp
parents:
1111
diff
changeset
|
262 bool is_active() { return _all_active; } |
342 | 263 |
264 // Set the buffer size. Should be called before any "enqueue" operation | |
265 // can be called. And should only be called once. | |
266 void set_buffer_size(size_t sz); | |
267 | |
268 // Get the buffer size. | |
269 size_t buffer_size() { return _sz; } | |
270 | |
1111 | 271 // Get/Set the number of completed buffers that triggers log processing. |
272 void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; } | |
273 int process_completed_threshold() const { return _process_completed_threshold; } | |
342 | 274 |
275 // Must only be called at a safe point. Indicates that the buffer free | |
276 // list size may be reduced, if that is deemed desirable. | |
277 void reduce_free_list(); | |
278 | |
1111 | 279 int completed_buffers_num() { return _n_completed_buffers; } |
616
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
280 |
4f360ec815ba
6720309: G1: don't synchronously update RSet during evacuation pauses
iveresov
parents:
549
diff
changeset
|
281 void merge_bufferlists(PtrQueueSet* src); |
1111 | 282 |
283 void set_max_completed_queue(int m) { _max_completed_queue = m; } | |
284 int max_completed_queue() { return _max_completed_queue; } | |
285 | |
286 void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; } | |
287 int completed_queue_padding() { return _completed_queue_padding; } | |
288 | |
289 // Notify the consumer if the number of buffers crossed the threshold | |
290 void notify_if_necessary(); | |
342 | 291 }; |