Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/ptrQueue.hpp @ 12577:32d08d88c881
restricted initialization of ForeignCallProvider during VM startup to only be for the ForeignCallProvider of the host backend.
author | Doug Simon <doug.simon@oracle.com> |
---|---|
date | Fri, 25 Oct 2013 00:12:54 +0200 |
parents | 69f26e8e09f9 |
children | 69944b868a32 |
rev | line source |
---|---|
342 | 1 /* |
4787
2ace1c4ee8da
6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
tonyp
parents:
2149
diff
changeset
|
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1317
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1317
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1317
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP | |
27 | |
28 #include "memory/allocation.hpp" | |
29 #include "utilities/sizes.hpp" | |
30 | |
342 | 31 // There are various techniques that require threads to be able to log |
32 // addresses. For example, a generational write barrier might log | |
33 // the addresses of modified old-generation objects. This type supports | |
34 // this operation. | |
35 | |
1111 | 36 // The definition of placement operator new(size_t, void*) in the <new>. |
37 #include <new> | |
38 | |
342 | 39 class PtrQueueSet; |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
470
diff
changeset
|
// A PtrQueue is a thread-local log of pointers: a thread enqueues addresses
// into a buffer obtained from (and returned to) an associated PtrQueueSet.
// The buffer fills from _sz down towards 0 (_index == _sz means empty).
class PtrQueue VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

protected:
  // The ptr queue set to which this queue belongs.
  PtrQueueSet* _qset;

  // Whether updates should be logged; enqueue() is a no-op when false.
  bool _active;

  // The buffer (owned by this queue until enqueued on the qset; NULL if none).
  void** _buf;
  // The index at which an object was last enqueued. Starts at "_sz"
  // (indicating an empty buffer) and goes towards zero.
  size_t _index;

  // The size of the buffer, in bytes.
  size_t _sz;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  bool _perm;

  // If there is a lock associatedated with this buffer, this is that lock.
  Mutex* _lock;

  // The owning queue set.
  PtrQueueSet* qset() { return _qset; }

public:
  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  // Release any contained resources.
  virtual void flush();
  // Calls flush() when destroyed.
  // NOTE(review): inside a destructor the call resolves statically to
  // PtrQueue::flush(), never to a derived override — subclasses that need
  // extra flushing must do it in their own destructors.
  ~PtrQueue() { flush(); }

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Discard the buffer's contents (marks the buffer empty; keeps the buffer).
  void reset() { if (_buf != NULL) _index = _sz; }

  // Enqueues the given "ptr" if the queue is active; otherwise a no-op.
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // This method is called when we're doing the zero index handling
  // and gives a chance to the queues to do any pre-enqueueing
  // processing they might want to do on the buffer. It should return
  // true if the buffer should be enqueued, or false if enough
  // entries were cleared from it so that it can be re-used. It should
  // not return false if the buffer is still full (otherwise we can
  // get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
  // Called when the buffer is full (_index == 0): hands the current buffer
  // to the qset and obtains a fresh one.
  void handle_zero_index();
  // Enqueue "buf" on the qset's completed-buffer list, taking the shared lock.
  void locking_enqueue_completed_buffer(void** buf);

  // Enqueue "ptr" without re-checking _active; caller guarantees it is set.
  void enqueue_known_active(void* ptr);

  // Number of bytes currently logged in the buffer (0 if no buffer).
  size_t size() {
    assert(_sz >= _index, "Invariant.");
    return _buf == NULL ? 0 : _sz - _index;
  }

  // True if nothing has been logged (no buffer, or index still at _sz).
  bool is_empty() {
    return _buf == NULL || _sz == _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive thread is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      _index = _sz;
    } else if (b && _buf != NULL) {
      assert(_index == _sz, "invariant: queues are empty when activated.");
    }
  }

  bool is_active() { return _active; }

  // Convert a byte offset into the buffer to an element (oop-slot) index.
  static int byte_index_to_index(int ind) {
    assert((ind % oopSize) == 0, "Invariant.");
    return ind / oopSize;
  }

  // Inverse of byte_index_to_index.
  static int index_to_byte_index(int byte_ind) {
    return byte_ind * oopSize;
  }

  // To support compiler: field offsets/widths used when generating inline
  // write-barrier code that manipulates the queue directly.
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(PtrQueue, _index);
  }
  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(PtrQueue, _buf);
  }
  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }

  static ByteSize byte_offset_of_active() {
    return byte_offset_of(PtrQueue, _active);
  }
  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};
151 | |
1111 | 152 class BufferNode { |
153 size_t _index; | |
154 BufferNode* _next; | |
155 public: | |
156 BufferNode() : _index(0), _next(NULL) { } | |
157 BufferNode* next() const { return _next; } | |
158 void set_next(BufferNode* n) { _next = n; } | |
159 size_t index() const { return _index; } | |
160 void set_index(size_t i) { _index = i; } | |
161 | |
162 // Align the size of the structure to the size of the pointer | |
163 static size_t aligned_size() { | |
164 static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*)); | |
165 return alignment; | |
166 } | |
167 | |
168 // BufferNode is allocated before the buffer. | |
169 // The chunk of memory that holds both of them is a block. | |
170 | |
171 // Produce a new BufferNode given a buffer. | |
172 static BufferNode* new_from_buffer(void** buf) { | |
173 return new (make_block_from_buffer(buf)) BufferNode; | |
174 } | |
175 | |
176 // The following are the required conversion routines: | |
177 static BufferNode* make_node_from_buffer(void** buf) { | |
178 return (BufferNode*)make_block_from_buffer(buf); | |
179 } | |
180 static void** make_buffer_from_node(BufferNode *node) { | |
181 return make_buffer_from_block(node); | |
182 } | |
183 static void* make_block_from_node(BufferNode *node) { | |
184 return (void*)node; | |
185 } | |
186 static void** make_buffer_from_block(void* p) { | |
187 return (void**)((char*)p + aligned_size()); | |
188 } | |
189 static void* make_block_from_buffer(void** p) { | |
190 return (void*)((char*)p - aligned_size()); | |
191 } | |
192 }; | |
193 | |
342 | 194 // A PtrQueueSet represents resources common to a set of pointer queues. |
195 // In particular, the individual queues allocate buffers from this shared | |
196 // set, and return completed buffers to the set. | |
197 // All these variables are are protected by the TLOQ_CBL_mon. XXX ??? | |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
470
diff
changeset
|
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  // Intrusive list of buffers handed in by queues, awaiting processing.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  int _n_completed_buffers;
  // Completed-buffer count at which processing should be triggered.
  int _process_completed_threshold;
  // Set when the threshold is crossed; read without the lock (volatile).
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the TLOQ_FL_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue set can share a freelist. The _fl_owner variable
  // specifies the owner. It is set to "this" by default.
  PtrQueueSet* _fl_owner;

  // The size of all buffers in the set.
  size_t _sz;

  // Activity state inherited by queues belonging to this set.
  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself. Zero indicates no maximum.
  int _max_completed_queue;
  // Extra slack added to _max_completed_queue before enqueuers must
  // process buffers themselves.
  int _completed_queue_padding;

  // Debug-support: walk and validate the completed-buffer list length.
  int completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(void** buf) {
    ShouldNotReachHere();
    return false;
  }

public:
  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL) {
    _max_completed_queue = max_completed_queue;
    _process_completed_threshold = process_completed_threshold;
    _completed_queue_padding = 0;
    assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
    _cbl_mon = cbl_mon;
    _fl_lock = fl_lock;
    // Default to owning our own freelist unless one is shared with us.
    _fl_owner = (fl_owner != NULL) ? fl_owner : this;
  }

  // Return an empty oop array of size _sz (required to be non-zero).
  void** allocate_buffer();

  // Return an empty buffer to the free list. The "buf" argument is
  // required to be a pointer to the head of an array of length "_sz".
  void deallocate_buffer(void** buf);

  // Declares that "buf" is a complete buffer.
  void enqueue_complete_buffer(void** buf, size_t index = 0);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(void** buf);

  // True if any completed buffers are awaiting processing.
  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size. Should be called before any "enqueue" operation
  // can be called. And should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.
  size_t buffer_size() { return _sz; }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point. Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  int completed_buffers_num() { return _n_completed_buffers; }

  // Move the buffers on "src"'s lists onto this set's lists.
  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
  int completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};
1972 | 311 |
312 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP |