/*
 * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_vmThread.cpp.incl"

// Dummy VM operation to act as first element in our circular doubly-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void doit() {};
};

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}
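
// Example of the sentinel invariant: an empty queue is just
//   dummy <-> dummy              (next and prev point back at the dummy),
// and after queue_add_back(prio, a); queue_add_back(prio, b); it is
//   dummy <-> a <-> b <-> dummy  (circular in both directions),
// so the head is dummy->next() and the tail is dummy->prev().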

bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}

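// Splices q out of its circular list. Note that q's own next/prev still
// point at its old neighbors until the caller overwrites them.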
void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

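// Detaches the whole chain from the dummy element and returns its head as a
// NULL-terminated list (the head's prev and the tail's next are cleared),
// leaving the queue in the empty state.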
VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  // length is only declared under DEBUG_ONLY (i.e., ASSERT) above
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}

void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {
  // Encapsulates VM queue policy. Currently, that
  // only involves putting the operation on the right list
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}
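
// With both queues non-empty, the resulting pick sequence is ten consecutive
// SafepointPriority ops followed by one MediumPriority op, repeating.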
VM_Operation* VMOperationQueue::remove_next() {
  // Assuming the VMOperation queue is a two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // simple counter based scheduling to prevent starvation of lower priority
  // queue. -- see 4390175
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
    high_prio = SafepointPriority;
    low_prio  = MediumPriority;
  } else {
    _queue_counter = 0;
    high_prio = MediumPriority;
    low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}
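
// GC support: visit the oops of every queued operation and of every
// operation on the drain list, so pending operations stay live across a GC.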
void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool              VMThread::_should_terminate = false;
bool              VMThread::_terminated       = false;
Monitor*          VMThread::_terminate_lock   = NULL;
VMThread*         VMThread::_vm_thread        = NULL;
VM_Operation*     VMThread::_cur_vm_operation = NULL;
VMOperationQueue* VMThread::_vm_queue         = NULL;
PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;


void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
                 PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                                 PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : Thread() {
  // nothing to do
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}

void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // The wait on Notify_lock checks active_handles() and re-waits in case
  // of a spurious wakeup, so it must observe the last value set prior to
  // the notify
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565  This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    Universe::verify(true, true); // Silent verification so as not to pollute normal output
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // the VM thread to enter any lock at Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until the operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since no one can end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at Safepoint. It's safer to not share the lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

void VMThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}

void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    op->evaluate();
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call, since if it is stack
  // allocated the calling thread might have deallocated it by now
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}
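
// Main loop of the VM thread: wait for a VM operation to arrive (forcing a
// periodic safepoint if none shows up within GuaranteedSafepointInterval),
// evaluate it (batching any queued safepoint ops into the same safepoint),
// then notify the threads waiting on VMOperationRequest_lock.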
void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds. This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
#ifdef ASSERT
          if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
#endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum. Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        if (PrintGCApplicationConcurrentTime) {
          gclog_or_tty->print_cr("Application time: %3.7f seconds",
                                 RuntimeService::last_application_time_sec());
        }

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread, but
          // that simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                   Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

        if (PrintGCApplicationStoppedTime) {
          gclog_or_tty->print_cr("Total time for which application threads "
                                 "were stopped: %3.7f seconds",
                                 RuntimeService::last_safepoint_time_sec());
        }

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    // Notify (potential) waiting Java thread(s) - lock without safepoint
    // check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 &&
                               (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}

void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    // JavaThread or WatcherThread
    t->check_for_valid_safepoint_state(true);

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Setup VM_operations for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue if the VM operation
    // object is being deallocated by the VM thread.
    bool concurrent     = op->evaluate_concurrently();
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
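    // The ticket is this thread's count of issued non-concurrent operations;
    // the wait below blocks until the thread's completed-operation count
    // catches up with it, i.e., until this operation has been evaluated.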
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is
      // normally not the case; e.g., the compiler does not allow nested
      // scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal2("Nested VM operation %s requested by operation %s", op->name(), vm_operation()->name());
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


void VMThread::oops_do(OopClosure* f) {
  Thread::oops_do(f);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop);
}