comparison src/share/vm/prims/jvmtiImpl.hpp @ 2195:bf8517f4e4d0

6766644: Redefinition of compiled method fails with assertion "Can not load classes with the Compiler thread"
Summary: Defer posting events from the compiler thread: use service thread
Reviewed-by: coleenp, dholmes, never, dcubed
author kamg
date Wed, 02 Feb 2011 14:38:01 -0500
parents 9afee0b9fc1d
children f91db74a6810
comparing 2194:face83fc8882 with 2195:bf8517f4e4d0
/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.

[... this changeset also bumps the copyright year from 2010 to 2011; lines 8-430 are unchanged and omitted here ...]

  static void print();
};

#endif // !JVMTI_KERNEL

/**
 * When a thread (such as the compiler thread or VM thread) cannot post a
 * JVMTI event itself because the event needs to be posted from a Java
 * thread, then it can defer the event to the Service thread for posting.
 * The information needed to post the event is encapsulated into this class
 * and then enqueued onto the JvmtiDeferredEventQueue, where the Service
 * thread will pick it up and post it.
 *
 * This is currently only used for posting compiled-method-load and unload
 * events, which we don't want posted from the compiler thread.
 */
class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
  friend class JvmtiDeferredEventQueue;
 private:
  typedef enum {
    TYPE_NONE,
    TYPE_COMPILED_METHOD_LOAD,
    TYPE_COMPILED_METHOD_UNLOAD,
    TYPE_FLUSH  // pseudo-event used to implement flush_queue()
  } Type;

  Type _type;
  union {
    nmethod* compiled_method_load;
    struct {
      jmethodID method_id;
      const void* code_begin;
    } compiled_method_unload;
    int* flush_state_addr;
  } _event_data;

  JvmtiDeferredEvent(Type t) : _type(t) {}

  void set_compiled_method_load(nmethod* nm) {
    assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be");
    _event_data.compiled_method_load = nm;
  }

  nmethod* compiled_method_load() const {
    assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be");
    return _event_data.compiled_method_load;
  }

  void set_compiled_method_unload(jmethodID id, const void* code) {
    assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
    _event_data.compiled_method_unload.method_id = id;
    _event_data.compiled_method_unload.code_begin = code;
  }

  jmethodID compiled_method_unload_method_id() const {
    assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
    return _event_data.compiled_method_unload.method_id;
  }

  const void* compiled_method_unload_code_begin() const {
    assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
    return _event_data.compiled_method_unload.code_begin;
  }

  bool is_flush_event() const { return _type == TYPE_FLUSH; }

  int* flush_state_addr() const {
    assert(is_flush_event(), "must be");
    return _event_data.flush_state_addr;
  }

  void set_flush_state_addr(int* flag) {
    assert(is_flush_event(), "must be");
    _event_data.flush_state_addr = flag;
  }

 public:

  JvmtiDeferredEvent() : _type(TYPE_NONE) {}

  // Factory methods
  static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
    KERNEL_RETURN_(JvmtiDeferredEvent());
  static JvmtiDeferredEvent compiled_method_unload_event(
    jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());

  // Actually posts the event.
  void post() KERNEL_RETURN;
};
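For orientation, a producer such as the compiler thread is expected to build an event with one of the factory methods above and hand it to the JvmtiDeferredEventQueue declared below. The sketch that follows is illustrative only and not part of jvmtiImpl.hpp; the helper name defer_compiled_method_load is hypothetical, and Service_lock, MutexLockerEx, Mutex::_no_safepoint_check_flag and SafepointSynchronize are assumed from the wider HotSpot code base.

// Illustrative sketch only -- not part of this header.
// Defers a compiled-method-load event for 'nm' instead of posting it from
// a thread that must not post JVMTI events itself.
void defer_compiled_method_load(nmethod* nm) {
  JvmtiDeferredEvent event =
      JvmtiDeferredEvent::compiled_method_load_event(nm);
  if (SafepointSynchronize::is_at_safepoint()) {
    // At a safepoint we can't (or don't want to) take Service_lock, so
    // park the event on the lock-free pending list (see add_pending_event()).
    JvmtiDeferredEventQueue::add_pending_event(event);
  } else {
    // Normal path: the queue requires Service_lock to be held.
    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
    JvmtiDeferredEventQueue::enqueue(event);
  }
}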

/**
 * Events enqueued on this queue wake up the Service thread which dequeues
 * and posts the events. The Service_lock is required to be held
 * when operating on the queue (except for the "pending" events).
 */
class JvmtiDeferredEventQueue : AllStatic {
  friend class JvmtiDeferredEvent;
 private:
  class QueueNode : public CHeapObj {
   private:
    JvmtiDeferredEvent _event;
    QueueNode* _next;

   public:
    QueueNode(const JvmtiDeferredEvent& event)
      : _event(event), _next(NULL) {}

    const JvmtiDeferredEvent& event() const { return _event; }
    QueueNode* next() const { return _next; }

    void set_next(QueueNode* next) { _next = next; }
  };

  static QueueNode* _queue_head;             // Hold Service_lock to access
  static QueueNode* _queue_tail;             // Hold Service_lock to access
  static volatile QueueNode* _pending_list;  // Uses CAS for read/update

  // Transfers events from the _pending_list to the _queue.
  static void process_pending_events() KERNEL_RETURN;

  static void flush_complete(int* flush_state) KERNEL_RETURN;

 public:
  // Must be holding Service_lock when calling these
  static bool has_events() KERNEL_RETURN_(false);
  static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN;
  static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent());

  // This call blocks until all events enqueued prior to this call
  // have been posted. The Service_lock is acquired and waited upon.
  //
  // Implemented by creating a "flush" event and placing it in the queue.
  // When the flush event is "posted" it will call flush_complete(), which
  // will release the caller.
  //
  // Can be called by any thread (maybe even the service thread itself).
  // Not necessary for the caller to be a JavaThread.
  static void flush_queue(Thread* current) KERNEL_RETURN;

  // Used to enqueue events without using a lock, for times (such as during
  // safepoint) when we can't or don't want to lock the Service_lock.
  //
  // Events will be held off to the side until there's a call to
  // dequeue(), enqueue(), or process_pending_events() (all of which require
  // the holding of the Service_lock), and will be enqueued at that time.
  static void add_pending_event(const JvmtiDeferredEvent&) KERNEL_RETURN;
};
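On the consumer side, the Service thread is expected to drain this queue roughly as sketched below; this is again illustrative and not part of the header. The name service_thread_iteration is hypothetical, Service_lock is the monitor named in the class comment, and MutexLockerEx and Mutex::_no_safepoint_check_flag are assumed from the wider HotSpot code base. A thread blocked in flush_queue() is released when its TYPE_FLUSH pseudo-event reaches post(), which calls flush_complete().

// Illustrative sketch only -- not part of this header.  One iteration of
// the Service thread's loop: wait for work, dequeue under Service_lock,
// then post outside the lock since posting may run JVMTI agent callbacks.
void service_thread_iteration() {
  JvmtiDeferredEvent event;
  {
    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
    while (!JvmtiDeferredEventQueue::has_events()) {
      Service_lock->wait(Mutex::_no_safepoint_check_flag);
    }
    // Per the comments above, dequeue() also folds events parked via
    // add_pending_event() into the queue before handing one back.
    event = JvmtiDeferredEventQueue::dequeue();
  }
  event.post();
}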

// Utility macro that checks for NULL pointers:
#define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }

#endif // SHARE_VM_PRIMS_JVMTIIMPL_HPP