comparison src/share/vm/code/nmethod.hpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 8f2fb6bec986 b12a2a9b05ca
children 1621af5cb444
comparing 20184:84105dcdb05b with 20804:7848fc12602b
@@ -67,11 +67,16 @@
 // cache pc descs found in earlier inquiries
 class PcDescCache VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
  private:
   enum { cache_size = 4 };
-  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
+  // The array elements MUST be volatile! Several threads may modify
+  // and read from the cache concurrently. find_pc_desc_internal has
+  // returned wrong results. C++ compiler (namely xlC12) may duplicate
+  // C++ field accesses if the elements are not volatile.
+  typedef PcDesc* PcDescPtr;
+  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
  public:
   PcDescCache() { debug_only(_pc_descs[0] = NULL); }
   void reset_to(PcDesc* initial_pc_desc);
   PcDesc* find_pc_desc(int pc_offset, bool approximate);
   void add_pc_desc(PcDesc* pc_desc);
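Why volatile matters here: without it, the compiler is free to re-load _pc_descs[i] instead of reusing a value already read into a local, so a concurrent add_pc_desc between the two loads can make one lookup observe two different entries. A minimal sketch of the safe read pattern, using a hypothetical find loop (not the actual find_pc_desc implementation):

    PcDesc* find(int pc_offset) {
      for (int i = 0; i < cache_size; i++) {
        // Read the volatile element exactly once into a local and use only
        // the local afterwards; the volatile qualifier stops the compiler
        // from duplicating this load.
        PcDesc* res = _pc_descs[i];
        if (res != NULL && res->pc_offset() == pc_offset) {
          return res;
        }
      }
      return NULL; // cache miss; the caller falls back to a full search
    }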
@@ -109,10 +114,15 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
   friend class CodeCache;  // scavengable oops
  private:
+
+  // GC support to help figure out if an nmethod has been
+  // cleaned/unloaded by the current GC.
+  static unsigned char _global_unloading_clock;
+
   // Shared fields for all nmethod's
   Method* _method;
   int _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
   jmethodID _jmethod_id; // Cache of method()->jmethod_id()
 
@@ -122,11 +132,17 @@
   oop _speculation_log;
 #endif
 
   // To support simple linked-list chaining of nmethods:
   nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
-  nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+
+  union {
+    // Used by G1 to chain nmethods.
+    nmethod* _unloading_next;
+    // Used by non-G1 GCs to chain nmethods.
+    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+  };
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod* volatile _oops_do_mark_link;
 
   AbstractCompiler* _compiler; // The compiler which compiled this nmethod
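The union overlays the two list links because they are never live at the same time: a given VM runs either G1, which chains nmethods through _unloading_next during class unloading, or another GC, which uses _scavenge_root_link. A sketch of the idea in isolation, with hypothetical names:

    // Hypothetical illustration: two intrusive list links that are never
    // live simultaneously can share one pointer's worth of storage.
    struct Node {
      union {
        Node* link_used_by_gc_a; // only touched while GC A owns the chain
        Node* link_used_by_gc_b; // only touched while GC B owns the chain
      };
    };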
@@ -183,10 +199,12 @@
   unsigned int _lazy_critical_native:1; // Lazy JNI critical native
   unsigned int _has_wide_vectors:1;     // Preserve wide vectors at safepoints
 
   // Protected by Patching_lock
   volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
+
+  volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
 
 #ifdef ASSERT
   bool _oops_are_stale; // indicates that it's no longer safe to access oops section
 #endif
 
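Together the two clock fields give a cheap "was this nmethod already cleaned in the current unloading cycle?" test: the GC bumps the global clock once per cycle and stamps each nmethod it cleans with the current value. A sketch of the comparison, as a hypothetical helper built on the accessors this patch declares further down:

    // Hypothetical helper; not part of the patch.
    static bool cleaned_in_current_cycle(nmethod* nm) {
      return nm->unloading_clock() == nmethod::global_unloading_clock();
    }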
@@ -445,16 +463,28 @@
 
   // Make the nmethod non entrant. The nmethod will continue to be
   // alive. It is used when an uncommon trap happens. Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool make_not_entrant() {
+    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
+    return make_not_entrant_or_zombie(not_entrant);
+  }
   bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool unload_reported() { return _unload_reported; }
   void set_unload_reported() { _unload_reported = true; }
+
+  void set_unloading_next(nmethod* next) { _unloading_next = next; }
+  nmethod* unloading_next() { return _unloading_next; }
+
+  static unsigned char global_unloading_clock() { return _global_unloading_clock; }
+  static void increase_unloading_clock();
+
+  void set_unloading_clock(unsigned char unloading_clock);
+  unsigned char unloading_clock();
 
   bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
   void mark_for_deoptimization() { _marked_for_deoptimization = true; }
 
   void make_unloaded(BoolObjectClosure* is_alive, oop cause);
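set_unloading_next/unloading_next give G1 an intrusive singly linked list, so worker threads can queue nmethods for later passes without allocating. A hypothetical usage sketch (names not from the patch):

    // Hypothetical worker-local list of nmethods awaiting later processing.
    static nmethod* _postponed_list = NULL;

    static void push_postponed(nmethod* nm) {
      nm->set_unloading_next(_postponed_list); // chain through the union field
      _postponed_list = nm;
    }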
@@ -544,11 +574,11 @@
   // Exception cache support
   ExceptionCache* exception_cache() const { return _exception_cache; }
   void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
-  void remove_from_exception_cache(ExceptionCache* ec);
+  void clean_exception_cache(BoolObjectClosure* is_alive);
 
   // implicit exceptions support
   address continuation_for_implicit_exception(address pc);
 
   // On-stack replacement support
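The interface change replaces caller-driven removal of a single entry with a GC-driven sweep: clean_exception_cache walks the cache and drops entries whose cached exception type is no longer alive. A rough sketch of such a sweep, assuming hypothetical ExceptionCache accessors (next/set_next and an entry_is_alive liveness test):

    void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
      ExceptionCache* prev = NULL;
      ExceptionCache* curr = exception_cache();
      while (curr != NULL) {
        ExceptionCache* next = curr->next();
        if (!entry_is_alive(curr, is_alive)) { // hypothetical liveness test
          // unlink the dead entry and free it
          if (prev == NULL) set_exception_cache(next); else prev->set_next(next);
          delete curr;
        } else {
          prev = curr;
        }
        curr = next;
      }
    }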
@@ -567,10 +597,14 @@
   void cleanup_inline_caches();
   bool inlinecache_check_contains(address addr) const {
     return (addr >= code_begin() && addr < verified_entry_point());
   }
 
+  // Verify calls to dead methods have been cleaned.
+  void verify_clean_inline_caches();
+  // Verify and count cached icholder relocations.
+  int verify_icholder_relocations();
   // Check that all metadata is still alive
   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 
   // unlink and deallocate this nmethod
   // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
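A plausible debug-build call site for the two new checks, after unloading has run (hypothetical; not part of the patch):

    #ifdef ASSERT
      nm->verify_clean_inline_caches(); // no inline cache may still call a dead method
      int count = nm->verify_icholder_relocations(); // presumably checked against a
                                                     // global icholder count elsewhere
    #endif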
@@ -599,12 +633,23 @@
   void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; }
 #endif
 
   // GC support
   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+  // The parallel versions are used by G1.
+  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
+  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+
+ private:
+  // Unload a nmethod if the *root object is dead.
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
-
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+
+  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
+  void mark_metadata_on_stack_non_relocs();
+
+ public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
   void oops_do(OopClosure* f, bool allow_zombie);
   bool detect_scavenge_root_oops();
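The split into a parallel phase and a postponed phase suggests the usual two-pass shape: each worker first runs do_unloading_parallel on its share of nmethods, and work that cannot proceed while other workers may still touch the same data is finished after all workers synchronize. A hedged sketch, assuming the bool return value flags that postponed work exists:

    // Hypothetical driver for one worker thread.
    void worker_unload(nmethod* nm, BoolObjectClosure* is_alive, bool unloading_occurred) {
      if (nm->do_unloading_parallel(is_alive, unloading_occurred)) {
        push_postponed(nm); // revisit after the worker barrier (see sketch above)
      }
    }

    // Run after all workers reach the barrier.
    void drain_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
      for (nmethod* nm = _postponed_list; nm != NULL; nm = nm->unloading_next()) {
        nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
      }
    }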