comparison src/share/vm/code/nmethod.hpp @ 2491:0654ee04b214

Merge with OpenJDK.
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Fri, 22 Apr 2011 15:30:53 +0200
parents 0cd39a385a72 3d58a4983660
children 75a99b4f1c98
comparing 2490:29246b1d2d3c with 2491:0654ee04b214
@@ -32,11 +32,10 @@
 // exception/pc/handler information.
 
 class ExceptionCache : public CHeapObj {
   friend class VMStructs;
  private:
-  static address _unwind_handler;
   enum { cache_size = 16 };
   klassOop _exception_type;
   address  _pc[cache_size];
   address  _handler[cache_size];
   int      _count;
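
Besides dropping the static _unwind_handler, this hunk shows ExceptionCache's basic layout: one cached exception type plus parallel pc/handler arrays of 16 entries and a fill count (the lookup and insert helpers appear in the next hunk). A minimal standalone sketch of that scheme, for illustration only; SimpleExceptionCache and its linear scan are assumptions, not HotSpot's implementation, which also has to deal with concurrency:

#include <cstddef>

typedef void* Address;  // stand-in for HotSpot's 'address'

class SimpleExceptionCache {
  enum { cache_size = 16 };
  Address _pc[cache_size];       // faulting pcs seen so far
  Address _handler[cache_size];  // handler chosen for each pc
  int     _count;                // number of valid entries
 public:
  SimpleExceptionCache() : _count(0) {}

  // Return the cached handler for pc, or NULL on a miss.
  Address test_address(Address pc) const {
    for (int i = 0; i < _count; i++)
      if (_pc[i] == pc) return _handler[i];
    return NULL;
  }

  // Record a pc/handler pair; fails once the cache is full.
  bool add_address_and_handler(Address pc, Address handler) {
    if (_count >= cache_size) return false;
    _pc[_count] = pc;
    _handler[_count] = handler;
    _count++;
    return true;
  }
};
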
@@ -60,28 +59,25 @@
 
   address match(Handle exception, address pc);
   bool    match_exception_with_space(Handle exception) ;
   address test_address(address addr);
   bool    add_address_and_handler(address addr, address handler) ;
-
-  static address unwind_handler() { return _unwind_handler; }
 };
 
 
 // cache pc descs found in earlier inquiries
 class PcDescCache VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
  private:
   enum { cache_size = 4 };
-  PcDesc* _last_pc_desc;         // most recent pc_desc found
   PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
  public:
-  PcDescCache() { debug_only(_last_pc_desc = NULL); }
+  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
   void    reset_to(PcDesc* initial_pc_desc);
   PcDesc* find_pc_desc(int pc_offset, bool approximate);
   void    add_pc_desc(PcDesc* pc_desc);
-  PcDesc* last_pc_desc() { return _last_pc_desc; }
+  PcDesc* last_pc_desc() { return _pc_descs[0]; }
 };
 
 
 // nmethods (native methods) are the compiled code versions of Java methods.
 //
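
The PcDescCache change removes the separate _last_pc_desc pointer: the most recently found PcDesc now simply lives in slot 0 of the existing _pc_descs array, so last_pc_desc() returns _pc_descs[0] and the debug-build constructor only needs to clear that slot. A standalone sketch of keeping the most recent entry in the front slot; MruCache and Entry are invented names, and the shift-to-front policy is an assumption about how entries might be ordered:

#include <cstddef>

struct Entry { int pc_offset; };

class MruCache {
  enum { cache_size = 4 };
  Entry* _slots[cache_size];  // _slots[0] is always the most recent hit
 public:
  MruCache() { for (int i = 0; i < cache_size; i++) _slots[i] = NULL; }

  // Replaces a separate "last" pointer: the front slot is the answer.
  Entry* last() const { return _slots[0]; }

  // Insert a new entry at the front, shifting older ones back.
  void add(Entry* e) {
    for (int i = cache_size - 1; i > 0; i--) _slots[i] = _slots[i - 1];
    _slots[0] = e;
  }

  // Linear probe; a hit could also be rotated to the front.
  Entry* find(int pc_offset) const {
    for (int i = 0; i < cache_size; i++)
      if (_slots[i] != NULL && _slots[i]->pc_offset == pc_offset) return _slots[i];
    return NULL;
  }
};

Folding the bookkeeping into the array removes one field and one place where a "last" pointer could drift out of sync with the array contents.
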
@@ -179,11 +175,11 @@
   // set during construction
   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
 
   // Protected by Patching_lock
-  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded)
+  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 #endif
 
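
The only change above is the closing brace in the comment, but the comment itself documents the life-cycle values a one-byte _state field can take. A rough standalone model of how such a field is typically declared and queried; the numeric values and helper predicates below are illustrative assumptions, not taken from this header:

class CodeState {
 public:
  enum State { alive = 0, not_entrant = 1, zombie = 2, unloaded = 3 };
 private:
  unsigned char _state;  // fits in the same single byte as in the hunk above
 public:
  CodeState() : _state(alive) {}
  bool is_alive()       const { return _state == alive || _state == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie()      const { return _state == zombie; }
  bool is_unloaded()    const { return _state == unloaded; }
  void set_state(State s) { _state = (unsigned char) s; }
};
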
@@ -195,11 +191,14 @@
 
   jbyte _scavenge_root_state;
 
   NOT_PRODUCT(bool _has_debug_info; )
 
-  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
+  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
+  // and is not made into a zombie. However, once the nmethod is made into
+  // a zombie, it will be locked one final time if CompiledMethodUnload
+  // event processing needs to be done.
   jint _lock_count;
 
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
   // stack. An not_entrant method can be removed when there is no
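
The expanded comment describes the two pieces of bookkeeping that keep an nmethod from being reclaimed too early: a non-zero _lock_count pins it (now explicitly including the final lock taken for CompiledMethodUnload processing), and the sweep mark records the last sweeper pass that saw it on a stack. A simplified standalone model of that interplay; SweptMethod, its field names and its one-pass threshold are illustrative, and the real sweeper's policy and synchronization are more involved:

class SweptMethod {
  int  _lock_count;            // non-zero: locked, must not be flushed or zombied
  long _stack_traversal_mark;  // sweep pass that last saw this method on a stack
 public:
  SweptMethod() : _lock_count(0), _stack_traversal_mark(0) {}

  void lock()   { _lock_count++; }
  void unlock() { _lock_count--; }
  bool is_locked() const { return _lock_count > 0; }

  // Called by the sweeper whenever an activation of this method is found.
  void mark_as_seen_on_stack(long current_traversal) {
    _stack_traversal_mark = current_traversal;
  }

  // A not_entrant method may be reclaimed once enough sweep passes have gone
  // by without it being seen on any stack and nothing holds the lock.
  bool can_be_removed(long current_traversal) const {
    return !is_locked() && current_traversal > _stack_traversal_mark + 1;
  }
};
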
@@ -228,10 +227,11 @@
   friend class nmethodLocker;
 
   // For native wrappers
   nmethod(methodOop method,
           int nmethod_size,
+          int compile_id,
           CodeOffsets* offsets,
           CodeBuffer *code_buffer,
           int frame_size,
           ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
           ByteSize basic_lock_sp_offset,       /* synchronized natives only */
@@ -298,10 +298,11 @@
           ImplicitExceptionTable* nul_chk_table,
           AbstractCompiler* compiler,
           int comp_level);
 
   static nmethod* new_native_nmethod(methodHandle method,
+                                     int compile_id,
                                      CodeBuffer *code_buffer,
                                      int vep_offset,
                                      int frame_complete,
                                      int frame_size,
                                      ByteSize receiver_sp_offset,
@@ -458,10 +459,11 @@
   inline void initialize_immediate_oop(oop* dest, jobject handle);
 
  public:
   void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
   void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
+  void verify_oop_relocations();
 
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
   // Non-perm oop support
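
The new verify_oop_relocations() joins an existing pair of fixer overloads in which the argument-less form forwards a whole-blob request to a private range worker. A self-contained sketch of that forwarding-overload pattern; RelocHolder, OopSlot and the vector-based walk are invented for illustration and are not HotSpot's relocation machinery:

#include <cassert>
#include <cstddef>
#include <vector>

typedef unsigned char* Address;
struct OopSlot { Address where; void* value; };  // a recorded embedded-oop site

class RelocHolder {
  std::vector<OopSlot> _slots;

  // Shared worker: visit every recorded slot inside [begin, end).
  void fix_oop_relocations(Address begin, Address end, bool initialize_immediates) {
    for (size_t i = 0; i < _slots.size(); i++) {
      OopSlot& s = _slots[i];
      if (begin != NULL && (s.where < begin || s.where >= end)) continue;
      // ... rewrite s.value here; initialize_immediates would additionally
      // seed slots that have never been written ...
      (void) initialize_immediates;
    }
  }
 public:
  // Range form: patch only the slots in the instructions just modified.
  void fix_oop_relocations(Address begin, Address end) {
    fix_oop_relocations(begin, end, false);
  }
  // Convenience form: NULL bounds mean "the whole blob".
  void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }

  // Verify-only companion: same walk, but only checks invariants.
  void verify_oop_relocations() const {
    for (size_t i = 0; i < _slots.size(); i++) assert(_slots[i].where != NULL);
  }
};
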
@@ -498,12 +500,12 @@
 
   // implicit exceptions support
   address continuation_for_implicit_exception(address pc);
 
   // On-stack replacement support
-  int      osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
-  address  osr_entry() const     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
+  int      osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
+  address  osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
   void     invalidate_osr_method();
   nmethod* osr_link() const      { return _osr_link; }
   void     set_osr_link(nmethod *n) { _osr_link = n; }
 
   // tells whether frames described by this nmethod can be deoptimized
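
Both OSR accessors now assert is_osr_method() rather than repeating the raw _entry_bci != InvocationEntryBci test; presumably the predicate wraps that same comparison, so behaviour is unchanged and the recurring test simply gets a name. A minimal standalone sketch of the idea; the class, constant value and predicate body below are illustrative, not the real nmethod:

#include <cassert>

const int InvocationEntryBci = -1;  // illustrative sentinel for "normal entry"

class OsrInfo {
  int   _entry_bci;
  void* _osr_entry_point;
 public:
  OsrInfo(int bci, void* entry) : _entry_bci(bci), _osr_entry_point(entry) {}

  // Presumed shape of the predicate: OSR methods have a real bci.
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  int osr_entry_bci() const {
    assert(is_osr_method() && "wrong kind of nmethod");
    return _entry_bci;
  }
  void* osr_entry() const {
    assert(is_osr_method() && "wrong kind of nmethod");
    return _osr_entry_point;
  }
};
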
@@ -523,12 +525,13 @@
 
  protected:
   void flush();
 
  public:
-  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
-  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
+  // When true is returned, it is unsafe to remove this nmethod even if
+  // it is a zombie, since the VM or the ServiceThread might still be
+  // using it.
   bool is_locked_by_vm() const { return _lock_count > 0; }
 
   // See comment at definition of _last_seen_on_stack
   void mark_as_seen_on_stack();
   bool can_not_entrant_be_converted();
@@ -607,14 +610,10 @@
 
   // verify operations
   void verify();
   void verify_scopes();
   void verify_interrupt_point(address interrupt_point);
-
-  // print compilation helper
-  static void print_compilation(outputStream *st, const char *method_name, const char *title,
-                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
 
   // printing support
   void print() const;
   void print_code();
   void print_relocations() PRODUCT_RETURN;
@@ -627,11 +626,11 @@
   void print_nul_chk_table() PRODUCT_RETURN;
   void print_nmethod(bool print_code);
 
   // need to re-define this from CodeBlob else the overload hides it
   virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
-  void print_on(outputStream* st, const char* title) const;
+  void print_on(outputStream* st, const char* msg) const;
 
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
   void log_state_change() const;
@@ -693,17 +692,24 @@
   static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
   static int entry_bci_offset()       { return offset_of(nmethod, _entry_bci); }
 
 };
 
-// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
+// Locks an nmethod so its code will not get removed and it will not
+// be made into a zombie, even if it is a not_entrant method. After the
+// nmethod becomes a zombie, if CompiledMethodUnload event processing
+// needs to be done, then lock_nmethod() is used directly to keep the
+// generated code from being reused too early.
 class nmethodLocker : public StackObj {
   nmethod* _nm;
 
  public:
 
-  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
+  // note: nm can be NULL
+  // Only JvmtiDeferredEvent::compiled_method_unload_event()
+  // should pass zombie_ok == true.
+  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
   static void unlock_nmethod(nmethod* nm); // (ditto)
 
   nmethodLocker(address pc); // derive nm from pc
   nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
   nmethodLocker() { _nm = NULL; }
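
nmethodLocker itself is a plain stack object: constructing one takes the lock on the wrapped nmethod, and the lock is presumably dropped again when the locker goes out of scope; the new zombie_ok flag exists only so the JVMTI unload-event path can lock an nmethod that is already a zombie. A standalone sketch of that RAII pattern over a counted lock; LockedCode and SimpleLocker are simplified stand-ins for illustration, and the real code needs atomicity that the sketch omits:

#include <cassert>
#include <cstddef>

struct LockedCode {         // illustrative target; the real lock is the nmethod's _lock_count
  int  lock_count;
  bool zombie;
  LockedCode() : lock_count(0), zombie(false) {}
};

class SimpleLocker {        // stack-allocated, like nmethodLocker : StackObj
  LockedCode* _nm;
 public:
  // note: nm can be NULL; zombie_ok mirrors the new parameter above and is
  // meant only for the unload-event path that must lock an existing zombie.
  static void lock(LockedCode* nm, bool zombie_ok = false) {
    if (nm == NULL) return;
    assert(zombie_ok || !nm->zombie);
    nm->lock_count++;       // while non-zero the code must not be reclaimed
  }
  static void unlock(LockedCode* nm) {
    if (nm == NULL) return;
    nm->lock_count--;
  }

  SimpleLocker(LockedCode* nm) : _nm(nm) { lock(_nm); }
  SimpleLocker() : _nm(NULL) {}
  ~SimpleLocker() { unlock(_nm); }
};
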