comparison src/share/vm/code/nmethod.hpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children c7c777385a15
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()             { return _exception_type; }
  klassOop* exception_type_addr()        { return &_exception_type; }
  ExceptionCache* next()                 { return _next; }
  void      set_next(ExceptionCache* ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};
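
// Lookup sketch (illustrative only, not part of this header): an nmethod keeps
// a linked list of ExceptionCache nodes, one node per exception klass, each
// holding up to cache_size pc->handler pairs. A hit avoids recomputing the
// handler for a given (exception type, throwing pc) pair:
//
//   for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//     if (ec->exception_type() == exception->klass()) {
//       address handler = ec->match(exception, pc);  // NULL on a miss
//       ...
//     }
//   }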


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};
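
// Lookup sketch (illustrative): a query first tests the most recent hit, then
// the small ring of recently found PcDescs, and only then falls back to
// searching the nmethod's full scopes_pcs section (see nmethod::find_pc_desc
// further below):
//
//   PcDesc* d = cache.find_pc_desc(pc_offset, /*approximate=*/ false);
//   if (d == NULL) {
//     d = /* search scopes_pcs_begin()..scopes_pcs_end() */;
//     cache.add_pc_desc(d);  // remember it for the next inquiry
//   }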


// nmethods (native methods) are the compiled code versions of Java methods.

struct nmFlags {
  friend class VMStructs;
  unsigned int version:8;                 // version number (0 = first version)
  unsigned int level:4;                   // optimization level
  unsigned int age:4;                     // age (in # of sweep steps)

  unsigned int state:2;                   // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1;    // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1;        // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1;  // Used for maintenance of dependencies
  unsigned int markedForReclamation:1;    // Used by NMethodSweeper

  unsigned int has_unsafe_access:1;       // May fault due to unsafe access.

  void clear();
};
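
// Size sketch (illustrative): the bit fields above total 8+4+4+2+5*1 = 23
// bits, so the whole per-nmethod flag state should pack into a single 32-bit
// word, which keeps nmethods small and flag reads cheap:
//
//   assert(sizeof(nmFlags) == sizeof(unsigned int), "nmFlags should pack into one word");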


// A nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

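// Boundary sketch (illustrative): because all of these parts are laid out
// contiguously in one code-cache block, each region is described by a byte
// offset from header_begin(), and each region ends where the next begins.
// For example, the stub region is exactly
//
//   [ header_begin() + _stub_offset, header_begin() + _consts_offset )
//
// which is what the stub_begin()/stub_end() accessors below return.
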
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
 private:
  // Shared fields for all nmethods
  static int _zombie_instruction_size;

  methodOop _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method

  nmethod*  _link;             // To support simple linked-list chaining of nmethods

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by this offset
  int _deoptimize_offset;
  int _stub_offset;
  int _consts_offset;
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;             // which compilation made this nmethod
  int _comp_level;             // compilation level

  // offsets for entry points
  address _entry_point;           // entry point with class check
  address _verified_entry_point;  // entry point without class check
  address _osr_entry_point;       // entry point for on stack replacement

  nmFlags flags;                    // various flags to keep track of nmethod state
  bool    _markedForDeoptimization; // Used for stack deoptimization
  enum { alive       = 0,
         not_entrant = 1, // uncommon trap has happened but activations may still exist
         zombie      = 2,
         unloaded    = 3 };
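
  // Lifecycle sketch (illustrative): an nmethod's state moves forward through
  // the enum above. make_not_entrant() blocks new entries while existing
  // activations finish; make_zombie() marks dead code that the NMethodSweeper
  // may later flush(); make_unloaded() is the GC path taken when an oop the
  // code depends on dies. See make_not_entrant_or_zombie() for the shared
  // transition logic.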

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to current sweep invocation count if it is seen on the
  // stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long  _stack_traversal_mark;

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);
  void  check_store();

  const char* reloc_string_for(u_char* begin, u_char* end);
  void make_not_entrant_or_zombie(int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);
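
  // Creation sketch (illustrative; variable names hypothetical): a compiler
  // back end fills a CodeBuffer plus the debug and dependency tables, then
  // hands everything to the factory, which sizes one contiguous code-cache
  // block and copies the parts into the layout documented above:
  //
  //   nmethod* nm = nmethod::new_nmethod(method, compile_id, entry_bci,
  //                                      offsets, orig_pc_offset, recorder,
  //                                      dependencies, code_buffer, frame_size,
  //                                      oop_maps, handler_table, nul_chk_table,
  //                                      compiler, comp_level);
  //   if (nm == NULL) { /* allocation failed, e.g. the code cache is full */ }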

  // accessors
  methodOop method() const          { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const       { return _has_debug_info; }
  void set_has_debug_info(bool f)   { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const           { return true; }
  bool is_java_method() const       { return !method()->is_native(); }
  bool is_native_method() const     { return method()->is_native(); }
  bool is_osr_method() const        { return _entry_bci != InvocationEntryBci; }
  bool is_osr_only_method() const   { return is_osr_method(); }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin          () const { return _entry_point; }
  address code_end            () const { return header_begin() + _stub_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address deopt_handler_begin () const { return header_begin() + _deoptimize_offset; }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _consts_offset; }
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return header_begin() + _scopes_data_offset; }
  address scopes_data_begin   () const { return header_begin() + _scopes_data_offset; }
  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }

  int code_size         () const { return code_end()          - code_begin();          }
  int stub_size         () const { return stub_end()          - stub_begin();          }
  int consts_size       () const { return consts_end()        - consts_begin();        }
  int scopes_data_size  () const { return scopes_data_end()   - scopes_data_begin();   }
  int scopes_pcs_size   () const { return (intptr_t)scopes_pcs_end() - (intptr_t)scopes_pcs_begin(); }
  int dependencies_size () const { return dependencies_end()  - dependencies_begin();  }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size        () const;

  bool code_contains         (address addr) const { return code_begin()          <= addr && addr < code_end();          }
  bool stub_contains         (address addr) const { return stub_begin()          <= addr && addr < stub_end();          }
  bool consts_contains       (address addr) const { return consts_begin()        <= addr && addr < consts_end();        }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin()   <= addr && addr < scopes_data_end();   }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin()    <= addr && addr < scopes_pcs_end();    }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const      { return flags.state == alive; }
  bool  is_alive() const       { return flags.state == alive || flags.state == not_entrant; }
  bool  is_not_entrant() const { return flags.state == not_entrant; }
  bool  is_zombie() const      { return flags.state == zombie; }
  bool  is_unloaded() const    { return flags.state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be alive.
  // It is used when an uncommon trap happens.
  void  make_not_entrant() { make_not_entrant_or_zombie(not_entrant); }
  void  make_zombie()      { make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()     { return _unload_reported; }
  void  set_unload_reported() { _unload_reported = true; }

  bool  is_marked_for_deoptimization() const { return _markedForDeoptimization; }
  void  mark_for_deoptimization()            { _markedForDeoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool  has_dependencies()             { return dependencies_size() != 0; }
  void  flush_dependencies(BoolObjectClosure* is_alive);
  bool  has_flushed_dependencies()     { return flags.hasFlushedDependencies; }
  void  set_has_flushed_dependencies() {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool  is_marked_for_reclamation() const { return flags.markedForReclamation; }
  void  mark_for_reclamation()            { check_safepoint(); flags.markedForReclamation = 1; }
  void  unmark_for_reclamation()          { check_safepoint(); flags.markedForReclamation = 0; }

  bool  has_unsafe_access() const     { return flags.has_unsafe_access; }
  void  set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }

  int   level() const             { return flags.level; }
  void  set_level(int newLevel)   { check_safepoint(); flags.level = newLevel; }

  int   comp_level() const        { return _comp_level; }

  int   version() const           { return flags.version; }
  void  set_version(int v);

  // Sweeper support
  long  stack_traversal_mark()           { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);
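
  // Lookup flow sketch (illustrative; caller code hypothetical): the runtime
  // consults the cache before doing a full handler search, and records the
  // result on a miss so the next throw at the same pc is fast:
  //
  //   address handler = nm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == NULL) {
  //     handler = /* slow path: compute the handler for (exception, pc) */;
  //     nm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }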

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int     osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void    invalidate_osr_method();
  nmethod* link() const         { return _link; }
  void     set_link(nmethod* n) { _link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f);

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*)((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // deopt
  // returns true if the pc is the one you would expect when the frame is being deopted.
  bool is_deopt_pc(address pc);
  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
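
  // Deopt sketch (illustrative): when a frame is deoptimized, its pc is
  // redirected to the deopt handler (see deopt_handler_begin()) and the
  // original pc is stashed in the frame at sp + _orig_pc_offset, so a stack
  // walker can still attribute the frame to its real code location:
  //
  //   if (nm->is_deopt_pc(fr->pc())) {
  //     address real_pc = nm->get_original_pc(fr);  // pc before the deopt
  //     ...
  //   }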

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                    const PRODUCT_RETURN;
  void print_code()                     PRODUCT_RETURN;
  void print_relocations()              PRODUCT_RETURN;
  void print_pcs()                      PRODUCT_RETURN;
  void print_scopes()                   PRODUCT_RETURN;
  void print_dependencies()             PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)    PRODUCT_RETURN;
  void print_handler_table()            PRODUCT_RETURN;
  void print_nul_chk_table()            PRODUCT_RETURN;
  void print_nmethod(bool print_code)   PRODUCT_RETURN;

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change(int state) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end) PRODUCT_RETURN;
  static void print_statistics()        PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
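
// Usage sketch (illustrative; caller code hypothetical): nmethodLocker is a
// RAII guard that raises the nmethod's _lock_count for the guard's lifetime,
// so the sweeper will not flush the code out from under a thread inspecting it:
//
//   {
//     nmethodLocker nl(pc);    // locks the nmethod containing 'pc'
//     nmethod* nm = nl.code();
//     if (nm != NULL) {
//       // safe to examine nm's code and debug info here
//     }
//   }                          // destructor unlocks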