# HG changeset patch # User kamg # Date 1295473913 28800 # Node ID 9afee0b9fc1d24bd538949a8027deb5e71fb26d4 # Parent 2f33b03bd915727739b63b6ca72955fcb7902797 7012505: BreakpointWithFullGC.sh fails with Internal Error (src/share/vm/oops/methodOop.cpp:220) Summary: Rebuild breakpoint cache at gc_epilogue instead of during oops_do Reviewed-by: dcubed, ysr, coleenp diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/ci/ciEnv.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -409,15 +409,15 @@ } else { fail_type = _unloaded_ciinstance_klass; } - klassOop found_klass; + KlassHandle found_klass; if (!require_local) { - found_klass = - SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, - KILL_COMPILE_ON_FATAL_(fail_type)); + klassOop kls = SystemDictionary::find_constrained_instance_or_array_klass( + sym, loader, KILL_COMPILE_ON_FATAL_(fail_type)); + found_klass = KlassHandle(THREAD, kls); } else { - found_klass = - SystemDictionary::find_instance_or_array_klass(sym, loader, domain, - KILL_COMPILE_ON_FATAL_(fail_type)); + klassOop kls = SystemDictionary::find_instance_or_array_klass( + sym, loader, domain, KILL_COMPILE_ON_FATAL_(fail_type)); + found_klass = KlassHandle(THREAD, kls); } // If we fail to find an array klass, look again for its element type. @@ -444,9 +444,9 @@ } } - if (found_klass != NULL) { + if (found_klass() != NULL) { // Found it. Build a CI handle. 
- return get_object(found_klass)->as_klass(); + return get_object(found_klass())->as_klass(); } if (require_local) return NULL; diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -4979,6 +4979,7 @@ if (should_unload_classes()) { CodeCache::gc_epilogue(); } + JvmtiExport::gc_epilogue(); // If we encountered any (marking stack / work queue) overflow // events during the current CMS cycle, take appropriate diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -113,6 +113,7 @@ Threads::gc_epilogue(); CodeCache::gc_epilogue(); + JvmtiExport::gc_epilogue(); // refs processing: clean slate GenMarkSweep::_ref_processor = NULL; diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -258,6 +258,7 @@ BiasedLocking::restore_marks(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); + JvmtiExport::gc_epilogue(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -1054,6 +1054,7 @@ 
Threads::gc_epilogue(); CodeCache::gc_epilogue(); + JvmtiExport::gc_epilogue(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/memory/genMarkSweep.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -158,6 +158,7 @@ Threads::gc_epilogue(); CodeCache::gc_epilogue(); + JvmtiExport::gc_epilogue(); if (PrintGC && !PrintGCDetails) { gch->print_heap_change(gch_prev_used); diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/prims/jvmtiExport.cpp --- a/src/share/vm/prims/jvmtiExport.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/prims/jvmtiExport.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -2268,6 +2268,14 @@ JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f); } +void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { + JvmtiTagMap::weak_oops_do(is_alive, f); +} + +void JvmtiExport::gc_epilogue() { + JvmtiCurrentBreakpoints::gc_epilogue(); +} + // Onload raw monitor transition. 
void JvmtiExport::transition_pending_onload_raw_monitors() { JvmtiPendingMonitors::transition_raw_monitors(); diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/prims/jvmtiExport.hpp --- a/src/share/vm/prims/jvmtiExport.hpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/prims/jvmtiExport.hpp Wed Jan 19 13:51:53 2011 -0800 @@ -346,6 +346,8 @@ static void cleanup_thread (JavaThread* thread) KERNEL_RETURN; static void oops_do(OopClosure* f) KERNEL_RETURN; + static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) KERNEL_RETURN; + static void gc_epilogue() KERNEL_RETURN; static void transition_pending_onload_raw_monitors() KERNEL_RETURN; diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/prims/jvmtiImpl.cpp --- a/src/share/vm/prims/jvmtiImpl.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/prims/jvmtiImpl.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -212,7 +212,13 @@ for (int i=0; i<len; i++) { GrowableElement *e = _elements->at(i); e->oops_do(f); - _cache[i] = e->getCacheValue(); + } +} + +void GrowableCache::gc_epilogue() { + int len = _elements->length(); + for (int i=0; i<len; i++) { + _cache[i] = _elements->at(i)->getCacheValue(); } } @@ -394,6 +400,10 @@ _bps.oops_do(f); } +void JvmtiBreakpoints::gc_epilogue() { + _bps.gc_epilogue(); +} + void JvmtiBreakpoints::print() { #ifndef PRODUCT ResourceMark rm; @@ -523,6 +533,12 @@ } } +void JvmtiCurrentBreakpoints::gc_epilogue() { + if (_jvmti_breakpoints != NULL) { + _jvmti_breakpoints->gc_epilogue(); + } +} + /////////////////////////////////////////////////////////////// // // class VM_GetOrSetLocal diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/prims/jvmtiImpl.hpp --- a/src/share/vm/prims/jvmtiImpl.hpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/prims/jvmtiImpl.hpp Wed Jan 19 13:51:53 2011 -0800 @@ -117,6 +117,8 @@ void clear(); // apply f to every element and update the cache void oops_do(OopClosure* f); + // update the cache after a full gc + void gc_epilogue(); }; @@ -148,6 +150,7 @@ void remove (int index) { _cache.remove(index); } void clear() { _cache.clear(); } void 
oops_do(OopClosure* f) { _cache.oops_do(f); } + void gc_epilogue() { _cache.gc_epilogue(); } }; @@ -282,6 +285,7 @@ int clear(JvmtiBreakpoint& bp); void clearall_in_class_at_safepoint(klassOop klass); void clearall(); + void gc_epilogue(); }; @@ -325,6 +329,7 @@ static inline bool is_breakpoint(address bcp); static void oops_do(OopClosure* f); + static void gc_epilogue(); }; // quickly test whether the bcp matches a cached breakpoint in the list diff -r 2f33b03bd915 -r 9afee0b9fc1d src/share/vm/runtime/jniHandles.cpp --- a/src/share/vm/runtime/jniHandles.cpp Wed Jan 19 08:16:45 2011 -0800 +++ b/src/share/vm/runtime/jniHandles.cpp Wed Jan 19 13:51:53 2011 -0800 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "oops/oop.inline.hpp" -#include "prims/jvmtiTagMap.hpp" +#include "prims/jvmtiExport.hpp" #include "runtime/jniHandles.hpp" #include "runtime/mutexLocker.hpp" #ifdef TARGET_OS_FAMILY_linux @@ -431,10 +431,10 @@ } /* - * JvmtiTagMap may also contain weak oops. The iteration of it is placed - * here so that we don't need to add it to each of the collectors. + * JVMTI data structures may also contain weak oops. The iteration of them + * is placed here so that we don't need to add it to each of the collectors. */ - JvmtiTagMap::weak_oops_do(is_alive, f); + JvmtiExport::weak_oops_do(is_alive, f); }