# HG changeset patch
# User ehelin
# Date 1386861224 -3600
# Node ID e3995ab443937d1bd542c9f405a2f2ac6ea63163
# Parent 9fbabcbb875b1c14726c0bd2e0942ebf826b73cd
# Parent fa76dce60db72ecbf37448126500302d1e98e36b
Merge

diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -27,6 +27,7 @@
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1HotCardCache.hpp"
+#include "runtime/java.hpp"
 
 ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
   _threads(NULL), _n_threads(0),
@@ -62,6 +63,10 @@
   for (int i = _n_threads - 1; i >= 0; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
+    if (t->osthread() == NULL) {
+      vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
+    }
+
     assert(t->cg1r() == this, "Conc refine thread should refer to this");
     _threads[i] = t;
     next = t;
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -553,6 +553,9 @@
   _cmThread = new ConcurrentMarkThread(this);
   assert(cmThread() != NULL, "CM Thread should have been created");
   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
+  if (_cmThread->osthread() == NULL) {
+    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
+  }
 
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -2433,20 +2433,6 @@
   _gc_tracer.report_object_count_after_gc(is_alive_closure());
 }
 
-void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
-  ClassLoaderData* cld = klass->class_loader_data();
-  // The actual processing of the klass is done when we
-  // traverse the list of Klasses in the class loader data.
-  PSParallelCompact::follow_class_loader(cm, cld);
-}
-
-void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
-  ClassLoaderData* cld = klass->class_loader_data();
-  // The actual processing of the klass is done when we
-  // traverse the list of Klasses in the class loader data.
-  PSParallelCompact::adjust_class_loader(cm, cld);
-}
-
 void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
                                             ClassLoaderData* cld) {
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -2455,13 +2441,6 @@
   cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
 }
 
-void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
-                                            ClassLoaderData* cld) {
-  cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
-               PSParallelCompact::adjust_klass_closure(),
-               true);
-}
-
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Dec 12 16:13:44 2013 +0100
@@ -1200,13 +1200,10 @@
                                                       T* p);
   template <class T> static inline void adjust_pointer(T* p);
 
-  static void follow_klass(ParCompactionManager* cm, Klass* klass);
-  static void adjust_klass(ParCompactionManager* cm, Klass* klass);
+  static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
 
   static void follow_class_loader(ParCompactionManager* cm,
                                   ClassLoaderData* klass);
-  static void adjust_class_loader(ParCompactionManager* cm,
-                                  ClassLoaderData* klass);
 
   // Compaction support.
   // Return true if p is in the range [beg_addr, end_addr).
@@ -1380,6 +1377,11 @@
   }
 }
 
+inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
+  oop holder = klass->klass_holder();
+  PSParallelCompact::mark_and_push(cm, &holder);
+}
+
 template <class T>
 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
   mark_and_push(_compaction_manager, p);
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/memory/metaspace.cpp
--- a/src/share/vm/memory/metaspace.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/memory/metaspace.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -287,7 +287,7 @@
   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 
   // Returns true if "word_size" is available in the VirtualSpace
-  bool is_available(size_t word_size) { return _top + word_size <= end(); }
+  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 
   MetaWord* top() const { return _top; }
   void inc_top(size_t word_size) { _top += word_size; }
@@ -3641,10 +3641,82 @@
     }
   }
 
+
+#define assert_is_available_positive(word_size) \
+  assert(vsn.is_available(word_size), \
+    err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
+            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
+            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
+
+#define assert_is_available_negative(word_size) \
+  assert(!vsn.is_available(word_size), \
+    err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
+            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
+            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
+
+  static void test_is_available_positive() {
+    // Reserve some memory.
+    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
+
+    // Commit some memory.
+    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
+    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
+    assert(expanded, "Failed to commit");
+
+    // Check that is_available accepts the committed size.
+    assert_is_available_positive(commit_word_size);
+
+    // Check that is_available accepts half the committed size.
+    size_t expand_word_size = commit_word_size / 2;
+    assert_is_available_positive(expand_word_size);
+  }
+
+  static void test_is_available_negative() {
+    // Reserve some memory.
+    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
+
+    // Commit some memory.
+    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
+    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
+    assert(expanded, "Failed to commit");
+
+    // Check that is_available doesn't accept a too large size.
+    size_t two_times_commit_word_size = commit_word_size * 2;
+    assert_is_available_negative(two_times_commit_word_size);
+  }
+
+  static void test_is_available_overflow() {
+    // Reserve some memory.
+    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
+
+    // Commit some memory.
+    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
+    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
+    assert(expanded, "Failed to commit");
+
+    // Calculate a size that will overflow the virtual space size.
+    void* virtual_space_max = (void*)(uintptr_t)-1;
+    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
+    size_t overflow_size = bottom_to_max + BytesPerWord;
+    size_t overflow_word_size = overflow_size / BytesPerWord;
+
+    // Check that is_available can handle the overflow.
+    assert_is_available_negative(overflow_word_size);
+  }
+
+  static void test_is_available() {
+    TestVirtualSpaceNodeTest::test_is_available_positive();
+    TestVirtualSpaceNodeTest::test_is_available_negative();
+    TestVirtualSpaceNodeTest::test_is_available_overflow();
+  }
 };
 
 void TestVirtualSpaceNode_test() {
   TestVirtualSpaceNodeTest::test();
+  TestVirtualSpaceNodeTest::test_is_available();
 }
 
 #endif
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/instanceClassLoaderKlass.cpp
--- a/src/share/vm/oops/instanceClassLoaderKlass.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/instanceClassLoaderKlass.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -150,10 +150,6 @@
 int InstanceClassLoaderKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   InstanceKlass::oop_update_pointers(cm, obj);
-  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
-  if (loader_data != NULL) {
-    PSParallelCompact::adjust_class_loader(cm, loader_data);
-  }
   return size_helper();
 }
 
 #endif // INCLUDE_ALL_GCS
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -2199,7 +2199,6 @@
     obj, \
     PSParallelCompact::adjust_pointer(p), \
     assert_is_in)
-  obj->update_header(cm);
   return size;
 }
 
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/instanceMirrorKlass.cpp
--- a/src/share/vm/oops/instanceMirrorKlass.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -155,8 +155,13 @@
   // Follow the klass field in the mirror.
   Klass* klass = java_lang_Class::as_Klass(obj);
   if (klass != NULL) {
-    // For anonymous classes we need to handle the class loader data,
-    // otherwise it won't be claimed and can be unloaded.
+    // An anonymous class doesn't have its own class loader, so the call
+    // to follow_klass will mark and push its java mirror instead of the
+    // class loader. When handling the java mirror for an anonymous class
+    // we need to make sure its class loader data is claimed, this is done
+    // by calling follow_class_loader explicitly. For non-anonymous classes
+    // the call to follow_class_loader is made when the class loader itself
+    // is handled.
     if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
       MarkSweep::follow_class_loader(klass->class_loader_data());
     } else {
@@ -183,7 +188,18 @@
   // Follow the klass field in the mirror.
   Klass* klass = java_lang_Class::as_Klass(obj);
   if (klass != NULL) {
-    PSParallelCompact::follow_klass(cm, klass);
+    // An anonymous class doesn't have its own class loader, so the call
+    // to follow_klass will mark and push its java mirror instead of the
+    // class loader. When handling the java mirror for an anonymous class
+    // we need to make sure its class loader data is claimed, this is done
+    // by calling follow_class_loader explicitly. For non-anonymous classes
+    // the call to follow_class_loader is made when the class loader itself
+    // is handled.
+    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+      PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
+    } else {
+      PSParallelCompact::follow_klass(cm, klass);
+    }
   } else {
     // If klass is NULL then this a mirror for a primitive type.
     // We don't have to follow them, since they are handled as strong
@@ -332,17 +348,6 @@
   int size = oop_size(obj);
   InstanceKlass::oop_update_pointers(cm, obj);
 
-  // Follow the klass field in the mirror.
-  Klass* klass = java_lang_Class::as_Klass(obj);
-  if (klass != NULL) {
-    PSParallelCompact::adjust_klass(cm, klass);
-  } else {
-    // If klass is NULL then this a mirror for a primitive type.
-    // We don't have to follow them, since they are handled as strong
-    // roots in Universe::oops_do.
-    assert(java_lang_Class::is_primitive(obj), "Sanity check");
-  }
-
   InstanceMirrorKlass_OOP_ITERATE( \
     start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
     PSParallelCompact::adjust_pointer(p), \
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/objArrayKlass.cpp
--- a/src/share/vm/oops/objArrayKlass.cpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/objArrayKlass.cpp	Thu Dec 12 16:13:44 2013 +0100
@@ -587,7 +587,6 @@
   assert (obj->is_objArray(), "obj must be obj array");
   objArrayOop a = objArrayOop(obj);
   int size = a->object_size();
-  a->update_header(cm);
   ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
   return size;
 }
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/oop.hpp
--- a/src/share/vm/oops/oop.hpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/oop.hpp	Thu Dec 12 16:13:44 2013 +0100
@@ -328,11 +328,6 @@
   // return the size of this oop. This is used by the MarkSweep collector.
   int adjust_pointers();
 
-#if INCLUDE_ALL_GCS
-  // Parallel old
-  void update_header(ParCompactionManager* cm);
-#endif // INCLUDE_ALL_GCS
-
   // mark-sweep support
   void follow_body(int begin, int end);
 
diff -r 9fbabcbb875b -r e3995ab44393 src/share/vm/oops/oop.pcgc.inline.hpp
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Tue Dec 10 16:18:26 2013 -0500
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Thu Dec 12 16:13:44 2013 +0100
@@ -80,8 +80,4 @@
   return forwardee();
 }
 
-inline void oopDesc::update_header(ParCompactionManager* cm) {
-  PSParallelCompact::adjust_klass(cm, klass());
-}
-
 #endif // SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP