# HG changeset patch
# User jcoomes
# Date 1231971175 28800
# Node ID 65de26b5ea82694bd085afab1e5a377a377a7d54
# Parent  fc7ab6287598874e544d40e03c3acb3869f796f8
# Parent  0af8b0718fc9e7198d14baabfa54f91bcd1437e2
Merge

diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/classfile/classFileParser.cpp
--- a/src/share/vm/classfile/classFileParser.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -232,7 +232,9 @@
     length >= 1, "Illegal constant pool size %u in class file %s",
     length, CHECK_(nullHandle));
   constantPoolOop constant_pool =
-                  oopFactory::new_constantPool(length, CHECK_(nullHandle));
+                  oopFactory::new_constantPool(length,
+                                               methodOopDesc::IsSafeConc,
+                                               CHECK_(nullHandle));
   constantPoolHandle cp (THREAD, constant_pool);
   cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
@@ -1675,7 +1677,8 @@
   // All sizing information for a methodOop is finally available, now create it
   methodOop m_oop  = oopFactory::new_method(
     code_length, access_flags, linenumber_table_length,
-    total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
+    total_lvt_length, checked_exceptions_length,
+    methodOopDesc::IsSafeConc, CHECK_(nullHandle));
   methodHandle m (THREAD, m_oop);

   ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -706,6 +706,30 @@
   }
 }

+// Apply the given closure to each live object in the space.
+// The usage of CompactibleFreeListSpace
+// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
+// objects in the space with references to objects that are no longer
+// valid.  For example, an object may reference another object
+// that has already been swept up (collected).  This method uses
+// obj_is_alive() to determine whether it is safe to apply the closure to
+// an object.  See obj_is_alive() for details on how liveness of an
+// object is decided.
+
+void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
+  assert_lock_strong(freelistLock());
+  NOT_PRODUCT(verify_objects_initialized());
+  HeapWord *cur, *limit;
+  size_t curSize;
+  for (cur = bottom(), limit = end(); cur < limit;
+       cur += curSize) {
+    curSize = block_size(cur);
+    if (block_is_obj(cur) && obj_is_alive(cur)) {
+      blk->do_object(oop(cur));
+    }
+  }
+}
+
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked();
@@ -861,7 +885,9 @@
   } else {
     // must read from what 'p' points to in each loop.
     klassOop k = ((volatile oopDesc*)p)->klass_or_null();
-    if (k != NULL && ((oopDesc*)p)->is_parsable()) {
+    if (k != NULL &&
+        ((oopDesc*)p)->is_parsable() &&
+        ((oopDesc*)p)->is_conc_safe()) {
       assert(k->is_oop(), "Should really be klass oop.");
       oop o = (oop)p;
       assert(o->is_oop(), "Should be an oop");
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -481,6 +481,15 @@
   void oop_iterate(OopClosure* cl);

   void object_iterate(ObjectClosure* blk);
+  // Apply the closure to each object in the space whose references
+  // point to objects in the heap.  The usage of CompactibleFreeListSpace
+  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
+  // objects in the space with references to objects that are no longer
+  // valid.  For example, an object may reference another object
+  // that has already been swept up (collected).  This method uses
+  // obj_is_alive() to determine whether it is safe to iterate over
+  // an object.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

   // Requires that "mr" be entirely within the space.
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -3018,6 +3018,16 @@
 }

 void
+ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::safe_object_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::safe_object_iterate(cl);
+  }
+}
+
+void
 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
 }

@@ -6623,7 +6633,11 @@
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
     if (p->klass_or_null() != NULL) {
-      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
+      // If is_conc_safe is false, the object may be undergoing
+      // change by the VM outside a safepoint.  Don't try to
+      // scan it, but rather leave it for the remark phase.
+      if (CMSPermGenPrecleaningEnabled &&
+          (!p->is_conc_safe() || !p->is_parsable())) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
         assert(size == 0, "Initial value");
@@ -7001,7 +7015,6 @@
       _mut->clear_range(mr);
     }
   DEBUG_ONLY(})
-
   // Note: the finger doesn't advance while we drain
   // the stack below.
   PushOrMarkClosure pushOrMarkClosure(_collector,
@@ -8062,9 +8075,13 @@
 #ifdef DEBUG
   if (oop(addr)->klass_or_null() != NULL &&
       (   !_collector->should_unload_classes()
-       || oop(addr)->is_parsable())) {
+       || (oop(addr)->is_parsable()) &&
+          oop(addr)->is_conc_safe())) {
     // Ignore mark word because we are running concurrent with mutators
     assert(oop(addr)->is_oop(true), "live block should be an oop");
+    // is_conc_safe is checked before performing this assertion
+    // because an object that is not is_conc_safe may not yet
+    // return the correct value from size().
     assert(size ==
            CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
            "P-mark and computed size do not agree");
@@ -8077,6 +8094,13 @@
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
+    // Note that there are objects used during class redefinition
+    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
+    // which are discarded with their is_conc_safe state still
+    // false.  These objects may be floating garbage so may be
+    // seen here.  If they are floating garbage their size
+    // should be attainable from their klass.  Do not assume that
+    // is_conc_safe() is true for oop(addr).
     // Ignore mark word because we are running concurrent with mutators
     assert(oop(addr)->is_oop(true), "live block should be an oop");
     // Verify that the bit map has no bits marked between
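The CMS changes above all apply the same rule: before a concurrent phase reads an object's size or scans its fields, it now checks is_conc_safe() in addition to is_parsable(), and otherwise defers the object (redirties the card or leaves it for the remark pause). The following is an illustrative sketch only, not part of this changeset; can_examine_concurrently() is a hypothetical helper, while klass_or_null(), is_parsable() and is_conc_safe() are the oopDesc queries used in the patch.

  // Sketch: decide whether a concurrently-running GC phase may look at 'p'.
  static bool can_examine_concurrently(oop p) {
    if (p->klass_or_null() == NULL) {
      return false;             // klass pointer not installed yet
    }
    // Not parsable or not conc-safe means the object may be mid-update
    // outside a safepoint (e.g. class redefinition): don't trust size()
    // or the fields now; defer the object to the remark pause instead.
    return p->is_parsable() && p->is_conc_safe();
  }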
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -1212,6 +1212,7 @@
   // More iteration support
   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
   virtual void oop_iterate(OopClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
   virtual void object_iterate(ObjectClosure* cl);

   // Need to declare the full complement of closures, whether we'll
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -850,6 +850,7 @@
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

   // Iterate over all objects allocated since the last collection, calling
   // "cl.do_object" on each.  The heap must have been initialized properly
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -200,6 +200,7 @@
   void oop_iterate(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
   void permanent_oop_iterate(OopClosure* cl);
   void permanent_object_iterate(ObjectClosure* cl);

diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/gc_interface/collectedHeap.hpp
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -466,6 +466,10 @@
   // This includes objects in permanent memory.
   virtual void object_iterate(ObjectClosure* cl) = 0;

+  // Similar to object_iterate() except iterates only
+  // over live objects.
+  virtual void safe_object_iterate(ObjectClosure* cl) = 0;
+
   // Behaves the same as oop_iterate, except only traverses
   // interior pointers contained in permanent memory.  If there
   // is no permanent memory, does nothing.
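Callers use the new CollectedHeap::safe_object_iterate() exactly like object_iterate(): they supply an ObjectClosure, and a CMS heap filters out blocks whose referents may already have been swept (heapDumper.cpp later in this changeset makes exactly this switch). The following is an illustrative sketch only, not part of the changeset; CountLiveClosure and count_live_objects() are hypothetical names, and the sketch assumes the usual HotSpot headers for ObjectClosure, oop and Universe.

  // Sketch: a closure that tallies live objects, run through the new
  // safe_object_iterate() entry point.
  class CountLiveClosure : public ObjectClosure {
   private:
    size_t _count;
    size_t _bytes;
   public:
    CountLiveClosure() : _count(0), _bytes(0) {}
    void do_object(oop obj) {
      _count++;
      _bytes += obj->size() * HeapWordSize;   // size() is in HeapWords
    }
    size_t count() const { return _count; }
    size_t bytes() const { return _bytes; }
  };

  void count_live_objects() {
    CountLiveClosure cl;
    // object_iterate() could visit objects with stale references under CMS;
    // safe_object_iterate() skips dead and not-yet-safe blocks instead.
    Universe::heap()->safe_object_iterate(&cl);
  }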
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/genCollectedHeap.cpp
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -910,6 +910,13 @@
   perm_gen()->object_iterate(cl);
 }

+void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
+  for (int i = 0; i < _n_gens; i++) {
+    _gens[i]->safe_object_iterate(cl);
+  }
+  perm_gen()->safe_object_iterate(cl);
+}
+
 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate_since_last_GC(cl);
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/genCollectedHeap.hpp
--- a/src/share/vm/memory/genCollectedHeap.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -215,6 +215,7 @@
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl);
   void object_iterate_since_last_GC(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;

diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/generation.cpp
--- a/src/share/vm/memory/generation.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/generation.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -319,6 +319,21 @@
   space_iterate(&blk);
 }

+class GenerationSafeObjIterateClosure : public SpaceClosure {
+ private:
+  ObjectClosure* _cl;
+ public:
+  virtual void do_space(Space* s) {
+    s->safe_object_iterate(_cl);
+  }
+  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
+};
+
+void Generation::safe_object_iterate(ObjectClosure* cl) {
+  GenerationSafeObjIterateClosure blk(cl);
+  space_iterate(&blk);
+}
+
 void Generation::prepare_for_compaction(CompactPoint* cp) {
   // Generic implementation, can be specialized
   CompactibleSpace* space = first_compaction_space();
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/generation.hpp
--- a/src/share/vm/memory/generation.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/generation.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -518,6 +518,11 @@
   // each.
   virtual void object_iterate(ObjectClosure* cl);

+  // Iterate over all safe objects in the generation, calling "cl.do_object" on
+  // each.  An object is safe if its references point to other objects in
+  // the heap.  This defaults to object_iterate() unless overridden.
+  virtual void safe_object_iterate(ObjectClosure* cl);
+
   // Iterate over all objects allocated in the generation since the last
   // collection, calling "cl.do_object" on each.  The generation must have
   // been initialized properly to support this function, or else this call
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/heapInspection.cpp
--- a/src/share/vm/memory/heapInspection.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/heapInspection.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -263,6 +263,9 @@
   if (!cit.allocation_failed()) {
     // Iterate over objects in the heap
     RecordInstanceClosure ric(&cit);
+    // If this operation encounters a bad object when using CMS,
+    // consider using safe_object_iterate() which avoids perm gen
+    // objects that may contain bad references.
     Universe::heap()->object_iterate(&ric);

     // Report if certain classes are not counted because of
@@ -317,5 +320,8 @@

   // Iterate over objects in the heap
   FindInstanceClosure fic(k, result);
+  // If this operation encounters a bad object when using CMS,
+  // consider using safe_object_iterate() which avoids perm gen
+  // objects that may contain bad references.
   Universe::heap()->object_iterate(&fic);
 }
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/oopFactory.cpp
--- a/src/share/vm/memory/oopFactory.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/oopFactory.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -82,9 +82,11 @@
 }


-constantPoolOop oopFactory::new_constantPool(int length, TRAPS) {
+constantPoolOop oopFactory::new_constantPool(int length,
+                                             bool is_conc_safe,
+                                             TRAPS) {
   constantPoolKlass* ck = constantPoolKlass::cast(Universe::constantPoolKlassObj());
-  return ck->allocate(length, CHECK_NULL);
+  return ck->allocate(length, is_conc_safe, CHECK_NULL);
 }


@@ -105,11 +107,13 @@
                                          int compressed_line_number_size,
                                          int localvariable_table_length,
                                          int checked_exceptions_length,
+                                         bool is_conc_safe,
                                          TRAPS) {
   klassOop cmkObj = Universe::constMethodKlassObj();
   constMethodKlass* cmk = constMethodKlass::cast(cmkObj);
   return cmk->allocate(byte_code_size, compressed_line_number_size,
                        localvariable_table_length, checked_exceptions_length,
+                       is_conc_safe,
                        CHECK_NULL);
 }

@@ -117,14 +121,17 @@
 methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
                                  int compressed_line_number_size,
                                  int localvariable_table_length,
-                                 int checked_exceptions_length, TRAPS) {
+                                 int checked_exceptions_length,
+                                 bool is_conc_safe,
+                                 TRAPS) {
   methodKlass* mk = methodKlass::cast(Universe::methodKlassObj());
   assert(!access_flags.is_native() || byte_code_size == 0,
          "native methods should not contain byte codes");
   constMethodOop cm = new_constMethod(byte_code_size,
                                       compressed_line_number_size,
                                       localvariable_table_length,
-                                      checked_exceptions_length, CHECK_NULL);
+                                      checked_exceptions_length,
+                                      is_conc_safe, CHECK_NULL);
   constMethodHandle rw(THREAD, cm);
   return mk->allocate(rw, access_flags, CHECK_NULL);
 }
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/oopFactory.hpp
--- a/src/share/vm/memory/oopFactory.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/oopFactory.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -81,7 +81,9 @@
   static symbolHandle    new_symbol_handle(const char* name, TRAPS) { return new_symbol_handle(name, (int)strlen(name), CHECK_(symbolHandle())); }

   // Constant pools
-  static constantPoolOop      new_constantPool (int length, TRAPS);
+  static constantPoolOop      new_constantPool (int length,
+                                                bool is_conc_safe,
+                                                TRAPS);
   static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);

   // Instance classes
@@ -93,9 +95,20 @@
   static constMethodOop  new_constMethod(int byte_code_size,
                                          int compressed_line_number_size,
                                          int localvariable_table_length,
-                                         int checked_exceptions_length, TRAPS);
+                                         int checked_exceptions_length,
+                                         bool is_conc_safe,
+                                         TRAPS);
 public:
-  static methodOop       new_method(int byte_code_size, AccessFlags access_flags, int compressed_line_number_size, int localvariable_table_length, int checked_exceptions_length, TRAPS);
+  // Pass is_conc_safe == false for methods which cannot safely be
+  // processed by a concurrent GC even after this factory method
+  // returns.
+  static methodOop       new_method(int byte_code_size,
+                                    AccessFlags access_flags,
+                                    int compressed_line_number_size,
+                                    int localvariable_table_length,
+                                    int checked_exceptions_length,
+                                    bool is_conc_safe,
+                                    TRAPS);

   // Method Data containers
   static methodDataOop   new_methodData(methodHandle method, TRAPS);
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/space.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -569,7 +569,15 @@
   if (prev > mr.start()) {
     region_start_addr = prev;
     blk_start_addr    = prev;
-    assert(blk_start_addr == block_start(region_start_addr), "invariant");
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
   } else {
     region_start_addr = mr.start();
     blk_start_addr    = block_start(region_start_addr);
@@ -705,6 +713,12 @@
   object_iterate_from(bm, blk);
 }

+// For a contiguous space, object_iterate() and safe_object_iterate()
+// are the same.
+void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
   assert(mark.space() == this, "Mark does not match space");
   HeapWord* p = mark.point();
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/memory/space.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -193,6 +193,9 @@
   // each.  Objects allocated by applications of the closure are not
   // included in the iteration.
   virtual void object_iterate(ObjectClosure* blk) = 0;
+  // Similar to object_iterate() except only iterates over
+  // objects whose internal references point to objects in the space.
+  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

   // Iterate over all objects that intersect with mr, calling "cl->do_object"
   // on each.  There is an exception to this: if this closure has already
@@ -843,6 +846,9 @@
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* blk);
+  // For contiguous spaces this method will iterate safely over objects
+  // in the space (i.e., between bottom and top) when at a safepoint.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   // iterates on objects up to the safe limit
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constMethodKlass.cpp
--- a/src/share/vm/oops/constMethodKlass.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constMethodKlass.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -49,10 +49,16 @@
   return constMethodOop(obj)->object_is_parsable();
 }

+bool constMethodKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constMethod(), "must be constMethod oop");
+  return constMethodOop(obj)->is_conc_safe();
+}
+
 constMethodOop constMethodKlass::allocate(int byte_code_size,
                                           int compressed_line_number_size,
                                           int localvariable_table_length,
                                           int checked_exceptions_length,
+                                          bool is_conc_safe,
                                           TRAPS) {

   int size = constMethodOopDesc::object_size(byte_code_size,
@@ -75,6 +81,7 @@
                                 compressed_line_number_size,
                                 localvariable_table_length);
   assert(cm->size() == size, "wrong size for object");
+  cm->set_is_conc_safe(is_conc_safe);
   cm->set_partially_loaded();
   assert(cm->is_parsable(), "Is safely parsable by gc");
   return cm;
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constMethodKlass.hpp
--- a/src/share/vm/oops/constMethodKlass.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constMethodKlass.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -32,12 +32,16 @@
   // Testing
   bool oop_is_constMethod() const { return true; }
   virtual bool oop_is_parsable(oop obj) const;
+  virtual bool oop_is_conc_safe(oop obj) const;
+
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constMethodKlass);
   constMethodOop allocate(int byte_code_size,
                           int compressed_line_number_size,
                           int localvariable_table_length,
-                          int checked_exceptions_length, TRAPS);
+                          int checked_exceptions_length,
+                          bool is_conc_safe,
+                          TRAPS);
   static klassOop create_klass(TRAPS);

   // Sizing
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constMethodOop.hpp
--- a/src/share/vm/oops/constMethodOop.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constMethodOop.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -104,6 +104,7 @@
   // loads and stores.  This value may updated and read without a lock by
   // multiple threads, so is volatile.
   volatile uint64_t _fingerprint;
+  volatile bool     _is_conc_safe; // if true, safe for concurrent GC processing

 public:
   oop* oop_block_beg() const { return adr_method(); }
@@ -273,6 +274,8 @@
   oop* adr_method() const             { return (oop*)&_method;          }
   oop* adr_stackmap_data() const      { return (oop*)&_stackmap_data;   }
   oop* adr_exception_table() const    { return (oop*)&_exception_table; }
+  bool is_conc_safe() { return _is_conc_safe; }
+  void set_is_conc_safe(bool v) { _is_conc_safe = v; }

   // Unique id for the method
   static const u2 MAX_IDNUM;
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constantPoolKlass.cpp
--- a/src/share/vm/oops/constantPoolKlass.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constantPoolKlass.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -25,7 +25,7 @@
 # include "incls/_precompiled.incl"
 # include "incls/_constantPoolKlass.cpp.incl"

-constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
+constantPoolOop constantPoolKlass::allocate(int length, bool is_conc_safe, TRAPS) {
   int size = constantPoolOopDesc::object_size(length);
   KlassHandle klass (THREAD, as_klassOop());
   constantPoolOop c =
@@ -38,6 +38,9 @@
   c->set_flags(0);
   // only set to non-zero if constant pool is merged by RedefineClasses
   c->set_orig_length(0);
+  // If the constant pool may change during RedefineClasses, it is created
+  // unsafe for concurrent GC processing.
+  c->set_is_conc_safe(is_conc_safe);
   // all fields are initialized; needed for GC

   // initialize tag array
@@ -207,6 +210,11 @@
   return size;
 }

+bool constantPoolKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constantPool(), "must be constantPool");
+  return constantPoolOop(obj)->is_conc_safe();
+}
+
 #ifndef SERIALGC
 int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert (obj->is_constantPool(), "obj must be constant pool");
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constantPoolKlass.hpp
--- a/src/share/vm/oops/constantPoolKlass.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constantPoolKlass.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -34,7 +34,7 @@

   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constantPoolKlass);
-  constantPoolOop allocate(int length, TRAPS);
+  constantPoolOop allocate(int length, bool is_conc_safe, TRAPS);
   static klassOop create_klass(TRAPS);

   // Casting from klassOop
@@ -48,6 +48,8 @@
   int object_size() const        { return align_object_size(header_size()); }

   // Garbage collection
+  // Returns true if the object is safe for concurrent GC processing.
+  virtual bool oop_is_conc_safe(oop obj) const;
   void oop_follow_contents(oop obj);
   int  oop_adjust_pointers(oop obj);

diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/constantPoolOop.hpp
--- a/src/share/vm/oops/constantPoolOop.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/constantPoolOop.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -43,6 +43,8 @@
   klassOop             _pool_holder;   // the corresponding class
   int                  _flags;         // a few header bits to describe contents for GC
   int                  _length; // number of elements in the array
+  volatile bool        _is_conc_safe; // if true, safe for concurrent
+                                      // GC processing
   // only set to non-zero if constant pool is merged by RedefineClasses
   int                  _orig_length;

@@ -379,6 +381,9 @@
   static int object_size(int length)   { return align_object_size(header_size() + length); }
   int object_size()                    { return object_size(length()); }

+  bool is_conc_safe()                  { return _is_conc_safe; }
+  void set_is_conc_safe(bool v)        { _is_conc_safe = v; }
+
   friend class constantPoolKlass;
   friend class ClassFileParser;
   friend class SystemDictionary;
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/klass.hpp
--- a/src/share/vm/oops/klass.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/klass.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -606,8 +606,19 @@
 #undef assert_same_query

   // Unless overridden, oop is parsable if it has a klass pointer.
+  // Parsability of an object is object specific.
   virtual bool oop_is_parsable(oop obj) const { return true; }

+  // Unless overridden, oop is safe for concurrent GC processing
+  // after its allocation is complete.  The exception to
+  // this is the case where objects are changed after allocation.
+  // Class redefinition is one of the known exceptions.  During
+  // class redefinition, an allocated class can be changed in order
+  // to create a merged class (the combination of the old class
+  // definition that has to be preserved and the new class
+  // definition which is being created).
+  virtual bool oop_is_conc_safe(oop obj) const { return true; }
+
   // Access flags
   AccessFlags access_flags() const         { return _access_flags;  }
   void set_access_flags(AccessFlags flags) { _access_flags = flags; }
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/methodOop.cpp
--- a/src/share/vm/oops/methodOop.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/methodOop.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -792,15 +792,34 @@
   AccessFlags flags = m->access_flags();
   int checked_exceptions_len = m->checked_exceptions_length();
   int localvariable_len = m->localvariable_table_length();
-  methodOop newm_oop = oopFactory::new_method(new_code_length, flags, new_compressed_linenumber_size, localvariable_len, checked_exceptions_len, CHECK_(methodHandle()));
+  // Allocate newm_oop with the is_conc_safe parameter set
+  // to IsUnsafeConc to indicate that newm_oop is not yet
+  // safe for concurrent processing by a GC.
+  methodOop newm_oop = oopFactory::new_method(new_code_length,
+                                              flags,
+                                              new_compressed_linenumber_size,
+                                              localvariable_len,
+                                              checked_exceptions_len,
+                                              IsUnsafeConc,
+                                              CHECK_(methodHandle()));
   methodHandle newm (THREAD, newm_oop);
   int new_method_size = newm->method_size();
   // Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop
   constMethodOop newcm = newm->constMethod();
   int new_const_method_size = newm->constMethod()->object_size();
+  memcpy(newm(), m(), sizeof(methodOopDesc));
   // Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop
+  // is_conc_safe is set to false because that is the value of
+  // is_conc_safe initialized into newcm and the copy should
+  // not overwrite that value.  During the window in which it is
+  // tagged as unsafe, some extra work could be needed during precleaning
+  // or concurrent marking but those phases will be correct.  Setting and
+  // resetting is done in preference to a careful copying into newcm to
+  // avoid having to know the precise layout of a constMethodOop.
+  m->constMethod()->set_is_conc_safe(false);
   memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc));
+  m->constMethod()->set_is_conc_safe(true);
   // Reset correct method/const method, method size, and parameter info
   newcm->set_method(newm());
   newm->set_constMethod(newcm);
@@ -831,6 +850,10 @@
            m->localvariable_table_start(),
            localvariable_len * sizeof(LocalVariableTableElement));
   }
+
+  // Only set is_conc_safe to true when changes to newcm are
+  // complete.
+  newcm->set_is_conc_safe(true);
   return newm;
 }
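The set_is_conc_safe(false) / memcpy / set_is_conc_safe(true) sequence above is a small publication protocol: both the source and the destination are kept invisible to concurrent GC processing while raw bytes are being copied, and each is re-published only once it is consistent again. The following is an illustrative sketch only, outside HotSpot and not part of this changeset; ScannableBlob and copy_blob are hypothetical names, and real VM code would additionally rely on the memory-ordering guarantees of its own primitives.

  #include <cstring>

  // A hypothetical object that a concurrent scanner may examine at any time.
  struct ScannableBlob {
    volatile bool is_conc_safe;   // concurrent readers check this flag first
    char          payload[64];
  };

  // Mirrors the constMethod copy above: clear the flag on the source so the
  // value copied into the destination is 'false', do the raw copy, restore
  // the source, and publish the destination only after it has been fixed up.
  void copy_blob(ScannableBlob* dst, ScannableBlob* src) {
    src->is_conc_safe = false;                    // copied value will be false
    std::memcpy(dst, src, sizeof(ScannableBlob));
    src->is_conc_safe = true;                     // source is consistent again
    // ... adjust dst->payload here while dst is still marked unsafe ...
    dst->is_conc_safe = true;                     // publish when consistent
  }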
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/methodOop.hpp
--- a/src/share/vm/oops/methodOop.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/methodOop.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -129,6 +129,10 @@
   volatile address           _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

 public:
+
+  static const bool IsUnsafeConc         = false;
+  static const bool IsSafeConc           = true;
+
   // accessors for instance variables
   constMethodOop constMethod() const             { return _constMethod; }
   void set_constMethod(constMethodOop xconst)    { oop_store_without_check((oop*)&_constMethod, (oop)xconst); }
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/oop.hpp
--- a/src/share/vm/oops/oop.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/oop.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -108,6 +108,13 @@
   // installation of their klass pointer.
   bool is_parsable();

+  // Some perm gen objects that have been allocated and initialized
+  // can be changed by the VM when not at a safepoint (class redefinition
+  // is an example).  Such objects should not be examined by the
+  // concurrent processing of a garbage collector if is_conc_safe()
+  // returns false.
+  bool is_conc_safe();
+
   // type test operations (inlined in oop.inline.h)
   bool is_instance()           const;
   bool is_instanceRef()        const;
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/oops/oop.inline.hpp
--- a/src/share/vm/oops/oop.inline.hpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/oops/oop.inline.hpp	Wed Jan 14 14:12:55 2009 -0800
@@ -435,6 +435,10 @@
   return blueprint()->oop_is_parsable(this);
 }

+inline bool oopDesc::is_conc_safe() {
+  return blueprint()->oop_is_conc_safe(this);
+}
+
 inline void update_barrier_set(void* p, oop v) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   oopDesc::bs()->write_ref_field(p, v);
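The flag is consulted through a two-level dispatch: oopDesc::is_conc_safe() forwards to the object's klass via blueprint(), the default Klass::oop_is_conc_safe() answers true, and klasses whose instances are mutated after allocation (constMethodKlass and constantPoolKlass in this changeset) override it to read a per-instance volatile field. The following is an illustrative sketch only, not part of the changeset, using hypothetical stand-in types; for brevity the flag lives on the base object here, whereas HotSpot keeps it on constMethodOopDesc and constantPoolOopDesc.

  struct FakeOopDesc;

  struct FakeKlass {                       // plays the role of Klass
    // Default: safe for concurrent GC processing once allocation completes.
    virtual bool oop_is_conc_safe(const FakeOopDesc*) const { return true; }
    virtual ~FakeKlass() {}
  };

  struct FakeOopDesc {                     // plays the role of oopDesc
    FakeKlass*    _klass;
    volatile bool _is_conc_safe;           // only meaningful for some klasses

    FakeKlass* blueprint() const { return _klass; }
    // Mirrors oopDesc::is_conc_safe(): virtual dispatch through the klass.
    bool is_conc_safe() const { return blueprint()->oop_is_conc_safe(this); }
  };

  // Klasses whose instances are mutated after allocation override the
  // default and read the per-instance flag instead.
  struct FakeMutableKlass : FakeKlass {
    bool oop_is_conc_safe(const FakeOopDesc* obj) const override {
      return obj->_is_conc_safe;
    }
  };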
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/prims/jvmtiRedefineClasses.cpp
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -1230,8 +1230,14 @@

     // Constant pools are not easily reused so we allocate a new one
     // each time.
+    // merge_cp is created unsafe for concurrent GC processing.  It
+    // should be marked safe before discarding it because, even if
+    // it is garbage, if it crosses a card boundary it may be scanned
+    // in order to find the start of the first complete object on the card.
     constantPoolHandle merge_cp(THREAD,
-      oopFactory::new_constantPool(merge_cp_length, THREAD));
+      oopFactory::new_constantPool(merge_cp_length,
+                                   methodOopDesc::IsUnsafeConc,
+                                   THREAD));
     int orig_length = old_cp->orig_length();
     if (orig_length == 0) {
       // This old_cp is an actual original constant pool. We save
@@ -1274,6 +1280,7 @@
       // rewriting so we can't use the old constant pool with the new
       // class.

+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();  // toss the merged constant pool
     } else if (old_cp->length() < scratch_cp->length()) {
       // The old constant pool has fewer entries than the new constant
@@ -1283,6 +1290,7 @@
       // rewriting so we can't use the new constant pool with the old
       // class.

+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();  // toss the merged constant pool
     } else {
       // The old constant pool has more entries than the new constant
@@ -1296,6 +1304,7 @@
       set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
         THREAD);
       // drop local ref to the merged constant pool
+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();
     }
   } else {
@@ -1325,7 +1334,10 @@
     // GCed.
     set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
       THREAD);
+    merge_cp()->set_is_conc_safe(true);
   }
+  assert(old_cp()->is_conc_safe(), "Just checking");
+  assert(scratch_cp()->is_conc_safe(), "Just checking");

   return JVMTI_ERROR_NONE;
 } // end merge_cp_and_rewrite()
@@ -2314,13 +2326,16 @@
     // worst case merge situation.  We want to associate the minimum
     // sized constant pool with the klass to save space.
     constantPoolHandle smaller_cp(THREAD,
-      oopFactory::new_constantPool(scratch_cp_length, THREAD));
+      oopFactory::new_constantPool(scratch_cp_length,
+                                   methodOopDesc::IsUnsafeConc,
+                                   THREAD));
     // preserve orig_length() value in the smaller copy
     int orig_length = scratch_cp->orig_length();
     assert(orig_length != 0, "sanity check");
     smaller_cp->set_orig_length(orig_length);
     scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
     scratch_cp = smaller_cp;
+    smaller_cp()->set_is_conc_safe(true);
   }

   // attach new constant pool to klass
@@ -2516,6 +2531,7 @@
     rewrite_cp_refs_in_stack_map_table(method, THREAD);
   } // end for each method

+  assert(scratch_cp()->is_conc_safe(), "Just checking");
 } // end set_new_constant_pool()
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/prims/jvmtiTagMap.cpp
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -1320,6 +1320,9 @@
   }

   // do the iteration
+  // If this operation encounters a bad object when using CMS,
+  // consider using safe_object_iterate() which avoids perm gen
+  // objects that may contain bad references.
   Universe::heap()->object_iterate(_blk);

   // when sharing is enabled we must iterate over the shared spaces
diff -r fc7ab6287598 -r 65de26b5ea82 src/share/vm/services/heapDumper.cpp
--- a/src/share/vm/services/heapDumper.cpp	Fri Jan 09 14:39:07 2009 -0500
+++ b/src/share/vm/services/heapDumper.cpp	Wed Jan 14 14:12:55 2009 -0800
@@ -1700,7 +1700,7 @@
   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
   // of the heap dump.
   HeapObjectDumper obj_dumper(this, writer());
-  Universe::heap()->object_iterate(&obj_dumper);
+  Universe::heap()->safe_object_iterate(&obj_dumper);
   // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
   do_threads();
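The jvmtiRedefineClasses.cpp changes follow one discipline: every temporary constant pool is allocated with methodOopDesc::IsUnsafeConc, mutated while unsafe, and flipped back to safe on every exit path, including the paths that immediately discard it, since the discarded pool can linger as floating garbage that a concurrent collector still has to step over (for example while locating the first object on a card it straddles). The following is an illustrative sketch only, not part of the changeset, with hypothetical names (TempPool, allocate_temp_pool, redefine_step).

  // Hypothetical stand-in for a temporary, mutable pool-like object.
  struct TempPool {
    volatile bool is_conc_safe;
    // ... entries ...
  };

  // Created "unsafe", like oopFactory::new_constantPool(..., IsUnsafeConc, ...).
  TempPool* allocate_temp_pool() {
    return new TempPool{false};
  }

  void redefine_step(bool keep) {
    TempPool* merge = allocate_temp_pool();  // starts unsafe
    // ... merge/rewrite entries while concurrent scanners skip the object ...

    // Flip to safe on every path before dropping the last reference, even
    // when the object is about to become garbage.
    merge->is_conc_safe = true;
    if (!keep) {
      delete merge;                          // "toss the merged constant pool"
    }
  }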