# HG changeset patch
# User coleenp
# Date 1400192606 14400
# Node ID 7384f6a12fc15018edd28bd91c16f1978aaef33b
# Parent 366c198c896da79045fb04a02b0dcc5f11e46f93
8038212: Method::is_valid_method() check has performance regression impact for stackwalking
Summary: Only prune metaspace virtual spaces at safepoint so walking them is safe outside a safepoint.
Reviewed-by: mgerdin, mgronlun, hseigel, stefank

diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/classfile/classLoaderData.cpp
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/classfile/classLoaderData.cpp	Thu May 15 18:23:26 2014 -0400
@@ -533,6 +533,8 @@
 ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
+bool ClassLoaderDataGraph::_should_purge = false;
+
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
 ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
@@ -655,32 +657,6 @@
   return array;
 }
 
-// For profiling and hsfind() only.  Otherwise, this is unsafe (and slow).  This
-// is done lock free to avoid lock inversion problems.  It is safe because
-// new ClassLoaderData are added to the end of the CLDG, and only removed at
-// safepoint.  The _unloading list can be deallocated concurrently with CMS so
-// this doesn't look in metaspace for classes that have been unloaded.
-bool ClassLoaderDataGraph::contains(const void* x) {
-  if (DumpSharedSpaces) {
-    // There are only two metaspaces to worry about.
-    ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
-    return (ncld->ro_metaspace()->contains(x) || ncld->rw_metaspace()->contains(x));
-  }
-
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(x)) {
-    return true;
-  }
-
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
-      return true;
-    }
-  }
-
-  // Do not check unloading list because deallocation can be concurrent.
-  return false;
-}
-
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -739,6 +715,7 @@
 }
 
 void ClassLoaderDataGraph::purge() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
   _unloading = NULL;
   ClassLoaderData* next = list;
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/classfile/classLoaderData.hpp
--- a/src/share/vm/classfile/classLoaderData.hpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/classfile/classLoaderData.hpp	Thu May 15 18:23:26 2014 -0400
@@ -66,6 +66,7 @@
   static ClassLoaderData* _unloading;
   // CMS support.
   static ClassLoaderData* _saved_head;
+  static bool _should_purge;
 
   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   static void post_class_unload_events(void);
@@ -86,12 +87,20 @@
   static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
   static GrowableArray<ClassLoaderData*>* new_clds();
 
+  static void set_should_purge(bool b) { _should_purge = b; }
+  static void purge_if_needed() {
+    // Only purge the CLDG for CMS if concurrent sweep is complete.
+    if (_should_purge) {
+      purge();
+      // reset for next time.
+      set_should_purge(false);
+    }
+  }
+
   static void dump_on(outputStream * const out) PRODUCT_RETURN;
   static void dump() { dump_on(tty); }
   static void verify();
 
-  // expensive test for pointer in metaspace for debugging
-  static bool contains(const void* x);
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 15 18:23:26 2014 -0400
@@ -6363,7 +6363,9 @@
   verify_overflow_empty();
 
   if (should_unload_classes()) {
-    ClassLoaderDataGraph::purge();
+    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
+    // requires that the virtual spaces are stable and not deleted.
+    ClassLoaderDataGraph::set_should_purge(true);
   }
 
   _intra_sweep_timer.stop();
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 15 18:23:26 2014 -0400
@@ -5416,7 +5416,7 @@
       if (_g1h->is_in_g1_reserved(p)) {
         _par_scan_state->push_on_queue(p);
       } else {
-        assert(!ClassLoaderDataGraph::contains((address)p),
+        assert(!Metaspace::contains((const void*)p),
               err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
                       PTR_FORMAT, p));
         _copy_non_heap_obj_cl->do_oop(p);
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/memory/allocation.cpp
--- a/src/share/vm/memory/allocation.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/memory/allocation.cpp	Thu May 15 18:23:26 2014 -0400
@@ -75,7 +75,7 @@
 }
 
 bool MetaspaceObj::is_metaspace_object() const {
-  return ClassLoaderDataGraph::contains((void*)this);
+  return Metaspace::contains((void*)this);
 }
 
 void MetaspaceObj::print_address_on(outputStream* st) const {
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/memory/metaspace.cpp
--- a/src/share/vm/memory/metaspace.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/memory/metaspace.cpp	Thu May 15 18:23:26 2014 -0400
@@ -314,6 +314,8 @@
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
+
   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 
@@ -555,6 +557,8 @@
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
+  bool contains(const void* ptr);
+
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
@@ -639,8 +643,6 @@
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
     _chunks_in_use[index] = v;
   }
 
@@ -755,8 +757,6 @@
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
-  bool contains(const void *ptr);
-
   void verify();
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
@@ -1076,6 +1076,7 @@
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
   assert_lock_strong(SpaceManager::expand_lock());
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
@@ -1109,8 +1110,8 @@
   }
 #ifdef ASSERT
   if (purged_vsl != NULL) {
-  // List should be stable enough to use an iterator here.
-  VirtualSpaceListIterator iter(virtual_space_list());
+    // List should be stable enough to use an iterator here.
+    VirtualSpaceListIterator iter(virtual_space_list());
     while (iter.repeat()) {
       VirtualSpaceNode* vsl = iter.get_next();
       assert(vsl != purged_vsl, "Purge of vsl failed");
@@ -1119,6 +1120,23 @@
   }
 #endif
 }
 
+
+// This function looks at the mmap regions in the metaspace without locking.
+// The chunks are added with store ordering and not deleted except for at
+// unloading time during a safepoint.
+bool VirtualSpaceList::contains(const void* ptr) {
+  // List should be stable enough to use an iterator here because removing virtual
+  // space nodes is only allowed at a safepoint.
+  VirtualSpaceListIterator iter(virtual_space_list());
+  while (iter.repeat()) {
+    VirtualSpaceNode* vsn = iter.get_next();
+    if (vsn->contains(ptr)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void VirtualSpaceList::retire_current_virtual_space() {
   assert_lock_strong(SpaceManager::expand_lock());
@@ -1208,6 +1226,8 @@
   } else {
     assert(new_entry->reserved_words() == vs_word_size,
         "Reserved memory size differs from requested memory size");
+    // ensure lock-free iteration sees fully initialized node
+    OrderAccess::storestore();
     link_vs(new_entry);
     return true;
   }
@@ -2431,21 +2451,6 @@
   return result;
 }
 
-// This function looks at the chunks in the metaspace without locking.
-// The chunks are added with store ordering and not deleted except for at
-// unloading time.
-bool SpaceManager::contains(const void *ptr) {
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
-  {
-    Metachunk* curr = chunks_in_use(i);
-    while (curr != NULL) {
-      if (curr->contains(ptr)) return true;
-      curr = curr->next();
-    }
-  }
-  return false;
-}
-
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
   // verfication of chunks does not work since
@@ -3550,11 +3555,15 @@
 }
 
 bool Metaspace::contains(const void* ptr) {
-  if (vsm()->contains(ptr)) return true;
-  if (using_class_space()) {
-    return class_vsm()->contains(ptr);
+  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
+    return true;
   }
-  return false;
+
+  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
+    return true;
+  }
+
+  return get_space_list(NonClassType)->contains(ptr);
 }
 
 void Metaspace::verify() {
@@ -3799,5 +3808,4 @@
     TestVirtualSpaceNodeTest::test();
     TestVirtualSpaceNodeTest::test_is_available();
   }
-
 #endif
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/memory/metaspace.hpp
--- a/src/share/vm/memory/metaspace.hpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/memory/metaspace.hpp	Thu May 15 18:23:26 2014 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,8 @@
 
   MetaWord* expand_and_allocate(size_t size, MetadataType mdtype);
 
-  bool contains(const void* ptr);
+  static bool contains(const void* ptr);
+
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/oops/klass.cpp
--- a/src/share/vm/oops/klass.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/oops/klass.cpp	Thu May 15 18:23:26 2014 -0400
@@ -648,7 +648,7 @@
 
   // This can be expensive, but it is worth checking that this klass is actually
   // in the CLD graph but not in production.
-  assert(ClassLoaderDataGraph::contains((address)this), "Should be");
+  assert(Metaspace::contains((address)this), "Should be");
 
   guarantee(this->is_klass(),"should be klass");
 
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/oops/method.cpp
--- a/src/share/vm/oops/method.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/oops/method.cpp	Thu May 15 18:23:26 2014 -0400
@@ -1873,6 +1873,14 @@
   loader_data->jmethod_ids()->clear_all_methods();
 }
 
+bool Method::has_method_vptr(const void* ptr) {
+  Method m;
+  // This assumes that the vtbl pointer is the first word of a C++ object.
+  // This assumption is also in universe.cpp patch_klass_vtble
+  void* vtbl2 = dereference_vptr((const void*)&m);
+  void* this_vtbl = dereference_vptr(ptr);
+  return vtbl2 == this_vtbl;
+}
 
 // Check that this pointer is valid by checking that the vtbl pointer matches
 bool Method::is_valid_method() const {
@@ -1881,12 +1889,7 @@
   } else if (!is_metaspace_object()) {
     return false;
   } else {
-    Method m;
-    // This assumes that the vtbl pointer is the first word of a C++ object.
-    // This assumption is also in universe.cpp patch_klass_vtble
-    void* vtbl2 = dereference_vptr((void*)&m);
-    void* this_vtbl = dereference_vptr((void*)this);
-    return vtbl2 == this_vtbl;
+    return has_method_vptr((const void*)this);
   }
 }
 
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/oops/method.hpp
--- a/src/share/vm/oops/method.hpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/oops/method.hpp	Thu May 15 18:23:26 2014 -0400
@@ -868,6 +868,7 @@
   const char* internal_name() const { return "{method}"; }
 
   // Check for valid method pointer
+  static bool has_method_vptr(const void* ptr);
   bool is_valid_method() const;
 
   // Verify
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/runtime/os.cpp
--- a/src/share/vm/runtime/os.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/runtime/os.cpp	Thu May 15 18:23:26 2014 -0400
@@ -1095,11 +1095,15 @@
     }
   }
 
-  // Check if in metaspace.
-  if (ClassLoaderDataGraph::contains((address)addr)) {
-    // Use addr->print() from the debugger instead (not here)
-    st->print_cr(INTPTR_FORMAT
-                 " is pointing into metadata", addr);
+  // Check if in metaspace and print types that have vptrs (only method now)
+  if (Metaspace::contains(addr)) {
+    if (Method::has_method_vptr((const void*)addr)) {
+      ((Method*)addr)->print_value_on(st);
+      st->cr();
+    } else {
+      // Use addr->print() from the debugger instead (not here)
+      st->print_cr(INTPTR_FORMAT " is pointing into metadata", addr);
+    }
     return;
   }
 
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/runtime/safepoint.cpp
--- a/src/share/vm/runtime/safepoint.cpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/runtime/safepoint.cpp	Thu May 15 18:23:26 2014 -0400
@@ -538,6 +538,13 @@
     gclog_or_tty->rotate_log(false);
   }
 
+  {
+    // CMS delays purging the CLDG until the beginning of the next safepoint and to
+    // make sure concurrent sweep is done
+    TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
+    ClassLoaderDataGraph::purge_if_needed();
+  }
+
   if (MemTracker::is_on()) {
     MemTracker::sync();
   }
diff -r 366c198c896d -r 7384f6a12fc1 src/share/vm/utilities/globalDefinitions.hpp
--- a/src/share/vm/utilities/globalDefinitions.hpp	Thu May 15 09:25:27 2014 -0400
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Thu May 15 18:23:26 2014 -0400
@@ -1349,7 +1349,7 @@
 // All C++ compilers that we know of have the vtbl pointer in the first
 // word.  If there are exceptions, this function needs to be made compiler
 // specific.
-static inline void* dereference_vptr(void* addr) {
+static inline void* dereference_vptr(const void* addr) {
   return *(void**)addr;
 }
 