comparison src/share/vm/memory/metaspace.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 03e6d34be1f5
children
comparison: 20184:84105dcdb05b vs 20804:7848fc12602b
40 #include "runtime/atomic.inline.hpp" 40 #include "runtime/atomic.inline.hpp"
41 #include "runtime/globals.hpp" 41 #include "runtime/globals.hpp"
42 #include "runtime/init.hpp" 42 #include "runtime/init.hpp"
43 #include "runtime/java.hpp" 43 #include "runtime/java.hpp"
44 #include "runtime/mutex.hpp" 44 #include "runtime/mutex.hpp"
45 #include "runtime/orderAccess.hpp" 45 #include "runtime/orderAccess.inline.hpp"
46 #include "services/memTracker.hpp" 46 #include "services/memTracker.hpp"
47 #include "services/memoryService.hpp" 47 #include "services/memoryService.hpp"
48 #include "utilities/copy.hpp" 48 #include "utilities/copy.hpp"
49 #include "utilities/debug.hpp" 49 #include "utilities/debug.hpp"
50 50
411 411
412 // byte_size is the size of the associated virtualspace. 412 // byte_size is the size of the associated virtualspace.
413 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 413 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
414 assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); 414 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
415 415
416 #if INCLUDE_CDS
416 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve a 417 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve a
417 // configurable address, generally at the top of the Java heap so other 418 // configurable address, generally at the top of the Java heap so other
418 // memory addresses don't conflict. 419 // memory addresses don't conflict.
419 if (DumpSharedSpaces) { 420 if (DumpSharedSpaces) {
420 bool large_pages = false; // No large pages when dumping the CDS archive. 421 bool large_pages = false; // No large pages when dumping the CDS archive.
426 } else { 427 } else {
427 // Get a mmap region anywhere if the SharedBaseAddress fails. 428 // Get a mmap region anywhere if the SharedBaseAddress fails.
428 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 429 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
429 } 430 }
430 MetaspaceShared::set_shared_rs(&_rs); 431 MetaspaceShared::set_shared_rs(&_rs);
431 } else { 432 } else
433 #endif
434 {
432 bool large_pages = should_commit_large_pages_when_reserving(bytes); 435 bool large_pages = should_commit_large_pages_when_reserving(bytes);
433 436
434 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 437 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
435 } 438 }
436 439
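
The constructor hunk above introduces the `} else` / `#endif` / `{` pattern used throughout this change: the CDS-specific branch is only compiled when INCLUDE_CDS is set, while the fallback block stays valid either way. A minimal sketch of the same idiom; INCLUDE_FEATURE, reserve_special() and reserve_default() are illustrative placeholders, not HotSpot names.

    #include <cstddef>

    #define INCLUDE_FEATURE 1

    static bool  use_special = false;
    static void* reserve_special(size_t bytes) { (void)bytes; return NULL; }  // stub
    static void* reserve_default(size_t bytes) { (void)bytes; return NULL; }  // stub

    void* reserve(size_t bytes) {
      void* p;
    #if INCLUDE_FEATURE
      if (use_special) {
        p = reserve_special(bytes);
      } else
    #endif
      {
        // With INCLUDE_FEATURE == 0 the whole if/else above is removed and this
        // brace-enclosed block is all that remains; with INCLUDE_FEATURE == 1 it
        // becomes the else-branch.
        p = reserve_default(bytes);
      }
      return p;
    }

When the macro is off, the lone block still parses as an ordinary compound statement, which is why the closing brace and the opening brace are split around the #endif in the hunk above.
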
1409 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); 1412 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1410 assert(value >= MetaspaceSize, "Not initialized properly?"); 1413 assert(value >= MetaspaceSize, "Not initialized properly?");
1411 return value; 1414 return value;
1412 } 1415 }
1413 1416
1414 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) { 1417 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1415 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1418 assert_is_size_aligned(v, Metaspace::commit_alignment());
1416 1419
1417 return (size_t)Atomic::add_ptr(v, &_capacity_until_GC); 1420 size_t capacity_until_GC = (size_t) _capacity_until_GC;
1421 size_t new_value = capacity_until_GC + v;
1422
1423 if (new_value < capacity_until_GC) {
1424 // The addition wrapped around, set new_value to aligned max value.
1425 new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1426 }
1427
1428 intptr_t expected = (intptr_t) capacity_until_GC;
1429 intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1430
1431 if (expected != actual) {
1432 return false;
1433 }
1434
1435 if (new_cap_until_GC != NULL) {
1436 *new_cap_until_GC = new_value;
1437 }
1438 if (old_cap_until_GC != NULL) {
1439 *old_cap_until_GC = capacity_until_GC;
1440 }
1441 return true;
1418 } 1442 }
1419 1443
1420 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { 1444 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1421 assert_is_size_aligned(v, Metaspace::commit_alignment()); 1445 assert_is_size_aligned(v, Metaspace::commit_alignment());
1422 1446
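
The rewritten MetaspaceGC::inc_capacity_until_GC above trades the unconditional Atomic::add_ptr for a single compare-and-swap attempt: read the current high-water mark, compute the new value (clamped on overflow), and publish it only if no other thread got there first, reporting both the old and new values to the caller. A standalone sketch of that pattern using std::atomic; the variable name and the SIZE_MAX clamp are illustrative, the real code clamps to an aligned max_uintx and uses Atomic::cmpxchg_ptr.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    static std::atomic<size_t> g_capacity_until_gc{0};

    // Try once to raise the high-water mark by 'v'.  Reports the old and new
    // values and returns true if this thread's CAS won; returns false if some
    // other thread changed the value between the load and the exchange.
    bool inc_capacity_until_gc(size_t v, size_t* new_cap, size_t* old_cap) {
      size_t current = g_capacity_until_gc.load();
      size_t next    = current + v;
      if (next < current) {
        next = SIZE_MAX;  // addition wrapped around; clamp to the maximum
      }
      // On failure compare_exchange_strong rewrites 'current', but we return
      // immediately, matching the single-attempt behaviour above.
      if (!g_capacity_until_gc.compare_exchange_strong(current, next)) {
        return false;
      }
      if (new_cap != NULL) { *new_cap = next; }
      if (old_cap != NULL) { *old_cap = current; }
      return true;
    }

Returning false instead of looping is deliberate: the caller decides whether to retry, as expand_and_allocate does further down in this file.
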
1512 // increment the HWM. 1536 // increment the HWM.
1513 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 1537 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1514 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 1538 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1515 // Don't expand unless it's significant 1539 // Don't expand unless it's significant
1516 if (expand_bytes >= MinMetaspaceExpansion) { 1540 if (expand_bytes >= MinMetaspaceExpansion) {
1517 size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes); 1541 size_t new_capacity_until_GC = 0;
1542 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1543 assert(succeeded, "Should always successfully increment HWM when at safepoint");
1544
1518 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1545 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1519 new_capacity_until_GC, 1546 new_capacity_until_GC,
1520 MetaspaceGCThresholdUpdater::ComputeNewSize); 1547 MetaspaceGCThresholdUpdater::ComputeNewSize);
1521 if (PrintGCDetails && Verbose) { 1548 if (PrintGCDetails && Verbose) {
1522 gclog_or_tty->print_cr(" expanding:" 1549 gclog_or_tty->print_cr(" expanding:"
2935 // narrow_klass_base is the lower of the metaspace base and the cds base 2962 // narrow_klass_base is the lower of the metaspace base and the cds base
2936 // (if cds is enabled). The narrow_klass_shift depends on the distance 2963 // (if cds is enabled). The narrow_klass_shift depends on the distance
2937 // between the lower base and higher address. 2964 // between the lower base and higher address.
2938 address lower_base; 2965 address lower_base;
2939 address higher_address; 2966 address higher_address;
2967 #if INCLUDE_CDS
2940 if (UseSharedSpaces) { 2968 if (UseSharedSpaces) {
2941 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2969 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2942 (address)(metaspace_base + compressed_class_space_size())); 2970 (address)(metaspace_base + compressed_class_space_size()));
2943 lower_base = MIN2(metaspace_base, cds_base); 2971 lower_base = MIN2(metaspace_base, cds_base);
2944 } else { 2972 } else
2973 #endif
2974 {
2945 higher_address = metaspace_base + compressed_class_space_size(); 2975 higher_address = metaspace_base + compressed_class_space_size();
2946 lower_base = metaspace_base; 2976 lower_base = metaspace_base;
2947 2977
2948 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2978 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2949 // If compressed class space fits in lower 32G, we don't need a base. 2979 // If compressed class space fits in lower 32G, we don't need a base.
2960 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2990 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2961 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2991 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2962 } 2992 }
2963 } 2993 }
2964 2994
2995 #if INCLUDE_CDS
2965 // Return TRUE if the specified metaspace_base and cds_base are close enough 2996 // Return TRUE if the specified metaspace_base and cds_base are close enough
2966 // to work with compressed klass pointers. 2997 // to work with compressed klass pointers.
2967 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2998 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2968 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2999 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2969 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3000 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2970 address lower_base = MIN2((address)metaspace_base, cds_base); 3001 address lower_base = MIN2((address)metaspace_base, cds_base);
2971 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3002 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2972 (address)(metaspace_base + compressed_class_space_size())); 3003 (address)(metaspace_base + compressed_class_space_size()));
2973 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3004 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2974 } 3005 }
3006 #endif
2975 3007
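
Both the shift selection above and can_use_cds_with_metaspace_addr reduce to the same distance test: take the lower of the metaspace and CDS bases, the higher of the two end addresses, and check that the span fits the unscaled narrow-Klass window. A small sketch of that check; the 4 GB window and 3-bit shift are assumptions chosen to match the "fits in lower 32G" comment, not the HotSpot constants themselves.

    #include <algorithm>
    #include <cstdint>

    // Assumed constants: a 32-bit unscaled encoding window and a 3-bit shift
    // for 8-byte aligned Klass pointers.
    static const uint64_t kUnscaledMax       = uint64_t(1) << 32;  // 4 GB
    static const int      kLogKlassAlignment = 3;

    // Can both regions be reached by an unshifted 32-bit offset from the lower
    // base?  Mirrors the distance test in can_use_cds_with_metaspace_addr().
    bool fits_unscaled(char* metaspace_base, uint64_t metaspace_size,
                       char* cds_base, uint64_t cds_size) {
      char* lower_base     = std::min(metaspace_base, cds_base);
      char* higher_address = std::max(metaspace_base + metaspace_size,
                                      cds_base + cds_size);
      return uint64_t(higher_address - lower_base) <= kUnscaledMax;
    }

    // If the unscaled test fails, shifting widens the encodable span:
    // 4 GB << 3 == 32 GB, the range referred to in the comment above.
    uint64_t shifted_encoding_max() {
      return kUnscaledMax << kLogKlassAlignment;
    }
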
2976 // Try to allocate the metaspace at the requested addr. 3008 // Try to allocate the metaspace at the requested addr.
2977 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3009 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2978 assert(using_class_space(), "called improperly"); 3010 assert(using_class_space(), "called improperly");
2979 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3011 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2989 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3021 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2990 _reserve_alignment, 3022 _reserve_alignment,
2991 large_pages, 3023 large_pages,
2992 requested_addr, 0); 3024 requested_addr, 0);
2993 if (!metaspace_rs.is_reserved()) { 3025 if (!metaspace_rs.is_reserved()) {
3026 #if INCLUDE_CDS
2994 if (UseSharedSpaces) { 3027 if (UseSharedSpaces) {
2995 size_t increment = align_size_up(1*G, _reserve_alignment); 3028 size_t increment = align_size_up(1*G, _reserve_alignment);
2996 3029
2997 // Keep trying to allocate the metaspace, increasing the requested_addr 3030 // Keep trying to allocate the metaspace, increasing the requested_addr
2998 // by 1GB each time, until we reach an address that will no longer allow 3031 // by 1GB each time, until we reach an address that will no longer allow
3003 addr = addr + increment; 3036 addr = addr + increment;
3004 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3037 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3005 _reserve_alignment, large_pages, addr, 0); 3038 _reserve_alignment, large_pages, addr, 0);
3006 } 3039 }
3007 } 3040 }
3008 3041 #endif
3009 // If no successful allocation then try to allocate the space anywhere. If 3042 // If no successful allocation then try to allocate the space anywhere. If
3010 // that fails then OOM doom. At this point we cannot try allocating the 3043 // that fails then OOM doom. At this point we cannot try allocating the
3011 // metaspace as if UseCompressedClassPointers is off because too much 3044 // metaspace as if UseCompressedClassPointers is off because too much
3012 // initialization has happened that depends on UseCompressedClassPointers. 3045 // initialization has happened that depends on UseCompressedClassPointers.
3013 // So, UseCompressedClassPointers cannot be turned off at this point. 3046 // So, UseCompressedClassPointers cannot be turned off at this point.
3022 } 3055 }
3023 3056
3024 // If we got here then the metaspace got allocated. 3057 // If we got here then the metaspace got allocated.
3025 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3058 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3026 3059
3060 #if INCLUDE_CDS
3027 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3061 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3028 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3062 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3029 FileMapInfo::stop_sharing_and_unmap( 3063 FileMapInfo::stop_sharing_and_unmap(
3030 "Could not allocate metaspace at a compatible address"); 3064 "Could not allocate metaspace at a compatible address");
3031 } 3065 }
3032 3066 #endif
3033 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3067 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3034 UseSharedSpaces ? (address)cds_base : 0); 3068 UseSharedSpaces ? (address)cds_base : 0);
3035 3069
3036 initialize_class_space(metaspace_rs); 3070 initialize_class_space(metaspace_rs);
3037 3071
3105 3139
3106 void Metaspace::global_initialize() { 3140 void Metaspace::global_initialize() {
3107 MetaspaceGC::initialize(); 3141 MetaspaceGC::initialize();
3108 3142
3109 // Initialize the alignment for shared spaces. 3143 // Initialize the alignment for shared spaces.
3110 int max_alignment = os::vm_page_size(); 3144 int max_alignment = os::vm_allocation_granularity();
3111 size_t cds_total = 0; 3145 size_t cds_total = 0;
3112 3146
3113 MetaspaceShared::set_max_alignment(max_alignment); 3147 MetaspaceShared::set_max_alignment(max_alignment);
3114 3148
3115 if (DumpSharedSpaces) { 3149 if (DumpSharedSpaces) {
3150 #if INCLUDE_CDS
3151 MetaspaceShared::estimate_regions_size();
3152
3116 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3153 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3117 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3154 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3118 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3155 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3119 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3156 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
3157
3158 // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
3159 uintx min_misc_code_size = align_size_up(
3160 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3161 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3162 max_alignment);
3163
3164 if (SharedMiscCodeSize < min_misc_code_size) {
3165 report_out_of_shared_space(SharedMiscCode);
3166 }
3120 3167
3121 // Initialize with the sum of the shared space sizes. The read-only 3168 // Initialize with the sum of the shared space sizes. The read-only
3122 // and read write metaspace chunks will be allocated out of this and the 3169 // and read write metaspace chunks will be allocated out of this and the
3123 // remainder is the misc code and data chunks. 3170 // remainder is the misc code and data chunks.
3124 cds_total = FileMapInfo::shared_spaces_size(); 3171 cds_total = FileMapInfo::shared_spaces_size();
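
The new guard a few lines up derives a lower bound for SharedMiscCodeSize from the vtable-patching layout: one (pointer + stub) entry per virtual slot per patched vtable, plus a fixed block of common code, rounded up to the alignment. A worked sketch of that arithmetic; every constant below is a made-up placeholder, the real values live in MetaspaceShared and are not part of this hunk.

    #include <cstddef>

    static const size_t kNumVirtuals        = 200;   // virtual slots per vtable (placeholder)
    static const size_t kVtblListSize       = 17;    // number of patched vtables (placeholder)
    static const size_t kVtblMethodSize     = 16;    // bytes of stub per slot (placeholder)
    static const size_t kVtblCommonCodeSize = 1024;  // shared trampoline code (placeholder)
    static const size_t kAlignment          = 4096;  // stand-in for max_alignment

    static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

    // Same shape as the min_misc_code_size expression above:
    //   (num_virtuals * vtbl_list_size) * (sizeof(void*) + vtbl_method_size)
    //       + vtbl_common_code_size, rounded up to the alignment.
    size_t min_misc_code_size() {
      size_t per_slot = sizeof(void*) + kVtblMethodSize;
      size_t raw = (kNumVirtuals * kVtblListSize) * per_slot + kVtblCommonCodeSize;
      return align_up(raw, kAlignment);
    }

If SharedMiscCodeSize is configured below this bound, dumping fails up front via report_out_of_shared_space rather than overflowing the misc-code region later.
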
3148 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3195 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3149 _space_list->current_virtual_space()->bottom()); 3196 _space_list->current_virtual_space()->bottom());
3150 } 3197 }
3151 3198
3152 Universe::set_narrow_klass_shift(0); 3199 Universe::set_narrow_klass_shift(0);
3153 #endif 3200 #endif // _LP64
3154 3201 #endif // INCLUDE_CDS
3155 } else { 3202 } else {
3203 #if INCLUDE_CDS
3156 // If using shared space, open the file that contains the shared space 3204 // If using shared space, open the file that contains the shared space
3157 // and map in the memory before initializing the rest of metaspace (so 3205 // and map in the memory before initializing the rest of metaspace (so
3158 // the addresses don't conflict) 3206 // the addresses don't conflict)
3159 address cds_address = NULL; 3207 address cds_address = NULL;
3160 if (UseSharedSpaces) { 3208 if (UseSharedSpaces) {
3161 FileMapInfo* mapinfo = new FileMapInfo(); 3209 FileMapInfo* mapinfo = new FileMapInfo();
3162 memset(mapinfo, 0, sizeof(FileMapInfo));
3163 3210
3164 // Open the shared archive file, read and validate the header. If 3211 // Open the shared archive file, read and validate the header. If
3165 // initialization fails, shared spaces [UseSharedSpaces] are 3212 // initialization fails, shared spaces [UseSharedSpaces] are
3166 // disabled and the file is closed. 3213 // disabled and the file is closed.
3167 // Map in spaces now also 3214 // Map in spaces now also
3168 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3215 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3169 FileMapInfo::set_current_info(mapinfo);
3170 cds_total = FileMapInfo::shared_spaces_size(); 3216 cds_total = FileMapInfo::shared_spaces_size();
3171 cds_address = (address)mapinfo->region_base(0); 3217 cds_address = (address)mapinfo->region_base(0);
3172 } else { 3218 } else {
3173 assert(!mapinfo->is_open() && !UseSharedSpaces, 3219 assert(!mapinfo->is_open() && !UseSharedSpaces,
3174 "archive file not closed or shared spaces not disabled."); 3220 "archive file not closed or shared spaces not disabled.");
3175 } 3221 }
3176 } 3222 }
3177 3223 #endif // INCLUDE_CDS
3178 #ifdef _LP64 3224 #ifdef _LP64
3179 // If UseCompressedClassPointers is set then allocate the metaspace area 3225 // If UseCompressedClassPointers is set then allocate the metaspace area
3180 // above the heap and above the CDS area (if it exists). 3226 // above the heap and above the CDS area (if it exists).
3181 if (using_class_space()) { 3227 if (using_class_space()) {
3182 if (UseSharedSpaces) { 3228 if (UseSharedSpaces) {
3229 #if INCLUDE_CDS
3183 char* cds_end = (char*)(cds_address + cds_total); 3230 char* cds_end = (char*)(cds_address + cds_total);
3184 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 3231 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3185 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 3232 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3233 #endif
3186 } else { 3234 } else {
3187 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); 3235 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3188 allocate_metaspace_compressed_klass_ptrs(base, 0); 3236 allocate_metaspace_compressed_klass_ptrs(base, 0);
3189 } 3237 }
3190 } 3238 }
3191 #endif 3239 #endif // _LP64
3192 3240
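
In the `#ifdef _LP64` block above, the compressed class space is anchored either just past the mapped CDS archive or just past the heap's reserved region, with the chosen address rounded up to the reserve alignment. A generic sketch of that round-up and base selection; round_ptr_up mirrors what align_ptr_up() is used for here but is not the HotSpot helper, and the parameter names are illustrative.

    #include <cstddef>
    #include <cstdint>

    // Round a pointer up to a power-of-two alignment.
    static inline char* round_ptr_up(char* p, uintptr_t alignment) {
      uintptr_t v = (reinterpret_cast<uintptr_t>(p) + alignment - 1) & ~(alignment - 1);
      return reinterpret_cast<char*>(v);
    }

    // Pick the class-space base: just past the mapped CDS archive when sharing
    // is on, otherwise just past the heap's reserved region.
    char* choose_class_space_base(bool use_shared_spaces,
                                  char* cds_address, size_t cds_total,
                                  char* heap_reserved_end,
                                  uintptr_t reserve_alignment) {
      char* base = use_shared_spaces ? cds_address + cds_total : heap_reserved_end;
      return round_ptr_up(base, reserve_alignment);
    }
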
3193 // Initialize these before initializing the VirtualSpaceList 3241 // Initialize these before initializing the VirtualSpaceList
3194 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 3242 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3195 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3243 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3196 // Make the first class chunk bigger than a medium chunk so it's not put 3244 // Make the first class chunk bigger than a medium chunk so it's not put
3303 3351
3304 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3352 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3305 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3353 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3306 assert(delta_bytes > 0, "Must be"); 3354 assert(delta_bytes > 0, "Must be");
3307 3355
3308 size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); 3356 size_t before = 0;
3309 3357 size_t after = 0;
3310 // capacity_until_GC might be updated concurrently, must calculate previous value. 3358 MetaWord* res;
3311 size_t before_inc = after_inc - delta_bytes; 3359 bool incremented;
3312 3360
3313 tracer()->report_gc_threshold(before_inc, after_inc, 3361 // Each thread increments the HWM at most once. Even if the thread fails to increment
3314 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3362 // the HWM, an allocation is still attempted. This is because another thread must then
3315 if (PrintGCDetails && Verbose) { 3363 // have incremented the HWM and therefore the allocation might still succeed.
3316 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3364 do {
3317 " to " SIZE_FORMAT, before_inc, after_inc); 3365 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3318 } 3366 res = allocate(word_size, mdtype);
3319 3367 } while (!incremented && res == NULL);
3320 return allocate(word_size, mdtype); 3368
3369 if (incremented) {
3370 tracer()->report_gc_threshold(before, after,
3371 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3372 if (PrintGCDetails && Verbose) {
3373 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3374 " to " SIZE_FORMAT, before, after);
3375 }
3376 }
3377
3378 return res;
3321 } 3379 }
3322 3380
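
The new expand_and_allocate pairs the single-shot CAS from inc_capacity_until_GC with a retry loop: a thread that loses the race to raise the high-water mark still retries the allocation, because the winning thread must have raised it, and only the thread whose increment succeeded reports the threshold change. A condensed sketch of that control flow; the two stubs stand in for the real MetaspaceGC and Metaspace calls.

    #include <cstddef>

    // Stubs standing in for MetaspaceGC::inc_capacity_until_GC and
    // Metaspace::allocate; illustrative only.
    static bool stub_inc_capacity(size_t /*delta*/, size_t* after, size_t* before) {
      *after = 0; *before = 0; return true;
    }
    static void* stub_allocate(size_t /*word_size*/) { return NULL; }

    void* expand_and_allocate_sketch(size_t word_size, size_t delta_bytes) {
      size_t before = 0, after = 0;
      void*  res = NULL;
      bool   incremented = false;

      // Each caller raises the high-water mark at most once.  If its CAS loses,
      // another thread has already raised the limit, so the allocation is
      // attempted again; the loop ends once we allocated or did our own increment.
      do {
        incremented = stub_inc_capacity(delta_bytes, &after, &before);
        res = stub_allocate(word_size);
      } while (!incremented && res == NULL);

      // Only the thread whose increment succeeded reports the threshold change
      // (report_gc_threshold(before, after, ...) in the hunk above).
      return res;
    }
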
3323 // Space allocated in the Metaspace. This may 3381 // Space allocated in the Metaspace. This may
3324 // be across several metadata virtual spaces. 3382 // be across several metadata virtual spaces.
3325 char* Metaspace::bottom() const { 3383 char* Metaspace::bottom() const {
3364 return capacity_words_slow(mdtype) * BytesPerWord; 3422 return capacity_words_slow(mdtype) * BytesPerWord;
3365 } 3423 }
3366 3424
3367 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3425 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3368 if (SafepointSynchronize::is_at_safepoint()) { 3426 if (SafepointSynchronize::is_at_safepoint()) {
3427 if (DumpSharedSpaces && PrintSharedSpaces) {
3428 record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3429 }
3430
3369 assert(Thread::current()->is_VM_thread(), "should be the VM thread"); 3431 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3370 // Don't take Heap_lock 3432 // Don't take Heap_lock
3371 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3433 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3372 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 3434 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3373 // Dark matter. Too small for dictionary. 3435 // Dark matter. Too small for dictionary.
3418 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3480 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3419 MetaWord* result = space->allocate(word_size, NonClassType); 3481 MetaWord* result = space->allocate(word_size, NonClassType);
3420 if (result == NULL) { 3482 if (result == NULL) {
3421 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3483 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3422 } 3484 }
3423 3485 if (PrintSharedSpaces) {
3424 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3486 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3487 }
3425 3488
3426 // Zero initialize. 3489 // Zero initialize.
3427 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3490 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3428 3491
3429 return result; 3492 return result;
3518 } 3581 }
3519 3582
3520 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3583 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3521 assert(DumpSharedSpaces, "sanity"); 3584 assert(DumpSharedSpaces, "sanity");
3522 3585
3523 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize); 3586 int byte_size = (int)word_size * HeapWordSize;
3587 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3588
3524 if (_alloc_record_head == NULL) { 3589 if (_alloc_record_head == NULL) {
3525 _alloc_record_head = _alloc_record_tail = rec; 3590 _alloc_record_head = _alloc_record_tail = rec;
3526 } else { 3591 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3527 _alloc_record_tail->_next = rec; 3592 _alloc_record_tail->_next = rec;
3528 _alloc_record_tail = rec; 3593 _alloc_record_tail = rec;
3529 } 3594 } else {
3595 // slow linear search, but this doesn't happen that often, and only when dumping
3596 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3597 if (old->_ptr == ptr) {
3598 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3599 int remain_bytes = old->_byte_size - byte_size;
3600 assert(remain_bytes >= 0, "sanity");
3601 old->_type = type;
3602
3603 if (remain_bytes == 0) {
3604 delete(rec);
3605 } else {
3606 address remain_ptr = address(ptr) + byte_size;
3607 rec->_ptr = remain_ptr;
3608 rec->_byte_size = remain_bytes;
3609 rec->_type = MetaspaceObj::DeallocatedType;
3610 rec->_next = old->_next;
3611 old->_byte_size = byte_size;
3612 old->_next = rec;
3613 }
3614 return;
3615 }
3616 }
3617 assert(0, "reallocating a freed pointer that was not recorded");
3618 }
3619 }
3620
3621 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3622 assert(DumpSharedSpaces, "sanity");
3623
3624 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3625 if (rec->_ptr == ptr) {
3626 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3627 rec->_type = MetaspaceObj::DeallocatedType;
3628 return;
3629 }
3630 }
3631
3632 assert(0, "deallocating a pointer that was not recorded");
3530 } 3633 }
3531 3634
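
record_allocation now also handles reuse of a previously freed slot when dumping: if the new block is smaller than the matching DeallocatedType record, the record is split, the front part taking the new type and the tail remaining deallocated. A compact sketch of that split on a singly linked list; the struct and enum below are simplified stand-ins for AllocRecord and MetaspaceObj::Type.

    #include <cassert>
    #include <cstddef>

    enum RecType { kAllocated, kDeallocated };
    struct Rec {
      char*   ptr;
      int     byte_size;
      RecType type;
      Rec*    next;
      Rec(char* p, int sz, RecType t) : ptr(p), byte_size(sz), type(t), next(NULL) {}
    };

    // Reuse a freed record for a new allocation of 'byte_size' bytes at 'ptr',
    // splitting it when the new block is smaller than the freed one.  This is
    // the slow-path branch of record_allocation() above in miniature.
    void reuse_freed_record(Rec* old_rec, char* ptr, int byte_size, RecType new_type) {
      assert(old_rec->ptr == ptr && old_rec->type == kDeallocated);
      int remain = old_rec->byte_size - byte_size;
      assert(remain >= 0);
      old_rec->type = new_type;
      if (remain > 0) {
        // The tail of the freed block stays on the list as deallocated space.
        Rec* tail = new Rec(ptr + byte_size, remain, kDeallocated);
        tail->next         = old_rec->next;
        old_rec->byte_size = byte_size;
        old_rec->next      = tail;
      }
    }
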
3532 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3635 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3533 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3636 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3534 3637