comparison: src/share/vm/memory/metaspace.cpp @ 20375:6e0cb14ce59b

8046070: Class Data Sharing clean up and refactoring
Summary: Cleaned up CDS to be more configurable, maintainable and extensible
Reviewed-by: dholmes, coleenp, acorn, mchung

author   iklam
date     Thu, 21 Aug 2014 13:57:51 -0700
parents  ce8f6bb717c9
children 622c6e0ad4d6
comparing 20374:999824269b71 with 20375:6e0cb14ce59b
@@ -411,26 +411,29 @@ VirtualSpaceNode::VirtualSpaceNode

 // byte_size is the size of the associated virtualspace.
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

+#if INCLUDE_CDS
   // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
   // a configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
   if (DumpSharedSpaces) {
     bool large_pages = false; // No large pages when dumping the CDS archive.
...
     } else {
       // Get a mmap region anywhere if the SharedBaseAddress fails.
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
-  } else {
+  } else
+#endif
+  {
     bool large_pages = should_commit_large_pages_when_reserving(bytes);

     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   }

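The `} else` / `#endif` / `{` rewrite above is the recurring pattern of this changeset: the `else` keyword is compiled only when INCLUDE_CDS is defined, while the brace block after the `#endif` is compiled unconditionally, so a non-CDS build still executes the fallback path as a plain scope block. A minimal compilable sketch of the pattern, with a hypothetical FEATURE_X macro standing in for INCLUDE_CDS:

#include <cstdio>

#define FEATURE_X 1          // toggle to 0 to compile the feature out

static bool feature_x_requested() { return false; }  // only used when FEATURE_X is on

int main() {
#if FEATURE_X
  if (feature_x_requested()) {
    puts("feature path");
  } else
#endif
  {
    // With FEATURE_X the block is the else-branch; without it, the same
    // block is a plain anonymous scope that always runs.
    puts("default path");
  }
  return 0;
}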
@@ -2935,41 +2938,46 @@ Metaspace::set_narrow_klass_base_and_shift
   // narrow_klass_base is the lower of the metaspace base and the cds base
   // (if cds is enabled). The narrow_klass_shift depends on the distance
   // between the lower base and higher address.
   address lower_base;
   address higher_address;
+#if INCLUDE_CDS
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                           (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
-  } else {
+  } else
+#endif
+  {
     higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;

     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
...
     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
   }
 }

+#if INCLUDE_CDS
 // Return TRUE if the specified metaspace_base and cds_base are close enough
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                 (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
+#endif

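The logic above picks narrow_klass_base = min(metaspace_base, cds_base) and uses a zero shift whenever everything to be encoded lies within UnscaledClassSpaceMax (4G) of that base; only the non-CDS branch may fall back to shifting by LogKlassAlignmentInBytes, which is why can_use_cds_with_metaspace_addr() tests exactly the unscaled limit. A small compilable sketch of that decision, using plain integers for addresses; the constants mirror HotSpot's values but the names and the example layout are illustrative:

#include <cstdint>
#include <cstdio>
#include <algorithm>

static const uint64_t kUnscaledClassSpaceMax = uint64_t(1) << 32; // 4G: reachable with shift 0
static const int      kLogKlassAlignment     = 3;                 // klasses are 8-byte aligned

struct Encoding { uint64_t base; int shift; };

// Zero shift if every klass lies within 4G of the base; otherwise scale
// 32-bit offsets by the klass alignment (reaching 32G). CDS requires the
// zero-shift case, per the assert in the hunk above.
static Encoding choose_encoding(uint64_t lower_base, uint64_t higher_address) {
  Encoding e;
  e.base  = lower_base;
  e.shift = (higher_address - lower_base <= kUnscaledClassSpaceMax)
                ? 0 : kLogKlassAlignment;
  return e;
}

int main() {
  uint64_t cds_base = uint64_t(32) << 30;             // archive mapped at 32G
  uint64_t cds_end  = cds_base + (100u << 20);        // + 100M of shared spaces
  uint64_t cls_base = cds_end;                        // class space directly above
  uint64_t cls_end  = cls_base + (uint64_t(1) << 30); // + 1G class space
  Encoding e = choose_encoding(std::min(cds_base, cls_base),
                               std::max(cds_end, cls_end));
  printf("base = 0x%llx, shift = %d\n", (unsigned long long)e.base, e.shift);
  return 0;                                           // span ~1.1G <= 4G, so shift 0
}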
@@ -2976,62 +2984,64 @@ Metaspace::allocate_metaspace_compressed_klass_ptrs
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
...
   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                              _reserve_alignment,
                                              large_pages,
                                              requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);

       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow
...
         addr = addr + increment;
         metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
-
+#endif
     // If no successful allocation then try to allocate the space anywhere. If
     // that fails then OOM doom. At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
     // initialization has happened that depends on UseCompressedClassPointers.
     // So, UseCompressedClassPointers cannot be turned off at this point.
...
   }

   // If we got here then the metaspace got allocated.
   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

+#if INCLUDE_CDS
   // Verify that we can use shared spaces. Otherwise, turn off CDS.
   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
     FileMapInfo::stop_sharing_and_unmap(
         "Could not allocate metaspace at a compatible address");
   }
-
+#endif
   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                   UseSharedSpaces ? (address)cds_base : 0);

   initialize_class_space(metaspace_rs);

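When the preferred address is taken, the code above retries at 1GB steps while the candidate address still satisfies can_use_cds_with_metaspace_addr(), and then gives up and reserves anywhere. A self-contained sketch of that retry loop, where try_reserve_at() and still_compatible() are stand-ins for ReservedSpace and the compatibility check, and the 32G/34G numbers are made up for the demo:

#include <cstdint>
#include <cstdio>

typedef uint64_t addr_t;

// Stubs: pretend only addresses at or above 34G can be reserved, and that
// compatibility holds while the span from a 32G base stays within 4G.
static bool try_reserve_at(addr_t addr)   { return addr >= (uint64_t(34) << 30); }
static bool still_compatible(addr_t addr) { return addr - (uint64_t(32) << 30) <= (uint64_t(1) << 32); }

static addr_t reserve_with_retries(addr_t requested) {
  const addr_t increment = uint64_t(1) << 30;        // step up 1GB at a time
  addr_t addr = requested;
  while (!try_reserve_at(addr)) {
    if (addr + increment < addr ||                   // address arithmetic overflow
        !still_compatible(addr + increment)) {
      return 0;                                      // caller then reserves "anywhere"
    }
    addr += increment;
  }
  return addr;
}

int main() {
  addr_t got = reserve_with_retries(uint64_t(32) << 30);
  printf("reserved at %lluG\n", (unsigned long long)(got >> 30));  // prints 34
  return 0;
}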
@@ -3111,86 +3121,88 @@ Metaspace::global_initialize
   size_t cds_total = 0;

   MetaspaceShared::set_max_alignment(max_alignment);

   if (DumpSharedSpaces) {
+#if INCLUDE_CDS
     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

...
       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
                              _space_list->current_virtual_space()->bottom());
     }

     Universe::set_narrow_klass_shift(0);
-#endif
-
+#endif // _LP64
+#endif // INCLUDE_CDS
   } else {
+#if INCLUDE_CDS
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
     address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
-      memset(mapinfo, 0, sizeof(FileMapInfo));

       // Open the shared archive file, read and validate the header. If
       // initialization fails, shared spaces [UseSharedSpaces] are
       // disabled and the file is closed.
       // Map in spaces now also
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-        FileMapInfo::set_current_info(mapinfo);
         cds_total = FileMapInfo::shared_spaces_size();
         cds_address = (address)mapinfo->region_base(0);
       } else {
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
     }
-
+#endif // INCLUDE_CDS
 #ifdef _LP64
     // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
+#if INCLUDE_CDS
         char* cds_end = (char*)(cds_address + cds_total);
         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+#endif
       } else {
         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
-#endif
+#endif // _LP64

     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
     // Make the first class chunk bigger than a medium chunk so it's not put
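In the UseSharedSpaces path above, the class space base is the end of the mapped archive rounded up to the reservation alignment via align_ptr_up(). A small sketch of that placement arithmetic for a power-of-two alignment; the addresses and sizes are illustrative:

#include <cstdint>
#include <cstdio>
#include <cassert>

// Round value up to the next multiple of a power-of-two alignment; this is
// the arithmetic behind align_ptr_up() in the hunk above.
static uint64_t align_up(uint64_t value, uint64_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  uint64_t cds_address = uint64_t(32) << 30;   // archive base (illustrative)
  uint64_t cds_total   = (75u << 20) + 4096;   // shared spaces end off-alignment
  uint64_t alignment   = 1u << 20;             // say, a 1M reserve alignment
  uint64_t class_space_base = align_up(cds_address + cds_total, alignment);
  // 32G + 75M + 4K rounds up to 32G + 76M:
  printf("class space base = 0x%llx\n", (unsigned long long)class_space_base);
  return 0;
}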
@@ -3364,10 +3376,14 @@ Metaspace::deallocate
   return capacity_words_slow(mdtype) * BytesPerWord;
 }

 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
   if (SafepointSynchronize::is_at_safepoint()) {
+    if (DumpSharedSpaces && PrintSharedSpaces) {
+      record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
+    }
+
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
     if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter. Too small for dictionary.
@@ -3418,12 +3434,13 @@ Metaspace::allocate
   Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
   MetaWord* result = space->allocate(word_size, NonClassType);
   if (result == NULL) {
     report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
   }
-
+  if (PrintSharedSpaces) {
     space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+  }

   // Zero initialize.
   Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

   return result;
@@ -3518,17 +3535,57 @@ Metaspace::record_allocation
 }

 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
   assert(DumpSharedSpaces, "sanity");

-  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  int byte_size = (int)word_size * HeapWordSize;
+  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
+
   if (_alloc_record_head == NULL) {
     _alloc_record_head = _alloc_record_tail = rec;
-  } else {
+  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
     _alloc_record_tail->_next = rec;
     _alloc_record_tail = rec;
-  }
+  } else {
+    // slow linear search, but this doesn't happen that often, and only when dumping
+    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
+      if (old->_ptr == ptr) {
+        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
+        int remain_bytes = old->_byte_size - byte_size;
+        assert(remain_bytes >= 0, "sanity");
+        old->_type = type;
+
+        if (remain_bytes == 0) {
+          delete(rec);
+        } else {
+          address remain_ptr = address(ptr) + byte_size;
+          rec->_ptr = remain_ptr;
+          rec->_byte_size = remain_bytes;
+          rec->_type = MetaspaceObj::DeallocatedType;
+          rec->_next = old->_next;
+          old->_byte_size = byte_size;
+          old->_next = rec;
+        }
+        return;
+      }
+    }
+    assert(0, "reallocating a freed pointer that was not recorded");
+  }
+}
+
+void Metaspace::record_deallocation(void* ptr, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    if (rec->_ptr == ptr) {
+      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
+      rec->_type = MetaspaceObj::DeallocatedType;
+      return;
+    }
+  }
+
+  assert(0, "deallocating a pointer that was not recorded");
 }

 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

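The reworked record_allocation() above keeps a singly linked list of contiguous (ptr, byte_size, type) records while dumping: the common case appends at the tail, and an allocation that lands on a pointer record_deallocation() had marked DeallocatedType re-tags the old record, splitting off the unused remainder as a new deallocated record. A standalone sketch of that bookkeeping, with names simplified from the HotSpot originals:

#include <cassert>
#include <cstdio>

enum RecType { kAllocated, kDeallocated };

struct AllocRecord {
  char*        _ptr;
  int          _byte_size;
  RecType      _type;
  AllocRecord* _next;
  AllocRecord(char* p, int sz, RecType t) : _ptr(p), _byte_size(sz), _type(t), _next(NULL) {}
};

struct AllocLog {
  AllocRecord* _head;
  AllocRecord* _tail;
  AllocLog() : _head(NULL), _tail(NULL) {}

  void record_allocation(char* ptr, int byte_size) {
    AllocRecord* rec = new AllocRecord(ptr, byte_size, kAllocated);
    if (_head == NULL) {
      _head = _tail = rec;
    } else if (_tail->_ptr + _tail->_byte_size == ptr) {
      _tail->_next = rec;                    // common case: grows contiguously
      _tail = rec;
    } else {
      // Reusing a freed block: find it, re-tag it, split off any remainder.
      for (AllocRecord* old = _head; old; old = old->_next) {
        if (old->_ptr == ptr) {
          assert(old->_type == kDeallocated && old->_byte_size >= byte_size);
          old->_type = kAllocated;
          int remain = old->_byte_size - byte_size;
          if (remain == 0) {
            delete rec;                      // old record covers it exactly
          } else {
            rec->_ptr = ptr + byte_size;     // remainder stays free
            rec->_byte_size = remain;
            rec->_type = kDeallocated;
            rec->_next = old->_next;
            old->_byte_size = byte_size;
            old->_next = rec;
          }
          return;
        }
      }
      assert(false && "reallocating a freed pointer that was not recorded");
    }
  }

  void record_deallocation(char* ptr, int byte_size) {
    for (AllocRecord* rec = _head; rec; rec = rec->_next) {
      if (rec->_ptr == ptr) {
        assert(rec->_byte_size == byte_size);
        rec->_type = kDeallocated;
        return;
      }
    }
    assert(false && "deallocating a pointer that was not recorded");
  }
};

int main() {
  static char arena[64];
  AllocLog log;
  log.record_allocation(arena,      32);   // [0,32) allocated
  log.record_allocation(arena + 32, 32);   // [32,64) allocated, contiguous append
  log.record_deallocation(arena, 32);      // [0,32) freed
  log.record_allocation(arena, 24);        // reuse: [0,24) allocated, [24,32) stays free
  for (AllocRecord* r = log._head; r; r = r->_next)
    printf("[%2d,%2d) %s\n", int(r->_ptr - arena), int(r->_ptr - arena) + r->_byte_size,
           r->_type == kAllocated ? "allocated" : "free");
  return 0;
}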