comparison src/os/linux/vm/os_linux.cpp @ 6197:d2a62e0f25eb

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author zgu
date Thu, 28 Jun 2012 17:03:16 -0400
parents 7432b9db36ff
children 65906dc96aa1
comparing 6174:74533f63b116 with 6197:d2a62e0f25eb
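Two mechanical patterns account for nearly every hunk below. First, each C-heap allocation and free now carries a memory-type tag (mtInternal throughout this file) so native memory tracking can attribute the memory to a category. Second, the platform implementations of the os:: virtual-memory primitives gain a pd_ ("platform-dependent") prefix, leaving room for shared os:: wrappers that do the NMT bookkeeping around them. A minimal sketch of the allocation pattern, using only macros that appear in the hunks below:

    // Before: NEW_C_HEAP_ARRAY(char, len) / FREE_C_HEAP_ARRAY(char, p)
    // After: every call site names the NMT category it is charged to.
    char* p = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    // ... use p ...
    FREE_C_HEAP_ARRAY(char, p, mtInternal);   // the free carries the same tag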
369 // 369 //
370 // Important note: if the location of libjvm.so changes this 370 // Important note: if the location of libjvm.so changes this
371 // code needs to be changed accordingly. 371 // code needs to be changed accordingly.
372 372
373 // The next few definitions allow the code to be verbatim: 373 // The next few definitions allow the code to be verbatim:
374 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) 374 #define malloc(n, tag) (char*)NEW_C_HEAP_ARRAY(char, (n), (tag))
375 #define getenv(n) ::getenv(n) 375 #define getenv(n) ::getenv(n)
376 376
377 /* 377 /*
378 * See ld(1): 378 * See ld(1):
379 * The linker uses the following search paths to locate required 379 * The linker uses the following search paths to locate required
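Redefining malloc keeps the ld.so search-path emulation below verbatim while routing it through NMT-tagged C-heap arrays. Call sites such as malloc(n, mtInternal) in the next hunk expand roughly as follows (illustrative expansion only):

    //   malloc(n, mtInternal)
    //   ==> (char*)NEW_C_HEAP_ARRAY(char, (n), (mtInternal))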
637 # define _CS_GNU_LIBPTHREAD_VERSION 3 637 # define _CS_GNU_LIBPTHREAD_VERSION 3
638 # endif 638 # endif
639 639
640 size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0); 640 size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
641 if (n > 0) { 641 if (n > 0) {
642 char *str = (char *)malloc(n); 642 char *str = (char *)malloc(n, mtInternal);
643 confstr(_CS_GNU_LIBC_VERSION, str, n); 643 confstr(_CS_GNU_LIBC_VERSION, str, n);
644 os::Linux::set_glibc_version(str); 644 os::Linux::set_glibc_version(str);
645 } else { 645 } else {
646 // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version() 646 // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
647 static char _gnu_libc_version[32]; 647 static char _gnu_libc_version[32];
650 os::Linux::set_glibc_version(_gnu_libc_version); 650 os::Linux::set_glibc_version(_gnu_libc_version);
651 } 651 }
652 652
653 n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0); 653 n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
654 if (n > 0) { 654 if (n > 0) {
655 char *str = (char *)malloc(n); 655 char *str = (char *)malloc(n, mtInternal);
656 confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n); 656 confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
657 // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells 657 // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
658 // us "NPTL-0.29" even when we are running with LinuxThreads. Check if this 658 // us "NPTL-0.29" even when we are running with LinuxThreads. Check if this
659 // is the case. LinuxThreads has a hard limit on max number of threads. 659 // is the case. LinuxThreads has a hard limit on max number of threads.
660 // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value. 660 // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
1683 } 1683 }
1684 } 1684 }
1685 // release the storage 1685 // release the storage
1686 for (int i = 0 ; i < n ; i++) { 1686 for (int i = 0 ; i < n ; i++) {
1687 if (pelements[i] != NULL) { 1687 if (pelements[i] != NULL) {
1688 FREE_C_HEAP_ARRAY(char, pelements[i]); 1688 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1689 } 1689 }
1690 } 1690 }
1691 if (pelements != NULL) { 1691 if (pelements != NULL) {
1692 FREE_C_HEAP_ARRAY(char*, pelements); 1692 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1693 } 1693 }
1694 } else { 1694 } else {
1695 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1695 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1696 } 1696 }
1697 } 1697 }
2467 2467
2468 // NOTE: Linux kernel does not really reserve the pages for us. 2468 // NOTE: Linux kernel does not really reserve the pages for us.
2469 // All it does is to check if there are enough free pages 2469 // All it does is to check if there are enough free pages
2470 // left at the time of mmap(). This could be a potential 2470 // left at the time of mmap(). This could be a potential
2471 // problem. 2471 // problem.
2472 bool os::commit_memory(char* addr, size_t size, bool exec) { 2472 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2473 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2473 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2474 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, 2474 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2475 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); 2475 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
2476 if (res != (uintptr_t) MAP_FAILED) { 2476 if (res != (uintptr_t) MAP_FAILED) {
2477 if (UseNUMAInterleaving) { 2477 if (UseNUMAInterleaving) {
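The pd_ rename is what lets NMT observe virtual-memory operations without touching each platform's mmap logic: a shared wrapper in os.cpp calls the pd_ function and records the result. A minimal sketch of the expected wrapper shape; the MemTracker call is an assumption about the tracker API introduced elsewhere in this changeset, not code from this file:

    bool os::commit_memory(char* addr, size_t bytes, bool executable) {
      bool res = pd_commit_memory(addr, bytes, executable);  // platform mmap above
      if (res) {
        // assumed NMT hook: attribute the committed range to the caller
        MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
      }
      return res;
    }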
2490 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems. 2490 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2491 #ifndef MADV_HUGEPAGE 2491 #ifndef MADV_HUGEPAGE
2492 #define MADV_HUGEPAGE 14 2492 #define MADV_HUGEPAGE 14
2493 #endif 2493 #endif
2494 2494
2495 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, 2495 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2496 bool exec) { 2496 bool exec) {
2497 if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { 2497 if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2498 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2498 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2499 uintptr_t res = 2499 uintptr_t res =
2500 (uintptr_t) ::mmap(addr, size, prot, 2500 (uintptr_t) ::mmap(addr, size, prot,
2514 return true; 2514 return true;
2515 } 2515 }
2516 return false; 2516 return false;
2517 } 2517 }
2518 2518
2519 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2519 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2520 if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { 2520 if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2521 // We don't check the return value: madvise(MADV_HUGEPAGE) may not 2521 // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2522 // be supported or the memory may already be backed by huge pages. 2522 // be supported or the memory may already be backed by huge pages.
2523 ::madvise(addr, bytes, MADV_HUGEPAGE); 2523 ::madvise(addr, bytes, MADV_HUGEPAGE);
2524 } 2524 }
2525 } 2525 }
2526 2526
2527 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { 2527 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2528 // This method works by doing an mmap over an existing mapping and effectively discarding 2528 // This method works by doing an mmap over an existing mapping and effectively discarding
2529 // the existing pages. However, it won't work for SHM-based large pages that cannot be 2529 // the existing pages. However, it won't work for SHM-based large pages that cannot be
2530 // uncommitted at all. We don't do anything in this case to avoid creating a segment with 2530 // uncommitted at all. We don't do anything in this case to avoid creating a segment with
2531 // small pages on top of the SHM segment. This method always works for small pages, so we 2531 // small pages on top of the SHM segment. This method always works for small pages, so we
2532 // allow that in any case. 2532 // allow that in any case.
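The comment describes discard-by-overmap. A self-contained POSIX illustration of the technique (a hypothetical helper, not the elided function body):

    #include <sys/mman.h>

    // MAP_FIXED atomically replaces the existing mapping in place; the old
    // pages are dropped and the range reads back as zero-fill on next touch.
    static void discard_region(char* addr, size_t bytes) {
      ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
    }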
2644 2644
2645 2645
2646 if (numa_available() != -1) { 2646 if (numa_available() != -1) {
2647 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes")); 2647 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2648 // Create a cpu -> node mapping 2648 // Create a cpu -> node mapping
2649 _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true); 2649 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2650 rebuild_cpu_to_node_map(); 2650 rebuild_cpu_to_node_map();
2651 return true; 2651 return true;
2652 } 2652 }
2653 } 2653 }
2654 } 2654 }
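C-heap placement new for ResourceObj-derived containers now takes the memory type next to the allocation strategy, so heap-lifetime data structures are attributed as well. The call-site pattern from the hunk above:

    // GrowableArray backed by the C heap, charged to mtInternal; the trailing
    // 'true' puts the element storage on the C heap too.
    GrowableArray<int>* a = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);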
2674 2674
2675 cpu_to_node()->clear(); 2675 cpu_to_node()->clear();
2676 cpu_to_node()->at_grow(cpu_num - 1); 2676 cpu_to_node()->at_grow(cpu_num - 1);
2677 size_t node_num = numa_get_groups_num(); 2677 size_t node_num = numa_get_groups_num();
2678 2678
2679 unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size); 2679 unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
2680 for (size_t i = 0; i < node_num; i++) { 2680 for (size_t i = 0; i < node_num; i++) {
2681 if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) { 2681 if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
2682 for (size_t j = 0; j < cpu_map_valid_size; j++) { 2682 for (size_t j = 0; j < cpu_map_valid_size; j++) {
2683 if (cpu_map[j] != 0) { 2683 if (cpu_map[j] != 0) {
2684 for (size_t k = 0; k < BitsPerCLong; k++) { 2684 for (size_t k = 0; k < BitsPerCLong; k++) {
2688 } 2688 }
2689 } 2689 }
2690 } 2690 }
2691 } 2691 }
2692 } 2692 }
2693 FREE_C_HEAP_ARRAY(unsigned long, cpu_map); 2693 FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
2694 } 2694 }
2695 2695
2696 int os::Linux::get_node_by_cpu(int cpu_id) { 2696 int os::Linux::get_node_by_cpu(int cpu_id) {
2697 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { 2697 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2698 return cpu_to_node()->at(cpu_id); 2698 return cpu_to_node()->at(cpu_id);
2707 os::Linux::numa_available_func_t os::Linux::_numa_available; 2707 os::Linux::numa_available_func_t os::Linux::_numa_available;
2708 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; 2708 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
2709 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; 2709 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
2710 unsigned long* os::Linux::_numa_all_nodes; 2710 unsigned long* os::Linux::_numa_all_nodes;
2711 2711
2712 bool os::uncommit_memory(char* addr, size_t size) { 2712 bool os::pd_uncommit_memory(char* addr, size_t size) {
2713 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, 2713 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2714 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); 2714 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2715 return res != (uintptr_t) MAP_FAILED; 2715 return res != (uintptr_t) MAP_FAILED;
2716 } 2716 }
2717 2717
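Uncommitting overmaps the range with PROT_NONE|MAP_NORESERVE: the backing pages and swap reservation are released, but the address range stays claimed so nothing else can map into it. A hypothetical lifecycle using this file's pd_ primitives directly (real callers go through the shared os:: wrappers so NMT sees each transition):

    char* base = os::pd_reserve_memory(1024 * 1024, NULL, 0);  // address space only
    os::pd_commit_memory(base, 65536, false);                  // back it with RW pages
    os::pd_uncommit_memory(base, 65536);                       // back to PROT_NONE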
2772 // where we're going to put our guard pages, truncate the mapping at 2772 // where we're going to put our guard pages, truncate the mapping at
2773 // that point by munmap()ping it. This ensures that when we later 2773 // that point by munmap()ping it. This ensures that when we later
2774 // munmap() the guard pages we don't leave a hole in the stack 2774 // munmap() the guard pages we don't leave a hole in the stack
2775 // mapping. This only affects the main/initial thread, but we guard 2775 // mapping. This only affects the main/initial thread, but we guard
2776 // against future OS changes. 2776 // against future OS changes.
2777 bool os::create_stack_guard_pages(char* addr, size_t size) { 2777 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2778 uintptr_t stack_extent, stack_base; 2778 uintptr_t stack_extent, stack_base;
2779 bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); 2779 bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
2780 if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { 2780 if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
2781 assert(os::Linux::is_initial_thread(), 2781 assert(os::Linux::is_initial_thread(),
2782 "growable stack in non-initial thread"); 2782 "growable stack in non-initial thread");
2845 // 2845 //
2846 static int anon_munmap(char * addr, size_t size) { 2846 static int anon_munmap(char * addr, size_t size) {
2847 return ::munmap(addr, size) == 0; 2847 return ::munmap(addr, size) == 0;
2848 } 2848 }
2849 2849
2850 char* os::reserve_memory(size_t bytes, char* requested_addr, 2850 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2851 size_t alignment_hint) { 2851 size_t alignment_hint) {
2852 return anon_mmap(requested_addr, bytes, (requested_addr != NULL)); 2852 return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2853 } 2853 }
2854 2854
2855 bool os::release_memory(char* addr, size_t size) { 2855 bool os::pd_release_memory(char* addr, size_t size) {
2856 return anon_munmap(addr, size); 2856 return anon_munmap(addr, size);
2857 } 2857 }
2858 2858
2859 static address highest_vm_reserved_address() { 2859 static address highest_vm_reserved_address() {
2860 return _highest_vm_reserved_address; 2860 return _highest_vm_reserved_address;
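anon_mmap, the reserve-side counterpart of anon_munmap, falls outside this excerpt; a reconstruction of its usual shape in this file, with the details to be treated as assumptions:

    static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
      int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
      if (fixed) flags |= MAP_FIXED;
      // PROT_NONE reserves address space without committing backing store;
      // pd_commit_memory() later remaps committed slices read-write.
      char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
      return addr == MAP_FAILED ? NULL : addr;
    }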
3147 } 3147 }
3148 3148
3149 // Reserve memory at an arbitrary address, only if that area is 3149 // Reserve memory at an arbitrary address, only if that area is
3150 // available (and not reserved for something else). 3150 // available (and not reserved for something else).
3151 3151
3152 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3152 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3153 const int max_tries = 10; 3153 const int max_tries = 10;
3154 char* base[max_tries]; 3154 char* base[max_tries];
3155 size_t size[max_tries]; 3155 size_t size[max_tries];
3156 const size_t gap = 0x000000; 3156 const size_t gap = 0x000000;
3157 3157
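The elided loop probes rather than forcing MAP_FIXED over whatever is already mapped; a sketch of the strategy in terms of the locals declared above (not the verbatim body):

    // for (int i = 0; i < max_tries; i++) {
    //   base[i] = anon_mmap(requested_addr, bytes, false);  // no MAP_FIXED
    //   if (base[i] == requested_addr) break;   // kernel honored the hint
    //   // otherwise record the miss in base[]/size[] and try again
    // }
    // all recorded misses are munmap()ed before returning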
4669 // is expected to return 0 on failure and 1 on success to the jdk. 4669 // is expected to return 0 on failure and 1 on success to the jdk.
4670 return (ret < 0) ? 0 : 1; 4670 return (ret < 0) ? 0 : 1;
4671 } 4671 }
4672 4672
4673 // Map a block of memory. 4673 // Map a block of memory.
4674 char* os::map_memory(int fd, const char* file_name, size_t file_offset, 4674 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4675 char *addr, size_t bytes, bool read_only, 4675 char *addr, size_t bytes, bool read_only,
4676 bool allow_exec) { 4676 bool allow_exec) {
4677 int prot; 4677 int prot;
4678 int flags = MAP_PRIVATE; 4678 int flags = MAP_PRIVATE;
4679 4679
4699 return mapped_address; 4699 return mapped_address;
4700 } 4700 }
4701 4701
4702 4702
4703 // Remap a block of memory. 4703 // Remap a block of memory.
4704 char* os::remap_memory(int fd, const char* file_name, size_t file_offset, 4704 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4705 char *addr, size_t bytes, bool read_only, 4705 char *addr, size_t bytes, bool read_only,
4706 bool allow_exec) { 4706 bool allow_exec) {
4707 // same as map_memory() on this OS 4707 // same as map_memory() on this OS
4708 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 4708 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4709 allow_exec); 4709 allow_exec);
4710 } 4710 }
4711 4711
4712 4712
4713 // Unmap a block of memory. 4713 // Unmap a block of memory.
4714 bool os::unmap_memory(char* addr, size_t bytes) { 4714 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4715 return munmap(addr, bytes) == 0; 4715 return munmap(addr, bytes) == 0;
4716 } 4716 }
4717 4717
4718 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time); 4718 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
4719 4719
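As with the commit path, the mapping primitives above are expected to sit behind shared os:: wrappers that add the NMT recording; an assumed sketch (the MemTracker name is an assumption, not code from this file):

    bool os::unmap_memory(char* addr, size_t bytes) {
      bool result = pd_unmap_memory(addr, bytes);
      if (result) {
        // assumed NMT hook: drop the tracked reservation for this range
        MemTracker::record_virtual_memory_release((address)addr, bytes);
      }
      return result;
    }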