comparison src/os/solaris/vm/os_solaris.cpp @ 6197:d2a62e0f25eb

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author zgu
date Thu, 28 Jun 2012 17:03:16 -0400
parents 7432b9db36ff
children 65906dc96aa1
comparing 6174:74533f63b116 with 6197:d2a62e0f25eb
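Two patterns run through the hunks below. First, every raw C-heap allocation and release in this file (NEW_C_HEAP_ARRAY, FREE_C_HEAP_ARRAY, os::malloc) gains a memory-type argument, mtInternal, so native memory tracking can attribute the memory to a category. Second, the Solaris virtual-memory primitives (commit_memory, uncommit_memory, reserve_memory, map_memory, and friends) are renamed with a pd_ prefix. The sketch below is only a simplified model of the tagging idea, not HotSpot's implementation; MemTag, TaggedHeap and the tag names are hypothetical.

#include <cstdio>
#include <cstdlib>

// Hypothetical memory-type tags, standing in for HotSpot's mtInternal etc.
enum MemTag { tagInternal, tagThread, tagOther, tagCount };

class TaggedHeap {
  static size_t _used[tagCount];                  // bytes currently attributed to each tag
 public:
  static void* alloc(size_t bytes, MemTag tag) {
    _used[tag] += bytes;                          // account for the allocation first
    return ::malloc(bytes);
  }
  static void release(void* p, size_t bytes, MemTag tag) {
    _used[tag] -= bytes;                          // return the bytes to the category
    ::free(p);
  }
  static void report() {
    static const char* names[tagCount] = { "Internal", "Thread", "Other" };
    for (int t = 0; t < tagCount; t++) {
      std::printf("%-8s %zu bytes\n", names[t], _used[t]);
    }
  }
};
size_t TaggedHeap::_used[tagCount] = { 0 };

int main() {
  // Analogue of NEW_C_HEAP_ARRAY(processorid_t, n, mtInternal) in the hunks below.
  int* ids = (int*)TaggedHeap::alloc(16 * sizeof(int), tagInternal);
  TaggedHeap::report();
  TaggedHeap::release(ids, 16 * sizeof(int), tagInternal);
  return 0;
}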
  544  uint_t* id_length) {
  545  bool result = false;
  546  // Find the number of processors in the processor set.
  547  if (pset_info(pset, NULL, id_length, NULL) == 0) {
  548  // Make up an array to hold their ids.
- 549  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
+ 549  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  550  // Fill in the array with their processor ids.
  551  if (pset_info(pset, NULL, id_length, *id_array) == 0) {
  552  result = true;
  553  }
  554  }
  575  uint* id_length) {
  576  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  577  // Find the number of processors online.
  578  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  579  // Make up an array to hold their ids.
- 580  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
+ 580  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  581  // Processors need not be numbered consecutively.
  582  long found = 0;
  583  processorid_t next = 0;
  584  while (found < *id_length && next < MAX_PROCESSOR_ID) {
  585  processor_info_t info;
  627  max_id = MAX2(max_id, id_array[m]);
  628  }
  629  // The next id, to limit loops.
  630  const processorid_t limit_id = max_id + 1;
  631  // Make up markers for available processors.
- 632  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
+ 632  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  633  for (uint c = 0; c < limit_id; c += 1) {
  634  available_id[c] = false;
  635  }
  636  for (uint a = 0; a < id_length; a += 1) {
  637  available_id[id_array[a]] = true;
  664  if (board * processors_per_board + 0 >= limit_id) {
  665  board = 0;
  666  }
  667  }
  668  if (available_id != NULL) {
- 669  FREE_C_HEAP_ARRAY(bool, available_id);
+ 669  FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
  670  }
  671  return true;
  672  }
  673
  674  void os::set_native_thread_name(const char *name) {
  696  } else {
  697  result = false;
  698  }
  699  }
  700  if (id_array != NULL) {
- 701  FREE_C_HEAP_ARRAY(processorid_t, id_array);
+ 701  FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  702  }
  703  return result;
  704  }
  705
  706  bool os::bind_to_processor(uint processor_id) {
  769  //
  770  // Important note: if the location of libjvm.so changes this
  771  // code needs to be changed accordingly.
  772
  773  // The next few definitions allow the code to be verbatim:
- 774  #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
+ 774  #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
- 775  #define free(p) FREE_C_HEAP_ARRAY(char, p)
+ 775  #define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal)
  776  #define getenv(n) ::getenv(n)
  777
  778  #define EXTENSIONS_DIR "/lib/ext"
  779  #define ENDORSED_DIR "/lib/endorsed"
  780  #define COMMON_DIR "/usr/jdk/packages"
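The hunk above touches the macro shim that lets path-setup code copied verbatim from the JDK launcher compile inside the VM: malloc and free are redefined to route through the VM's C-heap macros, which now also pass mtInternal. A minimal sketch of that shim technique follows, with hypothetical names (tracked_alloc, tracked_free, TAG_INTERNAL); it is not the HotSpot code itself.

#include <cstdlib>
#include <cstring>

enum Tag { TAG_INTERNAL };

// Tracked-allocator stand-ins; defined before the macros so their bodies
// still see the real ::malloc/::free.
static void* tracked_alloc(size_t n, Tag t) { (void)t; return ::malloc(n); }
static void  tracked_free(void* p, Tag t)   { (void)t; ::free(p); }

// Redirect the standard names: code pasted after this point compiles
// unchanged but allocates through the tracked heap.
#define malloc(n) tracked_alloc((n), TAG_INTERNAL)
#define free(p)   tracked_free((p), TAG_INTERNAL)

// "Verbatim" code: written against plain malloc/free, unaware of the shim.
static char* dup_path(const char* s) {
  char* copy = (char*)malloc(std::strlen(s) + 1);
  std::strcpy(copy, s);
  return copy;
}

int main() {
  char* p = dup_path("/usr/jdk/packages");
  free(p);
  return 0;
}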
  1925  }
  1926  }
  1927  // release the storage
  1928  for (int i = 0 ; i < n ; i++) {
  1929  if (pelements[i] != NULL) {
- 1930  FREE_C_HEAP_ARRAY(char, pelements[i]);
+ 1930  FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
  1931  }
  1932  }
  1933  if (pelements != NULL) {
- 1934  FREE_C_HEAP_ARRAY(char*, pelements);
+ 1934  FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
  1935  }
  1936  } else {
  1937  snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  1938  }
  1939  }
  2660
  2661  Maxlibjsigsigs = Maxsignum;
  2662
  2663  // pending_signals has one int per signal
  2664  // The additional signal is for SIGEXIT - exit signal to signal_thread
- 2665  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
+ 2665  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
  2666  memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
  2667
  2668  if (UseSignalChaining) {
  2669  chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
- 2670  * (Maxsignum + 1));
+ 2670  * (Maxsignum + 1), mtInternal);
  2671  memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
- 2672  preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
+ 2672  preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
  2673  memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  2674  }
- 2675  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
+ 2675  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
  2676  memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
  2677  }
  2678
  2679  void os::signal_init_pd() {
  2680  int ret;
  2758  int os::vm_allocation_granularity() {
  2759  assert(page_size != -1, "must call os::init");
  2760  return page_size;
  2761  }
  2762
- 2763  bool os::commit_memory(char* addr, size_t bytes, bool exec) {
+ 2763  bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  2764  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  2765  size_t size = bytes;
  2766  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  2767  if (res != NULL) {
  2768  if (UseNUMAInterleaving) {
  2771  return true;
  2772  }
  2773  return false;
  2774  }
  2775
- 2776  bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
+ 2776  bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
  2777  bool exec) {
  2778  if (commit_memory(addr, bytes, exec)) {
  2779  if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
  2780  // If the large page size has been set and the VM
  2781  // is using large pages, use the large page size
  2801  }
  2802  return false;
  2803  }
  2804
  2805  // Uncommit the pages in a specified region.
- 2806  void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
+ 2806  void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  2807  if (madvise(addr, bytes, MADV_FREE) < 0) {
  2808  debug_only(warning("MADV_FREE failed."));
  2809  return;
  2810  }
  2811  }
  2812
- 2813  bool os::create_stack_guard_pages(char* addr, size_t size) {
+ 2813  bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  2814  return os::commit_memory(addr, size);
  2815  }
  2816
  2817  bool os::remove_stack_guard_pages(char* addr, size_t size) {
  2818  return os::uncommit_memory(addr, size);
  2819  }
  2820
  2821  // Change the page size in a given range.
- 2822  void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+ 2822  void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2823  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  2824  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  2825  if (UseLargePages && UseMPSS) {
  2826  Solaris::set_mpss_range(addr, bytes, alignment_hint);
  2827  }
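The renames in this hunk and the ones that follow (commit_memory to pd_commit_memory, free_memory to pd_free_memory, realign_memory to pd_realign_memory, and so on) move the Solaris-specific work under a pd_ ("platform dependent") name. This diff does not show the shared os:: layer, but presumably the un-prefixed functions become thin wrappers that perform the pd_ call and then record the virtual-memory operation for tracking. The sketch below is only an assumption about that wrapper pattern; VirtualMemoryLedger and pd_commit_memory_impl are invented names, not HotSpot APIs.

#include <cstdio>
#include <cstddef>

// Invented stand-in for a virtual-memory tracker.
struct VirtualMemoryLedger {
  static void record_commit(void* addr, size_t bytes) {
    std::printf("commit %zu bytes at %p\n", bytes, addr);
  }
};

// Platform-specific primitive (stubbed; on Solaris this would mmap/mprotect).
static bool pd_commit_memory_impl(void* addr, size_t bytes, bool exec) {
  (void)addr; (void)bytes; (void)exec;
  return true;
}

// Shared wrapper: do the platform work, then account for it on success.
static bool commit_memory(void* addr, size_t bytes, bool exec) {
  if (!pd_commit_memory_impl(addr, bytes, exec)) {
    return false;
  }
  VirtualMemoryLedger::record_commit(addr, bytes);
  return true;
}

int main() {
  static char region[4096];
  return commit_memory(region, sizeof(region), false) ? 0 : 1;
}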
  3004  p = addrs[addrs_count - 1] + page_size;
  3005  }
  3006  return end;
  3007  }
  3008
- 3009  bool os::uncommit_memory(char* addr, size_t bytes) {
+ 3009  bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  3010  size_t size = bytes;
  3011  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  3012  // uncommitted page. Otherwise, the read/write might succeed if we
  3013  // have enough swap space to back the physical page.
  3014  return
  3043  // uncommitted page. Otherwise, the read/write might succeed if we
  3044  // have enough swap space to back the physical page.
  3045  return mmap_chunk(addr, bytes, flags, PROT_NONE);
  3046  }
  3047
- 3048  char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+ 3048  char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  3049  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
  3050
  3051  guarantee(requested_addr == NULL || requested_addr == addr,
  3052  "OS failed to return requested mmap address.");
  3053  return addr;
  3054  }
  3055
  3056  // Reserve memory at an arbitrary address, only if that area is
  3057  // available (and not reserved for something else).
  3058
- 3059  char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+ 3059  char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  3060  const int max_tries = 10;
  3061  char* base[max_tries];
  3062  size_t size[max_tries];
  3063
  3064  // Solaris adds a gap between mmap'ed regions. The size of the gap
  3176  }
  3177
  3178  return (i < max_tries) ? requested_addr : NULL;
  3179  }
  3180
- 3181  bool os::release_memory(char* addr, size_t bytes) {
+ 3181  bool os::pd_release_memory(char* addr, size_t bytes) {
  3182  size_t size = bytes;
  3183  return munmap(addr, size) == 0;
  3184  }
  3185
  3186  static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  4790  return false;
  4791  }
  4792  lwpSize = 16*1024;
  4793  for (;;) {
  4794  ::lseek64 (lwpFile, 0, SEEK_SET);
- 4795  lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
+ 4795  lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
  4796  if (::read(lwpFile, lwpArray, lwpSize) < 0) {
  4797  if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
  4798  break;
  4799  }
  4800  if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
  4808  }
  4809  if (aslwpcount == 0) isT2 = true;
  4810  break;
  4811  }
  4812  lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
- 4813  FREE_C_HEAP_ARRAY(char, lwpArray); // retry.
+ 4813  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry.
  4814  }
  4815
- 4816  FREE_C_HEAP_ARRAY(char, lwpArray);
+ 4816  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  4817  ::close (lwpFile);
  4818  if (ThreadPriorityVerbose) {
  4819  if (isT2) tty->print_cr("We are running with a T2 libthread\n");
  4820  else tty->print_cr("We are not running with a T2 libthread\n");
  4821  }
  5135  if (UseNUMA) {
  5136  if (!Solaris::liblgrp_init()) {
  5137  UseNUMA = false;
  5138  } else {
  5139  size_t lgrp_limit = os::numa_get_groups_num();
- 5140  int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
+ 5140  int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
  5141  size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
- 5142  FREE_C_HEAP_ARRAY(int, lgrp_ids);
+ 5142  FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
  5143  if (lgrp_num < 2) {
  5144  // There's only one locality group, disable NUMA.
  5145  UseNUMA = false;
  5146  }
  5147  }
  5483  *bytes = end - cur;
  5484  return 1;
  5485  }
  5486
  5487  // Map a block of memory.
- 5488  char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+ 5488  char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
  5489  char *addr, size_t bytes, bool read_only,
  5490  bool allow_exec) {
  5491  int prot;
  5492  int flags;
  5493
  5515  return mapped_address;
  5516  }
  5517
  5518
  5519  // Remap a block of memory.
- 5520  char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+ 5520  char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
  5521  char *addr, size_t bytes, bool read_only,
  5522  bool allow_exec) {
  5523  // same as map_memory() on this OS
  5524  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  5525  allow_exec);
  5526  }
  5527
  5528
  5529  // Unmap a block of memory.
- 5530  bool os::unmap_memory(char* addr, size_t bytes) {
+ 5530  bool os::pd_unmap_memory(char* addr, size_t bytes) {
  5531  return munmap(addr, bytes) == 0;
  5532  }
  5533
  5534  void os::pause() {
  5535  char filename[MAX_PATH];