Mercurial > hg > graal-compiler
comparison src/os/linux/vm/os_linux.cpp @ 12144:d8e99408faad
8009062: poor performance of JNI AttachCurrentThread after fix for 7017193
Summary: don't re-evaluate stack bounds for main thread before install guard page
Reviewed-by: coleenp, dholmes, dlong
author | dsamersoff |
---|---|
date | Thu, 29 Aug 2013 21:48:23 +0400 |
parents | 4c84d351cca9 |
children | bb57d48691f5 |
comparison
equal
deleted
inserted
replaced
12142:76482cbba706 | 12144:d8e99408faad |
---|---|
2941 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, | 2941 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, |
2942 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); | 2942 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); |
2943 return res != (uintptr_t) MAP_FAILED; | 2943 return res != (uintptr_t) MAP_FAILED; |
2944 } | 2944 } |
2945 | 2945 |
2946 static | |
2947 address get_stack_commited_bottom(address bottom, size_t size) { | |
2948 address nbot = bottom; | |
2949 address ntop = bottom + size; | |
2950 | |
2951 size_t page_sz = os::vm_page_size(); | |
2952 unsigned pages = size / page_sz; | |
2953 | |
2954 unsigned char vec[1]; | |
2955 unsigned imin = 1, imax = pages + 1, imid; | |
2956 int mincore_return_value; | |
2957 | |
2958 while (imin < imax) { | |
2959 imid = (imax + imin) / 2; | |
2960 nbot = ntop - (imid * page_sz); | |
2961 | |
2962 // Use a trick with mincore to check whether the page is mapped or not. | |
2963 // mincore sets vec to 1 if page resides in memory and to 0 if page | |
2964 // is swapped out, but if the page we are asking for is unmapped | |
2965 // it returns -1,ENOMEM | |
2966 mincore_return_value = mincore(nbot, page_sz, vec); | |
2967 | |
2968 if (mincore_return_value == -1) { | |
2969 // Page is not mapped; go up | |
2970 // to find first mapped page | |
2971 if (errno != EAGAIN) { | |
2972 assert(errno == ENOMEM, "Unexpected mincore errno"); | |
2973 imax = imid; | |
2974 } | |
2975 } else { | |
2976 // Page is mapped; go down | |
2977 // to find first not mapped page | |
2978 imin = imid + 1; | |
2979 } | |
2980 } | |
2981 | |
2982 nbot = nbot + page_sz; | |
2983 | |
2984 // Adjust stack bottom one page up if last checked page is not mapped | |
2985 if (mincore_return_value == -1) { | |
2986 nbot = nbot + page_sz; | |
2987 } | |
2988 | |
2989 return nbot; | |
2990 } | |
2991 | |
2992 | |
2946 // Linux uses a growable mapping for the stack, and if the mapping for | 2993 // Linux uses a growable mapping for the stack, and if the mapping for |
2947 // the stack guard pages is not removed when we detach a thread the | 2994 // the stack guard pages is not removed when we detach a thread the |
2948 // stack cannot grow beyond the pages where the stack guard was | 2995 // stack cannot grow beyond the pages where the stack guard was |
2949 // mapped. If at some point later in the process the stack expands to | 2996 // mapped. If at some point later in the process the stack expands to |
2950 // that point, the Linux kernel cannot expand the stack any further | 2997 // that point, the Linux kernel cannot expand the stack any further |
2955 // so if the stack mapping has already grown beyond the guard pages at | 3002 // so if the stack mapping has already grown beyond the guard pages at |
2956 // the time we create them, we have to truncate the stack mapping. | 3003 // the time we create them, we have to truncate the stack mapping. |
2957 // So, we need to know the extent of the stack mapping when | 3004 // So, we need to know the extent of the stack mapping when |
2958 // create_stack_guard_pages() is called. | 3005 // create_stack_guard_pages() is called. |
2959 | 3006 |
2960 // Find the bounds of the stack mapping. Return true for success. | |
2961 // | |
2962 // We only need this for stacks that are growable: at the time of | 3007 // We only need this for stacks that are growable: at the time of |
2963 // writing thread stacks don't use growable mappings (i.e. those | 3008 // writing thread stacks don't use growable mappings (i.e. those |
2964 // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this | 3009 // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this |
2965 // only applies to the main thread. | 3010 // only applies to the main thread. |
2966 | 3011 |
2967 static | |
2968 bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) { | |
2969 | |
2970 char buf[128]; | |
2971 int fd, sz; | |
2972 | |
2973 if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) { | |
2974 return false; | |
2975 } | |
2976 | |
2977 const char kw[] = "[stack]"; | |
2978 const int kwlen = sizeof(kw)-1; | |
2979 | |
2980 // Address part of /proc/self/maps couldn't be more than 128 bytes | |
2981 while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) { | |
2982 if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) { | |
2983 // Extract addresses | |
2984 if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { | |
2985 uintptr_t sp = (uintptr_t) __builtin_frame_address(0); | |
2986 if (sp >= *bottom && sp <= *top) { | |
2987 ::close(fd); | |
2988 return true; | |
2989 } | |
2990 } | |
2991 } | |
2992 } | |
2993 | |
2994 ::close(fd); | |
2995 return false; | |
2996 } | |
2997 | |
2998 | |
2999 // If the (growable) stack mapping already extends beyond the point | 3012 // If the (growable) stack mapping already extends beyond the point |
3000 // where we're going to put our guard pages, truncate the mapping at | 3013 // where we're going to put our guard pages, truncate the mapping at |
3001 // that point by munmap()ping it. This ensures that when we later | 3014 // that point by munmap()ping it. This ensures that when we later |
3002 // munmap() the guard pages we don't leave a hole in the stack | 3015 // munmap() the guard pages we don't leave a hole in the stack |
3003 // mapping. This only affects the main/initial thread, but guard | 3016 // mapping. This only affects the main/initial thread |
3004 // against future OS changes | 3017 |
3005 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { | 3018 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { |
3006 uintptr_t stack_extent, stack_base; | 3019 |
3007 bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); | 3020 if (os::Linux::is_initial_thread()) { |
3008 if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { | 3021 // As we manually grow stack up to bottom inside create_attached_thread(), |
3009 assert(os::Linux::is_initial_thread(), | 3022 // it's likely that os::Linux::initial_thread_stack_bottom is mapped and |
3010 "growable stack in non-initial thread"); | 3023 // we don't need to do anything special. |
3011 if (stack_extent < (uintptr_t)addr) | 3024 // Check it first, before calling heavy function. |
3012 ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent); | 3025 uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom(); |
3026 unsigned char vec[1]; | |
3027 | |
3028 if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) { | |
3029 // Fallback to slow path on all errors, including EAGAIN | |
3030 stack_extent = (uintptr_t) get_stack_commited_bottom( | |
3031 os::Linux::initial_thread_stack_bottom(), | |
3032 (size_t)addr - stack_extent); | |
3033 } | |
3034 | |
3035 if (stack_extent < (uintptr_t)addr) { | |
3036 ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent)); | |
3037 } | |
3013 } | 3038 } |
3014 | 3039 |
3015 return os::commit_memory(addr, size, !ExecMem); | 3040 return os::commit_memory(addr, size, !ExecMem); |
3016 } | 3041 } |
3017 | 3042 |
3018 // If this is a growable mapping, remove the guard pages entirely by | 3043 // If this is a growable mapping, remove the guard pages entirely by |
3019 // munmap()ping them. If not, just call uncommit_memory(). This only | 3044 // munmap()ping them. If not, just call uncommit_memory(). This only |
3020 // affects the main/initial thread, but guard against future OS changes | 3045 // affects the main/initial thread, but guard against future OS changes |
3046 // It's safe to always unmap guard pages for initial thread because we | |
3047 // always place it right after end of the mapped region | |
3048 | |
3021 bool os::remove_stack_guard_pages(char* addr, size_t size) { | 3049 bool os::remove_stack_guard_pages(char* addr, size_t size) { |
3022 uintptr_t stack_extent, stack_base; | 3050 uintptr_t stack_extent, stack_base; |
3023 bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); | 3051 |
3024 if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { | 3052 if (os::Linux::is_initial_thread()) { |
3025 assert(os::Linux::is_initial_thread(), | |
3026 "growable stack in non-initial thread"); | |
3027 | |
3028 return ::munmap(addr, size) == 0; | 3053 return ::munmap(addr, size) == 0; |
3029 } | 3054 } |
3030 | 3055 |
3031 return os::uncommit_memory(addr, size); | 3056 return os::uncommit_memory(addr, size); |
3032 } | 3057 } |