comparison src/os/windows/vm/os_windows.cpp @ 11173:6b0fd0964b87

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 31 Jul 2013 11:00:54 +0200
parents 836a62f43af9 af21010d1062
children 3cce976666d9
comparing 10912:4ea54634f03e with 11173:6b0fd0964b87
@@ -1418 +1418 @@
   return 0;
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
                                      int buflen, int* offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
   // return the full path to the DLL file, sometimes it returns path
   // to the corresponding PDB file (debug info); sometimes it only
   // returns partial path, which makes life painful.
 
   struct _modinfo mi;
   mi.addr = addr;
   mi.full_path = buf;
   mi.buflen = buflen;
   int pid = os::current_process_id();
   if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
     // buf already contains path name
     if (offset) *offset = addr - mi.base_addr;
     return true;
-  } else {
-    if (buf) buf[0] = '\0';
-    if (offset) *offset = -1;
-    return false;
   }
+
+  buf[0] = '\0';
+  if (offset) *offset = -1;
+  return false;
 }
 
 bool os::dll_address_to_function_name(address addr, char *buf,
                                       int buflen, int *offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   if (Decoder::decode(addr, buf, buflen, offset)) {
     return true;
   }
   if (offset != NULL) *offset = -1;
-  if (buf != NULL) buf[0] = '\0';
+  buf[0] = '\0';
   return false;
 }
 
 // save the start and end address of jvm.dll into param[0] and param[1]
 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
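
Both lookups now require a destination buffer; only the offset out-parameter stays optional. A minimal caller sketch, assuming a pc value to resolve and HotSpot's usual O_BUFLEN and tty names; the surrounding error-report formatting is illustrative, not from this file:

    // Hypothetical caller: resolve a pc into "library+offset (function+offset)".
    char lib[O_BUFLEN];                 // buf is mandatory after this change
    char fn[O_BUFLEN];
    int lib_offset, fn_offset;
    if (os::dll_address_to_library_name(pc, lib, sizeof(lib), &lib_offset)) {
      tty->print("%s+0x%x", lib, lib_offset);
    }
    if (os::dll_address_to_function_name(pc, fn, sizeof(fn), &fn_offset)) {
      tty->print(" (%s+0x%x)", fn, fn_offset);
    }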
@@ -2331 +2337 @@
   address pc = (address) exceptionInfo->ContextRecord->Rip;
 #else
   address pc = (address) exceptionInfo->ContextRecord->Eip;
 #endif
   Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady
+
+  // Handle SafeFetch32 and SafeFetchN exceptions.
+  if (StubRoutines::is_safefetch_fault(pc)) {
+    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
+  }
 
 #ifndef _WIN64
   // Execution protection violation - win32 running on AMD64 only
   // Handled first to avoid misdiagnosis as a "normal" access violation;
   // This is safe to do because we have a new/unique ExceptionInformation
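
The added check brings the Windows exception filter in line with the POSIX signal handlers: a fault whose pc lies inside the SafeFetch32/SafeFetchN stub is not treated as an error; execution is continued at the stub's continuation point, which makes the stub return the caller-supplied error value. A hedged sketch of the calling side, assuming HotSpot's standard SafeFetch32(int* adr, int errValue) signature:

    // Probe memory that may be unmapped without risking a VM crash.
    int* probe = (int*) suspect_addr;   // suspect_addr is a hypothetical name
    int v = SafeFetch32(probe, -1);     // -1 is a sentinel chosen by the caller
    if (v == -1) {
      // Either the word really was -1 or the address was unreadable;
      // callers pick a sentinel that is unambiguous for their situation.
    }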
@@ -2539 +2550 @@
         address addr = (address) exceptionRecord->ExceptionInformation[1];
         if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
           addr = (address)((uintptr_t)addr &
                            (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
           os::commit_memory((char *)addr, thread->stack_base() - addr,
-                            false );
+                            !ExecMem);
           return EXCEPTION_CONTINUE_EXECUTION;
         }
         else
 #endif
         {
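
The mask rounds the faulting address down to a page boundary before committing everything from there up to the stack base. A small worked example of the same arithmetic; the concrete numbers are assumptions for illustration:

    // With a 4K page, os::vm_page_size() == 0x1000, so the mask is ~0xFFF.
    // An address like 0x7ff12345 then rounds down to 0x7ff12000, and
    // [0x7ff12000, stack_base) is committed to cover the touched page.
    uintptr_t page_mask = ~((uintptr_t)os::vm_page_size() - (uintptr_t)1);
    address aligned = (address)((uintptr_t)addr & page_mask);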
@@ -2704 +2715 @@
   }
   return (address)-1;
 }
 #endif
 
+#ifndef PRODUCT
+void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
+  // Install a win32 structured exception handler around the test
+  // function call so the VM can generate an error dump if needed.
+  __try {
+    (*funcPtr)();
+  } __except(topLevelExceptionFilter(
+             (_EXCEPTION_POINTERS*)_exception_info())) {
+    // Nothing to do.
+  }
+}
+#endif
+
 // Virtual Memory
 
 int os::vm_page_size() { return os::win32::vm_page_size(); }
 int os::vm_allocation_granularity() {
   return os::win32::vm_allocation_granularity();
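
call_test_func_with_wrapper exists only in non-product builds; it lets internal VM test entry points run under the same structured exception filter as regular VM code, so a crashing test still produces an error dump. A hedged sketch of how a launcher might use it; the test function and helper are hypothetical:

    #ifndef PRODUCT
    static void run_internal_vm_tests() {       // hypothetical test entry point
      // ... individual unit-test functions would be called here ...
    }

    void run_tests_with_error_reporting() {     // hypothetical helper
      os::win32::call_test_func_with_wrapper(run_internal_vm_tests);
    }
    #endif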
@@ -2890 +2914 @@
                         size_of_reserve,  // size of Reserve
                         MEM_RESERVE,
                         PAGE_READWRITE);
   // If reservation failed, return NULL
   if (p_buf == NULL) return NULL;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
   os::release_memory(p_buf, bytes + chunk_size);
 
   // we still need to round up to a page boundary (in case we are using large pages)
   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
   // instead we handle this in the bytes_to_rq computation below
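
This is the probing path used when NUMA interleaving is on: a large region is reserved only to discover a usable base address, recorded with NMT (now with an explicit mtNone memory type), and immediately released again so NMT's reserve and release records stay balanced. A condensed restatement of that pattern; the reserved and released extents cover the same range in the surrounding code:

    // Reserve to find a contiguous base, tell NMT, then release right away.
    char* p_buf = (char*) VirtualAlloc(NULL, size_of_reserve, MEM_RESERVE, PAGE_READWRITE);
    if (p_buf == NULL) return NULL;
    MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
    os::release_memory(p_buf, bytes + chunk_size);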
@@ -2956 +2980 @@
     size_t bytes_to_release = bytes - bytes_remaining;
     // NMT has yet to record any individual blocks, so it
     // need to create a dummy 'reserve' record to match
     // the release.
     MemTracker::record_virtual_memory_reserve((address)p_buf,
-      bytes_to_release, CALLER_PC);
+      bytes_to_release, mtNone, CALLER_PC);
     os::release_memory(p_buf, bytes_to_release);
   }
 #ifdef ASSERT
   if (should_inject_error) {
     if (TracePageSizes && Verbose) {
@@ -2976 +3000 @@
     count++;
   }
   // Although the memory is allocated individually, it is returned as one.
   // NMT records it as one block.
   address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
   if ((flags & MEM_COMMIT) != 0) {
-    MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
+  } else {
+    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
   }
 
   // made it this far, success
   return p_buf;
 }
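
Under the new NMT API the old reserve-then-commit pair is collapsed into a single record_virtual_memory_reserve_and_commit call when the OS allocation used MEM_COMMIT, and every record carries an explicit memory type (mtNone at these call sites). A minimal sketch of the resulting bookkeeping pattern; the VirtualAlloc arguments are placeholders:

    char* p = (char*) VirtualAlloc(NULL, bytes, flags, PAGE_READWRITE);
    if (p != NULL) {
      address pc = CALLER_PC;
      if ((flags & MEM_COMMIT) != 0) {
        // reserved and committed in one OS call -> one combined NMT record
        MemTracker::record_virtual_memory_reserve_and_commit((address)p, bytes, mtNone, pc);
      } else {
        // reserved only -> plain reserve record
        MemTracker::record_virtual_memory_reserve((address)p, bytes, mtNone, pc);
      }
    }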
@@ -3169 +3194 @@
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
-      MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
-      MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
+      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
     }
 
     return res;
   }
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
   assert(base != NULL, "Sanity check");
-  // Memory allocated via reserve_memory_special() is committed
-  MemTracker::record_virtual_memory_uncommit((address)base, bytes);
   return release_memory(base, bytes);
 }
 
 void os::print_statistics() {
+}
+
+static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
+  int err = os::get_last_error();
+  char buf[256];
+  size_t buf_len = os::lasterror(buf, sizeof(buf));
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
+          exec, buf_len != 0 ? buf : "<no_error_string>", err);
 }
 
 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
@@ -3201 +3232 @@
 
   // unless we have NUMAInterleaving enabled, the range of a commit
   // is always within a reserve covered by a single VirtualAlloc
   // in that case we can just do a single commit for the requested size
   if (!UseNUMAInterleaving) {
-    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
+    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
+      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
+      return false;
+    }
     if (exec) {
       DWORD oldprot;
       // Windows doc says to use VirtualProtect to get execute permissions
-      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
+      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
+        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
+        return false;
+      }
     }
     return true;
   } else {
 
     // when NUMAInterleaving is enabled, the commit might cover a range that
@@ -3220 +3257 @@
     char * next_alloc_addr = addr;
     while (bytes_remaining > 0) {
       MEMORY_BASIC_INFORMATION alloc_info;
       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
-      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
+      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
+                       PAGE_READWRITE) == NULL) {
+        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
+                                            exec);)
         return false;
+      }
       if (exec) {
         DWORD oldprot;
-        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
+        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
+                            PAGE_EXECUTE_READWRITE, &oldprot)) {
+          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
+                                              exec);)
           return false;
+        }
       }
       bytes_remaining -= bytes_to_rq;
       next_alloc_addr += bytes_to_rq;
     }
   }
   return true;
 }
 
 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                           bool exec) {
-  return commit_memory(addr, size, exec);
+  // alignment_hint is ignored on this OS
+  return pd_commit_memory(addr, size, exec);
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  if (!pd_commit_memory(addr, size, exec)) {
+    warn_fail_commit_memory(addr, size, exec);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  // alignment_hint is ignored on this OS
+  pd_commit_memory_or_exit(addr, size, exec, mesg);
 }
 
 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
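
The two pd_commit_memory_or_exit overloads added above back a commit_memory_or_exit entry point: callers that cannot recover from a failed commit get a non-product warning plus an immediate out-of-memory abort carrying their own message, instead of having to check a boolean result. A hedged sketch of the calling convention; the variable names and message text are illustrative:

    // Caller that must have the memory or stop the VM with a clear reason.
    os::commit_memory_or_exit(region_base, region_size, !ExecMem,
                              "expanding card table");   // message is illustrative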
@@ -3255 +3317 @@
 bool os::pd_release_memory(char* addr, size_t bytes) {
   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
 }
 
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   return os::uncommit_memory(addr, size);
 }
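
Instead of a bare true/false executable flag, such call sites now spell out the intent with HotSpot's ExecMem constant. A tiny sketch of the convention, assuming ExecMem is defined as true in os.hpp:

    os::commit_memory(guard_base, guard_size, !ExecMem);   // plain data pages
    os::commit_memory(code_base,  code_size,   ExecMem);   // executable pages
    // guard_base, guard_size, code_base and code_size are hypothetical names.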
@@ -3279 +3341 @@
 
   DWORD old_status;
 
   // Strange enough, but on Win32 one can change protection only for committed
   // memory, not a big deal anyway, as bytes less or equal than 64K
-  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
-    fatal("cannot commit protection page");
+  if (!is_committed) {
+    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
+                          "cannot commit protection page");
   }
   // One cannot use os::guard_memory() here, as on Win32 guard page
   // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
   //
   // Pages in the region become guard pages. Any attempt to access a guard page
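
The comment above is why os::guard_memory() is avoided here: a Win32 PAGE_GUARD page raises STATUS_GUARD_PAGE_VIOLATION once and then silently loses its guard status, while this code needs a protection that keeps faulting until it is explicitly changed back. A short sketch of the distinction using the Win32 API directly; the page variable is a placeholder:

    DWORD old_prot;
    // One-shot: first touch raises STATUS_GUARD_PAGE_VIOLATION, then the guard bit is cleared.
    VirtualProtect(page, os::vm_page_size(), PAGE_READWRITE | PAGE_GUARD, &old_prot);
    // Persistent: every access faults until the protection is changed again.
    VirtualProtect(page, os::vm_page_size(), PAGE_NOACCESS, &old_prot);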
@@ -4639 +4702 @@
     }
   } else {
     jio_fprintf(stderr,
       "Could not open pause file '%s', continuing immediately.\n", filename);
   }
 }
+
+os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
+  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
+}
+
+/*
+ * See the caveats for this class in os_windows.hpp
+ * Protects the callback call so that raised OS EXCEPTIONS causes a jump back
+ * into this method and returns false. If no OS EXCEPTION was raised, returns
+ * true.
+ * The callback is supposed to provide the method that should be protected.
+ */
+bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
+  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
+         "crash_protection already set?");
+
+  bool success = true;
+  __try {
+    WatcherThread::watcher_thread()->set_crash_protection(this);
+    cb.call();
+  } __except(EXCEPTION_EXECUTE_HANDLER) {
+    // only for protection, nothing to do
+    success = false;
+  }
+  WatcherThread::watcher_thread()->set_crash_protection(NULL);
+  return success;
+}
 
 // An Event wraps a win32 "CreateEvent" kernel handle.
 //
 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
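
The WatcherThreadCrashProtection code added just before the Event comment lets the WatcherThread run a periodic task such that a fault inside the callback is swallowed by the SEH handler instead of killing the VM. A hedged usage sketch; the callback class and helper function are hypothetical, only os::CrashProtectionCallback and WatcherThreadCrashProtection themselves come from the change:

    class SampleStatisticsCallback : public os::CrashProtectionCallback {  // hypothetical
     public:
      virtual void call() {
        // ... read counters that may touch memory being concurrently unmapped ...
      }
    };

    void watcher_tick() {                                                  // hypothetical
      SampleStatisticsCallback cb;
      os::WatcherThreadCrashProtection crash_protection;
      if (!crash_protection.call(cb)) {
        // The callback faulted; the fault was contained and this sample is skipped.
      }
    }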