Mercurial > hg > graal-jvmci-8
annotate src/os/windows/vm/os_windows.cpp @ 342:37f87013dfd8
6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author | ysr |
---|---|
date | Thu, 05 Jun 2008 15:57:56 -0700 |
parents | 8bd1e4487c18 |
children | 1ee8caae33af |
rev | line source |
---|---|
0 | 1 /* |
2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #ifdef _WIN64 | |
26 // Must be at least Windows 2000 or XP to use VectoredExceptions | |
27 #define _WIN32_WINNT 0x500 | |
28 #endif | |
29 | |
30 // do not include precompiled header file | |
31 # include "incls/_os_windows.cpp.incl" | |
32 | |
33 #ifdef _DEBUG | |
34 #include <crtdbg.h> | |
35 #endif | |
36 | |
37 | |
38 #include <windows.h> | |
39 #include <sys/types.h> | |
40 #include <sys/stat.h> | |
41 #include <sys/timeb.h> | |
42 #include <objidl.h> | |
43 #include <shlobj.h> | |
44 | |
45 #include <malloc.h> | |
46 #include <signal.h> | |
47 #include <direct.h> | |
48 #include <errno.h> | |
49 #include <fcntl.h> | |
50 #include <io.h> | |
51 #include <process.h> // For _beginthreadex(), _endthreadex() | |
52 #include <imagehlp.h> // For os::dll_address_to_function_name | |
53 | |
54 /* for enumerating dll libraries */ | |
55 #include <tlhelp32.h> | |
56 #include <vdmdbg.h> | |
57 | |
58 // for timer info max values which include all bits | |
59 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) | |
60 | |
61 // For DLL loading/load error detection | |
62 // Values of PE COFF | |
63 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c | |
64 #define IMAGE_FILE_SIGNATURE_LENGTH 4 | |
65 | |
66 static HANDLE main_process; | |
67 static HANDLE main_thread; | |
68 static int main_thread_id; | |
69 | |
70 static FILETIME process_creation_time; | |
71 static FILETIME process_exit_time; | |
72 static FILETIME process_user_time; | |
73 static FILETIME process_kernel_time; | |
74 | |
75 #ifdef _WIN64 | |
76 PVOID topLevelVectoredExceptionHandler = NULL; | |
77 #endif | |
78 | |
79 #ifdef _M_IA64 | |
80 #define __CPU__ ia64 | |
81 #elif _M_AMD64 | |
82 #define __CPU__ amd64 | |
83 #else | |
84 #define __CPU__ i486 | |
85 #endif | |
86 | |
87 // save DLL module handle, used by GetModuleFileName | |
88 | |
89 HINSTANCE vm_lib_handle; | |
90 static int getLastErrorString(char *buf, size_t len); | |
91 | |
92 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { | |
93 switch (reason) { | |
94 case DLL_PROCESS_ATTACH: | |
95 vm_lib_handle = hinst; | |
96 if(ForceTimeHighResolution) | |
97 timeBeginPeriod(1L); | |
98 break; | |
99 case DLL_PROCESS_DETACH: | |
100 if(ForceTimeHighResolution) | |
101 timeEndPeriod(1L); | |
102 #ifdef _WIN64 | |
103 if (topLevelVectoredExceptionHandler != NULL) { | |
104 RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler); | |
105 topLevelVectoredExceptionHandler = NULL; | |
106 } | |
107 #endif | |
108 break; | |
109 default: | |
110 break; | |
111 } | |
112 return true; | |
113 } | |
114 | |
115 static inline double fileTimeAsDouble(FILETIME* time) { | |
116 const double high = (double) ((unsigned int) ~0); | |
117 const double split = 10000000.0; | |
118 double result = (time->dwLowDateTime / split) + | |
119 time->dwHighDateTime * (high/split); | |
120 return result; | |
121 } | |
122 | |
123 // Implementation of os | |
124 | |
125 bool os::getenv(const char* name, char* buffer, int len) { | |
126 int result = GetEnvironmentVariable(name, buffer, len); | |
127 return result > 0 && result < len; | |
128 } | |
129 | |
130 | |
131 // No setuid programs under Windows. | |
132 bool os::have_special_privileges() { | |
133 return false; | |
134 } | |
135 | |
136 | |
137 // This method is a periodic task to check for misbehaving JNI applications | |
138 // under CheckJNI, we can add any periodic checks here. | |
139 // For Windows at the moment does nothing | |
140 void os::run_periodic_checks() { | |
141 return; | |
142 } | |
143 | |
144 #ifndef _WIN64 | |
145 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); | |
146 #endif | |
147 void os::init_system_properties_values() { | |
148 /* sysclasspath, java_home, dll_dir */ | |
149 { | |
150 char *home_path; | |
151 char *dll_path; | |
152 char *pslash; | |
153 char *bin = "\\bin"; | |
154 char home_dir[MAX_PATH]; | |
155 | |
156 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { | |
157 os::jvm_path(home_dir, sizeof(home_dir)); | |
158 // Found the full path to jvm[_g].dll. | |
159 // Now cut the path to <java_home>/jre if we can. | |
160 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ | |
161 pslash = strrchr(home_dir, '\\'); | |
162 if (pslash != NULL) { | |
163 *pslash = '\0'; /* get rid of \{client|server} */ | |
164 pslash = strrchr(home_dir, '\\'); | |
165 if (pslash != NULL) | |
166 *pslash = '\0'; /* get rid of \bin */ | |
167 } | |
168 } | |
169 | |
170 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1); | |
171 if (home_path == NULL) | |
172 return; | |
173 strcpy(home_path, home_dir); | |
174 Arguments::set_java_home(home_path); | |
175 | |
176 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1); | |
177 if (dll_path == NULL) | |
178 return; | |
179 strcpy(dll_path, home_dir); | |
180 strcat(dll_path, bin); | |
181 Arguments::set_dll_dir(dll_path); | |
182 | |
183 if (!set_boot_path('\\', ';')) | |
184 return; | |
185 } | |
186 | |
187 /* library_path */ | |
188 #define EXT_DIR "\\lib\\ext" | |
189 #define BIN_DIR "\\bin" | |
190 #define PACKAGE_DIR "\\Sun\\Java" | |
191 { | |
192 /* Win32 library search order (See the documentation for LoadLibrary): | |
193 * | |
194 * 1. The directory from which application is loaded. | |
195 * 2. The current directory | |
196 * 3. The system wide Java Extensions directory (Java only) | |
197 * 4. System directory (GetSystemDirectory) | |
198 * 5. Windows directory (GetWindowsDirectory) | |
199 * 6. The PATH environment variable | |
200 */ | |
201 | |
202 char *library_path; | |
203 char tmp[MAX_PATH]; | |
204 char *path_str = ::getenv("PATH"); | |
205 | |
206 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + | |
207 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10); | |
208 | |
209 library_path[0] = '\0'; | |
210 | |
211 GetModuleFileName(NULL, tmp, sizeof(tmp)); | |
212 *(strrchr(tmp, '\\')) = '\0'; | |
213 strcat(library_path, tmp); | |
214 | |
215 strcat(library_path, ";."); | |
216 | |
217 GetWindowsDirectory(tmp, sizeof(tmp)); | |
218 strcat(library_path, ";"); | |
219 strcat(library_path, tmp); | |
220 strcat(library_path, PACKAGE_DIR BIN_DIR); | |
221 | |
222 GetSystemDirectory(tmp, sizeof(tmp)); | |
223 strcat(library_path, ";"); | |
224 strcat(library_path, tmp); | |
225 | |
226 GetWindowsDirectory(tmp, sizeof(tmp)); | |
227 strcat(library_path, ";"); | |
228 strcat(library_path, tmp); | |
229 | |
230 if (path_str) { | |
231 strcat(library_path, ";"); | |
232 strcat(library_path, path_str); | |
233 } | |
234 | |
235 Arguments::set_library_path(library_path); | |
236 FREE_C_HEAP_ARRAY(char, library_path); | |
237 } | |
238 | |
239 /* Default extensions directory */ | |
240 { | |
241 char path[MAX_PATH]; | |
242 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; | |
243 GetWindowsDirectory(path, MAX_PATH); | |
244 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, | |
245 path, PACKAGE_DIR, EXT_DIR); | |
246 Arguments::set_ext_dirs(buf); | |
247 } | |
248 #undef EXT_DIR | |
249 #undef BIN_DIR | |
250 #undef PACKAGE_DIR | |
251 | |
252 /* Default endorsed standards directory. */ | |
253 { | |
254 #define ENDORSED_DIR "\\lib\\endorsed" | |
255 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); | |
256 char * buf = NEW_C_HEAP_ARRAY(char, len); | |
257 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); | |
258 Arguments::set_endorsed_dirs(buf); | |
259 #undef ENDORSED_DIR | |
260 } | |
261 | |
262 #ifndef _WIN64 | |
263 SetUnhandledExceptionFilter(Handle_FLT_Exception); | |
264 #endif | |
265 | |
266 // Done | |
267 return; | |
268 } | |
269 | |
270 void os::breakpoint() { | |
271 DebugBreak(); | |
272 } | |
273 | |
274 // Invoked from the BREAKPOINT Macro | |
275 extern "C" void breakpoint() { | |
276 os::breakpoint(); | |
277 } | |
278 | |
279 // Returns an estimate of the current stack pointer. Result must be guaranteed | |
280 // to point into the calling threads stack, and be no lower than the current | |
281 // stack pointer. | |
282 | |
283 address os::current_stack_pointer() { | |
284 int dummy; | |
285 address sp = (address)&dummy; | |
286 return sp; | |
287 } | |
288 | |
289 // os::current_stack_base() | |
290 // | |
291 // Returns the base of the stack, which is the stack's | |
292 // starting address. This function must be called | |
293 // while running on the stack of the thread being queried. | |
294 | |
295 address os::current_stack_base() { | |
296 MEMORY_BASIC_INFORMATION minfo; | |
297 address stack_bottom; | |
298 size_t stack_size; | |
299 | |
300 VirtualQuery(&minfo, &minfo, sizeof(minfo)); | |
301 stack_bottom = (address)minfo.AllocationBase; | |
302 stack_size = minfo.RegionSize; | |
303 | |
304 // Add up the sizes of all the regions with the same | |
305 // AllocationBase. | |
306 while( 1 ) | |
307 { | |
308 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); | |
309 if ( stack_bottom == (address)minfo.AllocationBase ) | |
310 stack_size += minfo.RegionSize; | |
311 else | |
312 break; | |
313 } | |
314 | |
315 #ifdef _M_IA64 | |
316 // IA64 has memory and register stacks | |
317 stack_size = stack_size / 2; | |
318 #endif | |
319 return stack_bottom + stack_size; | |
320 } | |
321 | |
322 size_t os::current_stack_size() { | |
323 size_t sz; | |
324 MEMORY_BASIC_INFORMATION minfo; | |
325 VirtualQuery(&minfo, &minfo, sizeof(minfo)); | |
326 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; | |
327 return sz; | |
328 } | |
329 | |
330 | |
331 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); | |
332 | |
// Thread start routine for all new Java threads. Runs the Thread's
// run() method, wrapped in a structured exception handler (unless
// vectored exceptions are in use) so the VM can produce an error dump
// when an exception occurs in a non-Java thread (e.g. the VM thread).
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // Record the NUMA locality group this thread starts on, if any.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }


  if (UseVectoredExceptions) {
    // If we are using vectored exceptions we don't need to set up a SEH.
    thread->run();
  }
  else {
    // Install a win32 structured exception handler around every thread
    // created by the VM, so the VM can generate an error dump when an
    // exception occurs in a non-Java thread (e.g. VM thread).
    __try {
      thread->run();
    } __except(topLevelExceptionFilter(
               (_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
    }
  }

  // One less thread is executing.
  // When the VMThread gets here, the main thread may have already exited,
  // which frees the CodeHeap containing the Atomic::add code — hence the
  // guard before touching the counter.
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}
380 | |
381 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { | |
382 // Allocate the OSThread object | |
383 OSThread* osthread = new OSThread(NULL, NULL); | |
384 if (osthread == NULL) return NULL; | |
385 | |
386 // Initialize support for Java interrupts | |
387 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); | |
388 if (interrupt_event == NULL) { | |
389 delete osthread; | |
390 return NULL; | |
391 } | |
392 osthread->set_interrupt_event(interrupt_event); | |
393 | |
394 // Store info on the Win32 thread into the OSThread | |
395 osthread->set_thread_handle(thread_handle); | |
396 osthread->set_thread_id(thread_id); | |
397 | |
398 if (UseNUMA) { | |
399 int lgrp_id = os::numa_get_group_id(); | |
400 if (lgrp_id != -1) { | |
401 thread->set_lgrp_id(lgrp_id); | |
402 } | |
403 } | |
404 | |
405 // Initial thread state is INITIALIZED, not SUSPENDED | |
406 osthread->set_state(INITIALIZED); | |
407 | |
408 return osthread; | |
409 } | |
410 | |
411 | |
412 bool os::create_attached_thread(JavaThread* thread) { | |
413 #ifdef ASSERT | |
414 thread->verify_not_published(); | |
415 #endif | |
416 HANDLE thread_h; | |
417 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), | |
418 &thread_h, THREAD_ALL_ACCESS, false, 0)) { | |
419 fatal("DuplicateHandle failed\n"); | |
420 } | |
421 OSThread* osthread = create_os_thread(thread, thread_h, | |
422 (int)current_thread_id()); | |
423 if (osthread == NULL) { | |
424 return false; | |
425 } | |
426 | |
427 // Initial thread state is RUNNABLE | |
428 osthread->set_state(RUNNABLE); | |
429 | |
430 thread->set_osthread(osthread); | |
431 return true; | |
432 } | |
433 | |
434 bool os::create_main_thread(JavaThread* thread) { | |
435 #ifdef ASSERT | |
436 thread->verify_not_published(); | |
437 #endif | |
438 if (_starting_thread == NULL) { | |
439 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); | |
440 if (_starting_thread == NULL) { | |
441 return false; | |
442 } | |
443 } | |
444 | |
445 // The primordial thread is runnable from the start) | |
446 _starting_thread->set_state(RUNNABLE); | |
447 | |
448 thread->set_osthread(_starting_thread); | |
449 return true; | |
450 } | |
451 | |
452 // Allocate and initialize a new OSThread | |
453 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { | |
454 unsigned thread_id; | |
455 | |
456 // Allocate the OSThread object | |
457 OSThread* osthread = new OSThread(NULL, NULL); | |
458 if (osthread == NULL) { | |
459 return false; | |
460 } | |
461 | |
462 // Initialize support for Java interrupts | |
463 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); | |
464 if (interrupt_event == NULL) { | |
465 delete osthread; | |
466 return NULL; | |
467 } | |
468 osthread->set_interrupt_event(interrupt_event); | |
469 osthread->set_interrupted(false); | |
470 | |
471 thread->set_osthread(osthread); | |
472 | |
473 if (stack_size == 0) { | |
474 switch (thr_type) { | |
475 case os::java_thread: | |
476 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss | |
477 if (JavaThread::stack_size_at_create() > 0) | |
478 stack_size = JavaThread::stack_size_at_create(); | |
479 break; | |
480 case os::compiler_thread: | |
481 if (CompilerThreadStackSize > 0) { | |
482 stack_size = (size_t)(CompilerThreadStackSize * K); | |
483 break; | |
484 } // else fall through: | |
485 // use VMThreadStackSize if CompilerThreadStackSize is not defined | |
486 case os::vm_thread: | |
487 case os::pgc_thread: | |
488 case os::cgc_thread: | |
489 case os::watcher_thread: | |
490 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); | |
491 break; | |
492 } | |
493 } | |
494 | |
495 // Create the Win32 thread | |
496 // | |
497 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() | |
498 // does not specify stack size. Instead, it specifies the size of | |
499 // initially committed space. The stack size is determined by | |
500 // PE header in the executable. If the committed "stack_size" is larger | |
501 // than default value in the PE header, the stack is rounded up to the | |
502 // nearest multiple of 1MB. For example if the launcher has default | |
503 // stack size of 320k, specifying any size less than 320k does not | |
504 // affect the actual stack size at all, it only affects the initial | |
505 // commitment. On the other hand, specifying 'stack_size' larger than | |
506 // default value may cause significant increase in memory usage, because | |
507 // not only the stack space will be rounded up to MB, but also the | |
508 // entire space is committed upfront. | |
509 // | |
510 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' | |
511 // for CreateThread() that can treat 'stack_size' as stack size. However we | |
512 // are not supposed to call CreateThread() directly according to MSDN | |
513 // document because JVM uses C runtime library. The good news is that the | |
514 // flag appears to work with _beginthredex() as well. | |
515 | |
516 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION | |
517 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) | |
518 #endif | |
519 | |
520 HANDLE thread_handle = | |
521 (HANDLE)_beginthreadex(NULL, | |
522 (unsigned)stack_size, | |
523 (unsigned (__stdcall *)(void*)) java_start, | |
524 thread, | |
525 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, | |
526 &thread_id); | |
527 if (thread_handle == NULL) { | |
528 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again | |
529 // without the flag. | |
530 thread_handle = | |
531 (HANDLE)_beginthreadex(NULL, | |
532 (unsigned)stack_size, | |
533 (unsigned (__stdcall *)(void*)) java_start, | |
534 thread, | |
535 CREATE_SUSPENDED, | |
536 &thread_id); | |
537 } | |
538 if (thread_handle == NULL) { | |
539 // Need to clean up stuff we've allocated so far | |
540 CloseHandle(osthread->interrupt_event()); | |
541 thread->set_osthread(NULL); | |
542 delete osthread; | |
543 return NULL; | |
544 } | |
545 | |
546 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); | |
547 | |
548 // Store info on the Win32 thread into the OSThread | |
549 osthread->set_thread_handle(thread_handle); | |
550 osthread->set_thread_id(thread_id); | |
551 | |
552 // Initial thread state is INITIALIZED, not SUSPENDED | |
553 osthread->set_state(INITIALIZED); | |
554 | |
555 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain | |
556 return true; | |
557 } | |
558 | |
559 | |
560 // Free Win32 resources related to the OSThread | |
561 void os::free_thread(OSThread* osthread) { | |
562 assert(osthread != NULL, "osthread not set"); | |
563 CloseHandle(osthread->thread_handle()); | |
564 CloseHandle(osthread->interrupt_event()); | |
565 delete osthread; | |
566 } | |
567 | |
568 | |
569 static int has_performance_count = 0; | |
570 static jlong first_filetime; | |
571 static jlong initial_performance_count; | |
572 static jlong performance_frequency; | |
573 | |
574 | |
575 jlong as_long(LARGE_INTEGER x) { | |
576 jlong result = 0; // initialization to avoid warning | |
577 set_high(&result, x.HighPart); | |
578 set_low(&result, x.LowPart); | |
579 return result; | |
580 } | |
581 | |
582 | |
583 jlong os::elapsed_counter() { | |
584 LARGE_INTEGER count; | |
585 if (has_performance_count) { | |
586 QueryPerformanceCounter(&count); | |
587 return as_long(count) - initial_performance_count; | |
588 } else { | |
589 FILETIME wt; | |
590 GetSystemTimeAsFileTime(&wt); | |
591 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); | |
592 } | |
593 } | |
594 | |
595 | |
596 jlong os::elapsed_frequency() { | |
597 if (has_performance_count) { | |
598 return performance_frequency; | |
599 } else { | |
600 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. | |
601 return 10000000; | |
602 } | |
603 } | |
604 | |
605 | |
606 julong os::available_memory() { | |
607 return win32::available_memory(); | |
608 } | |
609 | |
610 julong os::win32::available_memory() { | |
611 // FIXME: GlobalMemoryStatus() may return incorrect value if total memory | |
612 // is larger than 4GB | |
613 MEMORYSTATUS ms; | |
614 GlobalMemoryStatus(&ms); | |
615 | |
616 return (julong)ms.dwAvailPhys; | |
617 } | |
618 | |
619 julong os::physical_memory() { | |
620 return win32::physical_memory(); | |
621 } | |
622 | |
623 julong os::allocatable_physical_memory(julong size) { | |
20
e195fe4c40c7
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
0
diff
changeset
|
624 #ifdef _LP64 |
e195fe4c40c7
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
0
diff
changeset
|
625 return size; |
e195fe4c40c7
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
0
diff
changeset
|
626 #else |
e195fe4c40c7
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
0
diff
changeset
|
627 // Limit to 1400m because of the 2gb address space wall |
0 | 628 return MIN2(size, (julong)1400*M); |
20
e195fe4c40c7
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
0
diff
changeset
|
629 #endif |
0 | 630 } |
631 | |
632 // VC6 lacks DWORD_PTR | |
633 #if _MSC_VER < 1300 | |
634 typedef UINT_PTR DWORD_PTR; | |
635 #endif | |
636 | |
637 int os::active_processor_count() { | |
638 DWORD_PTR lpProcessAffinityMask = 0; | |
639 DWORD_PTR lpSystemAffinityMask = 0; | |
640 int proc_count = processor_count(); | |
641 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && | |
642 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { | |
643 // Nof active processors is number of bits in process affinity mask | |
644 int bitcount = 0; | |
645 while (lpProcessAffinityMask != 0) { | |
646 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); | |
647 bitcount++; | |
648 } | |
649 return bitcount; | |
650 } else { | |
651 return proc_count; | |
652 } | |
653 } | |
654 | |
655 bool os::distribute_processes(uint length, uint* distribution) { | |
656 // Not yet implemented. | |
657 return false; | |
658 } | |
659 | |
660 bool os::bind_to_processor(uint processor_id) { | |
661 // Not yet implemented. | |
662 return false; | |
663 } | |
664 | |
665 static void initialize_performance_counter() { | |
666 LARGE_INTEGER count; | |
667 if (QueryPerformanceFrequency(&count)) { | |
668 has_performance_count = 1; | |
669 performance_frequency = as_long(count); | |
670 QueryPerformanceCounter(&count); | |
671 initial_performance_count = as_long(count); | |
672 } else { | |
673 has_performance_count = 0; | |
674 FILETIME wt; | |
675 GetSystemTimeAsFileTime(&wt); | |
676 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); | |
677 } | |
678 } | |
679 | |
680 | |
681 double os::elapsedTime() { | |
682 return (double) elapsed_counter() / (double) elapsed_frequency(); | |
683 } | |
684 | |
685 | |
686 // Windows format: | |
687 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. | |
688 // Java format: | |
689 // Java standards require the number of milliseconds since 1/1/1970 | |
690 | |
691 // Constant offset - calculated using offset() | |
692 static jlong _offset = 116444736000000000; | |
693 // Fake time counter for reproducible results when debugging | |
694 static jlong fake_time = 0; | |
695 | |
#ifdef ASSERT
// In debug builds, recompute the 1601->1970 epoch offset once through
// the Windows time APIs and check it against the hard-coded constant.
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0; // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal1("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product builds just use the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
726 | |
727 jlong windows_to_java_time(FILETIME wt) { | |
728 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); | |
729 return (a - offset()) / 10000; | |
730 } | |
731 | |
732 FILETIME java_to_windows_time(jlong l) { | |
733 jlong a = (l * 10000) + offset(); | |
734 FILETIME result; | |
735 result.dwHighDateTime = high(a); | |
736 result.dwLowDateTime = low(a); | |
737 return result; | |
738 } | |
739 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
740 // For now, we say that Windows does not support vtime. I have no idea |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
741 // whether it can actually be made to (DLD, 9/13/05). |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
742 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
743 bool os::supports_vtime() { return false; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
744 bool os::enable_vtime() { return false; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
745 bool os::vtime_enabled() { return false; } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
746 double os::elapsedVTime() { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
747 // better than nothing, but not much |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
748 return elapsedTime(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
749 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
750 |
0 | 751 jlong os::javaTimeMillis() { |
752 if (UseFakeTimers) { | |
753 return fake_time++; | |
754 } else { | |
61 | 755 FILETIME wt; |
756 GetSystemTimeAsFileTime(&wt); | |
757 return windows_to_java_time(wt); | |
0 | 758 } |
759 } | |
760 | |
761 #define NANOS_PER_SEC CONST64(1000000000) | |
762 #define NANOS_PER_MILLISEC 1000000 | |
763 jlong os::javaTimeNanos() { | |
764 if (!has_performance_count) { | |
765 return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do. | |
766 } else { | |
767 LARGE_INTEGER current_count; | |
768 QueryPerformanceCounter(¤t_count); | |
769 double current = as_long(current_count); | |
770 double freq = performance_frequency; | |
771 jlong time = (jlong)((current/freq) * NANOS_PER_SEC); | |
772 return time; | |
773 } | |
774 } | |
775 | |
// Describe the characteristics of the javaTimeNanos() timer for JVMTI:
// its maximum value and whether it can skip forward or backward.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
808 | |
809 char* os::local_time_string(char *buf, size_t buflen) { | |
810 SYSTEMTIME st; | |
811 GetLocalTime(&st); | |
812 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", | |
813 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); | |
814 return buf; | |
815 } | |
816 | |
817 bool os::getTimesSecs(double* process_real_time, | |
818 double* process_user_time, | |
819 double* process_system_time) { | |
820 HANDLE h_process = GetCurrentProcess(); | |
821 FILETIME create_time, exit_time, kernel_time, user_time; | |
822 BOOL result = GetProcessTimes(h_process, | |
823 &create_time, | |
824 &exit_time, | |
825 &kernel_time, | |
826 &user_time); | |
827 if (result != 0) { | |
828 FILETIME wt; | |
829 GetSystemTimeAsFileTime(&wt); | |
830 jlong rtc_millis = windows_to_java_time(wt); | |
831 jlong user_millis = windows_to_java_time(user_time); | |
832 jlong system_millis = windows_to_java_time(kernel_time); | |
833 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); | |
834 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); | |
835 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); | |
836 return true; | |
837 } else { | |
838 return false; | |
839 } | |
840 } | |
841 | |
842 void os::shutdown() { | |
843 | |
844 // allow PerfMemory to attempt cleanup of any persistent resources | |
845 perfMemory_exit(); | |
846 | |
847 // flush buffered output, finish log files | |
848 ostream_abort(); | |
849 | |
850 // Check for abort hook | |
851 abort_hook_t abort_hook = Arguments::abort_hook(); | |
852 if (abort_hook != NULL) { | |
853 abort_hook(); | |
854 } | |
855 } | |
856 | |
857 void os::abort(bool dump_core) | |
858 { | |
859 os::shutdown(); | |
860 // no core dump on Windows | |
861 ::exit(1); | |
862 } | |
863 | |
864 // Die immediately, no exit hook, no abort hook, no cleanup. | |
865 void os::die() { | |
866 _exit(-1); | |
867 } | |
868 | |
// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'. */

// Open a POSIX-style directory stream over `dirname`.
// Returns a malloc'd DIR* on success. On failure returns NULL (0) and sets
// errno to ENOMEM, ENOENT, ENOTDIR or EACCES. Release with os::closedir.
DIR *
os::opendir(const char *dirname)
{
    assert(dirname != NULL, "just checking");      // hotspot change
    DIR *dirp = (DIR *)malloc(sizeof(DIR));
    DWORD fattr;                                // hotspot change
    char alt_dirname[4] = { 0, 0, 0, 0 };

    if (dirp == 0) {
        errno = ENOMEM;
        return 0;
    }

    /*
     * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
     * as a directory in FindFirstFile().  We detect this case here and
     * prepend the current drive name.
     */
    if (dirname[1] == '\0' && dirname[0] == '\\') {
        alt_dirname[0] = _getdrive() + 'A' - 1;  // _getdrive() is 1-based
        alt_dirname[1] = ':';
        alt_dirname[2] = '\\';
        alt_dirname[3] = '\0';
        dirname = alt_dirname;
    }

    // +5 leaves exactly enough room for the "\\*.*" suffix below plus NUL.
    dirp->path = (char *)malloc(strlen(dirname) + 5);
    if (dirp->path == 0) {
        free(dirp);
        errno = ENOMEM;
        return 0;
    }
    strcpy(dirp->path, dirname);

    fattr = GetFileAttributes(dirp->path);
    if (fattr == 0xffffffff) {   // i.e. INVALID_FILE_ATTRIBUTES
        free(dirp->path);
        free(dirp);
        errno = ENOENT;
        return 0;
    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
        free(dirp->path);
        free(dirp);
        errno = ENOTDIR;
        return 0;
    }

    /* Append "*.*", or possibly "\\*.*", to path */
    if (dirp->path[1] == ':'
        && (dirp->path[2] == '\0'
            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
        /* No '\\' needed for cases like "Z:" or "Z:\" */
        strcat(dirp->path, "*.*");
    } else {
        strcat(dirp->path, "\\*.*");
    }

    // Prime the one-entry lookahead used by readdir. An empty directory
    // (ERROR_FILE_NOT_FOUND) is not an error: readdir will just return NULL.
    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
            free(dirp->path);
            free(dirp);
            errno = EACCES;
            return 0;
        }
    }
    return dirp;
}
945 | |
/* parameter dbuf unused on Windows */

// Return the next directory entry, POSIX-readdir style, or NULL (0) when
// the stream is exhausted. The returned dirent lives inside the DIR
// structure, so it is overwritten by the next call.
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
    assert(dirp != NULL, "just checking");      // hotspot change
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        return 0;
    }

    // find_data always holds the entry one step ahead (primed by opendir).
    strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

    // Advance the lookahead; at end-of-stream close the handle so the next
    // call returns NULL.
    if (!FindNextFile(dirp->handle, &dirp->find_data)) {
        if (GetLastError() == ERROR_INVALID_HANDLE) {
            errno = EBADF;
            return 0;
        }
        FindClose(dirp->handle);
        dirp->handle = INVALID_HANDLE_VALUE;
    }

    return &dirp->dirent;
}
969 | |
970 int | |
971 os::closedir(DIR *dirp) | |
972 { | |
973 assert(dirp != NULL, "just checking"); // hotspot change | |
974 if (dirp->handle != INVALID_HANDLE_VALUE) { | |
975 if (!FindClose(dirp->handle)) { | |
976 errno = EBADF; | |
977 return -1; | |
978 } | |
979 dirp->handle = INVALID_HANDLE_VALUE; | |
980 } | |
981 free(dirp->path); | |
982 free(dirp); | |
983 return 0; | |
984 } | |
985 | |
// File-name extension used for dynamically loaded libraries on Windows.
const char* os::dll_file_extension() { return ".dll"; }
987 | |
988 const char * os::get_temp_directory() | |
989 { | |
990 static char path_buf[MAX_PATH]; | |
991 if (GetTempPath(MAX_PATH, path_buf)>0) | |
992 return path_buf; | |
993 else{ | |
994 path_buf[0]='\0'; | |
995 return path_buf; | |
996 } | |
997 } | |
998 | |
// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Return the current working directory; semantics follow the CRT _getcwd
// (returns buf on success, NULL on failure or if buflen is too small).
const char* os::get_current_directory(char *buf, int buflen) {
  return _getcwd(buf, buflen);
}
1004 | |
//-----------------------------------------------------------
// Helper functions for fatal error handler

// The following library functions are resolved dynamically at runtime:

// PSAPI functions, for Windows NT, 2000, XP

// psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform
// SDK from Microsoft. Here are the definitions copied from psapi.h
typedef struct _MODULEINFO {
    LPVOID lpBaseOfDll;
    DWORD SizeOfImage;
    LPVOID EntryPoint;
} MODULEINFO, *LPMODULEINFO;

// Function pointers filled in lazily by _init_psapi().
static BOOL  (WINAPI *_EnumProcessModules)  ( HANDLE, HMODULE *, DWORD, LPDWORD );
static DWORD (WINAPI *_GetModuleFileNameEx) ( HANDLE, HMODULE, LPTSTR, DWORD );
static BOOL  (WINAPI *_GetModuleInformation)( HANDLE, HMODULE, LPMODULEINFO, DWORD );

// ToolHelp Functions, for Windows 95, 98 and ME
// Function pointers filled in lazily by _init_toolhelp().

static HANDLE(WINAPI *_CreateToolhelp32Snapshot)(DWORD,DWORD) ;
static BOOL  (WINAPI *_Module32First)           (HANDLE,LPMODULEENTRY32) ;
static BOOL  (WINAPI *_Module32Next)            (HANDLE,LPMODULEENTRY32) ;

bool _has_psapi;          // true once all three PSAPI entry points resolved
bool _psapi_init = false; // true once _init_psapi() has been attempted
bool _has_toolhelp;       // true once all three ToolHelp entry points resolved
1033 | |
1034 static bool _init_psapi() { | |
1035 HINSTANCE psapi = LoadLibrary( "PSAPI.DLL" ) ; | |
1036 if( psapi == NULL ) return false ; | |
1037 | |
1038 _EnumProcessModules = CAST_TO_FN_PTR( | |
1039 BOOL(WINAPI *)(HANDLE, HMODULE *, DWORD, LPDWORD), | |
1040 GetProcAddress(psapi, "EnumProcessModules")) ; | |
1041 _GetModuleFileNameEx = CAST_TO_FN_PTR( | |
1042 DWORD (WINAPI *)(HANDLE, HMODULE, LPTSTR, DWORD), | |
1043 GetProcAddress(psapi, "GetModuleFileNameExA")); | |
1044 _GetModuleInformation = CAST_TO_FN_PTR( | |
1045 BOOL (WINAPI *)(HANDLE, HMODULE, LPMODULEINFO, DWORD), | |
1046 GetProcAddress(psapi, "GetModuleInformation")); | |
1047 | |
1048 _has_psapi = (_EnumProcessModules && _GetModuleFileNameEx && _GetModuleInformation); | |
1049 _psapi_init = true; | |
1050 return _has_psapi; | |
1051 } | |
1052 | |
1053 static bool _init_toolhelp() { | |
1054 HINSTANCE kernel32 = LoadLibrary("Kernel32.DLL") ; | |
1055 if (kernel32 == NULL) return false ; | |
1056 | |
1057 _CreateToolhelp32Snapshot = CAST_TO_FN_PTR( | |
1058 HANDLE(WINAPI *)(DWORD,DWORD), | |
1059 GetProcAddress(kernel32, "CreateToolhelp32Snapshot")); | |
1060 _Module32First = CAST_TO_FN_PTR( | |
1061 BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32), | |
1062 GetProcAddress(kernel32, "Module32First" )); | |
1063 _Module32Next = CAST_TO_FN_PTR( | |
1064 BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32), | |
1065 GetProcAddress(kernel32, "Module32Next" )); | |
1066 | |
1067 _has_toolhelp = (_CreateToolhelp32Snapshot && _Module32First && _Module32Next); | |
1068 return _has_toolhelp; | |
1069 } | |
1070 | |
#ifdef _WIN64
// Helper routine which returns true if address is
// within the NTDLL address space.
//
// NOTE(review): relies on _GetModuleInformation having been resolved by a
// prior _init_psapi() call — confirm callers guarantee that.
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !_GetModuleInformation( GetCurrentProcess(), hmod,
                               &minfo, sizeof(MODULEINFO)) )
    return false;

  // In range [lpBaseOfDll, lpBaseOfDll + SizeOfImage)?
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif
1093 | |
1094 | |
// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module. A non-zero return value stops the enumeration
// and is propagated back to the enumerate_modules() caller.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
1110 | |
// enumerate_modules for Windows NT, using PSAPI
// Walks up to MAX_NUM_MODULES modules of process `pid`, invoking `func` on
// each; stops early if the callback returns non-zero and returns that value.
// Returns 0 if PSAPI is unavailable or the process cannot be opened.
// NOTE(review): `filename` is function-static, so this is not safe for
// concurrent callers — acceptable in the fatal error handler context.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int         result = 0;

  // Resolve PSAPI on first use; bail out if it is not available.
  if (!_has_psapi && (_psapi_init || !_init_psapi())) return 0;

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!_EnumProcessModules(hProcess, modules,
                           sizeof(modules), &size_needed)) {
      CloseHandle( hProcess );
      return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!_GetModuleFileNameEx(hProcess, modules[i],
                             filename, sizeof(filename))) {
        filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!_GetModuleInformation(hProcess, modules[i],
                               &modinfo, sizeof(modinfo))) {
        modinfo.lpBaseOfDll = NULL;
        modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}
1160 | |
1161 | |
// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Same contract as _enumerate_modules_winnt: invokes `func` per module,
// stops on the first non-zero callback result and returns it; returns 0
// when ToolHelp is unavailable or the snapshot cannot be created.
// NOTE(review): `modentry` is function-static — not safe for concurrent use.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  if (!_has_toolhelp) return 0;

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = _CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
      return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = _Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be reset before every Module32Next call.
    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = _Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}
1194 | |
1195 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param ) | |
1196 { | |
1197 // Get current process ID if caller doesn't provide it. | |
1198 if (!pid) pid = os::current_process_id(); | |
1199 | |
1200 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param); | |
1201 else return _enumerate_modules_windows(pid, func, param); | |
1202 } | |
1203 | |
// In/out parameter block for _locate_module_by_addr.
struct _modinfo {
   address addr;      // in:  address to locate
   char* full_path;   // in:  point to a char buffer (may be NULL)
   int buflen;        // in:  size of the buffer
   address base_addr; // out: base address of the containing module
};
1210 | |
1211 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr, | |
1212 unsigned size, void * param) { | |
1213 struct _modinfo *pmod = (struct _modinfo *)param; | |
1214 if (!pmod) return -1; | |
1215 | |
1216 if (base_addr <= pmod->addr && | |
1217 base_addr+size > pmod->addr) { | |
1218 // if a buffer is provided, copy path name to the buffer | |
1219 if (pmod->full_path) { | |
1220 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); | |
1221 } | |
1222 pmod->base_addr = base_addr; | |
1223 return 1; | |
1224 } | |
1225 return 0; | |
1226 } | |
1227 | |
1228 bool os::dll_address_to_library_name(address addr, char* buf, | |
1229 int buflen, int* offset) { | |
1230 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always | |
1231 // return the full path to the DLL file, sometimes it returns path | |
1232 // to the corresponding PDB file (debug info); sometimes it only | |
1233 // returns partial path, which makes life painful. | |
1234 | |
1235 struct _modinfo mi; | |
1236 mi.addr = addr; | |
1237 mi.full_path = buf; | |
1238 mi.buflen = buflen; | |
1239 int pid = os::current_process_id(); | |
1240 if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) { | |
1241 // buf already contains path name | |
1242 if (offset) *offset = addr - mi.base_addr; | |
1243 return true; | |
1244 } else { | |
1245 if (buf) buf[0] = '\0'; | |
1246 if (offset) *offset = -1; | |
1247 return false; | |
1248 } | |
1249 } | |
1250 | |
1251 bool os::dll_address_to_function_name(address addr, char *buf, | |
1252 int buflen, int *offset) { | |
1253 // Unimplemented on Windows - in order to use SymGetSymFromAddr(), | |
1254 // we need to initialize imagehlp/dbghelp, then load symbol table | |
1255 // for every module. That's too much work to do after a fatal error. | |
1256 // For an example on how to implement this function, see 1.4.2. | |
1257 if (offset) *offset = -1; | |
1258 if (buf) buf[0] = '\0'; | |
1259 return false; | |
1260 } | |
1261 | |
// save the start and end address of jvm.dll into param[0] and param[1]
// Trick: this callback looks for the module containing the address of this
// very function (_locate_jvm_dll), which by construction is inside jvm.dll.
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                    unsigned size, void * param) {
   if (!param) return -1;

   if (base_addr     <= (address)_locate_jvm_dll &&
       base_addr+size > (address)_locate_jvm_dll) {
         // Found the module containing this code: record its address range.
         ((address*)param)[0] = base_addr;
         ((address*)param)[1] = base_addr + size;
         return 1;  // stop the enumeration
   }
   return 0;
}

// start and end address of jvm.dll, filled in lazily by address_is_in_vm()
address vm_lib_location[2];
1277 | |
1278 // check if addr is inside jvm.dll | |
1279 bool os::address_is_in_vm(address addr) { | |
1280 if (!vm_lib_location[0] || !vm_lib_location[1]) { | |
1281 int pid = os::current_process_id(); | |
1282 if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) { | |
1283 assert(false, "Can't find jvm module."); | |
1284 return false; | |
1285 } | |
1286 } | |
1287 | |
1288 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]); | |
1289 } | |
1290 | |
1291 // print module info; param is outputStream* | |
1292 static int _print_module(int pid, char* fname, address base, | |
1293 unsigned size, void* param) { | |
1294 if (!param) return -1; | |
1295 | |
1296 outputStream* st = (outputStream*)param; | |
1297 | |
1298 address end_addr = base + size; | |
1299 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname); | |
1300 return 0; | |
1301 } | |
1302 | |
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL with a human-readable message in ebuf; the
// message distinguishes "missing dependent libraries", "wrong architecture"
// (determined by parsing the PE/COFF header directly), and other system
// errors (via getLastErrorString).
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  long errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call getLastErrorString to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  getLastErrorString(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    // Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    // Go to COFF File Header in dll
    // that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    // Read field that contains code of architecture
    // that dll was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report getLastErrorString(...) msg
    return NULL;
  }

  // Maps PE machine codes to printable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
  #if   (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report getLastErrorString(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch,running_arch_str);
  }

  return NULL;
}
1427 | |
1428 | |
1429 void os::print_dll_info(outputStream *st) { | |
1430 int pid = os::current_process_id(); | |
1431 st->print_cr("Dynamic libraries:"); | |
1432 enumerate_modules(pid, _print_module, (void *)st); | |
1433 } | |
1434 | |
// Print a one-line OS description ("OS: <name> Build <n> <service pack>")
// to `st`, mapping known major*1000+minor version codes to product names.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  OSVERSIONINFOEX osvi;
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major/minor into a single comparable code, e.g. 5.1 -> 5001.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    // NT-based product line.
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002: st->print(" Windows Server 2003 family"); break;
    case 6000: st->print(" Windows Vista"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    // 9x product line.
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090: st->print(" Windows Me"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  }

  st->print(" Build %d", osvi.dwBuildNumber);
  st->print(" %s", osvi.szCSDVersion);           // service pack
  st->cr();
}
1474 | |
// Print a one-line memory summary (page size, physical and swap totals
// with free amounts, all in KB) to `st`.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
  // is larger than 4GB
  MEMORYSTATUS ms;
  GlobalMemoryStatus(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.dwTotalPageFile >> 10);
  st->print("(%uk free)", ms.dwAvailPageFile >> 10);
  st->cr();
}
1491 | |
// Print details of a Windows exception record (passed as `siginfo`) to `st`:
// the exception code, plus code-specific detail for access violations and
// for in-page errors that may indicate a damaged CDS archive mapping.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
      // ExceptionInformation[0]: 0 = read fault, 1 = write fault;
      // ExceptionInformation[1]: the faulting address.
      switch (er->ExceptionInformation[0]) {
      case 0: st->print(", reading address"); break;
      case 1: st->print(", writing address"); break;
      default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                            er->ExceptionInformation[0]);
      }
      st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // Paging failure inside the class data sharing archive mapping.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive."   \
                " Mapped file inaccessible during execution, "      \
                " possible disk/network problem.");
    }
  } else {
    // Generic fallback: dump all extra exception parameters.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
1525 | |
// Nothing to report on Windows: signal dispositions are not inspectable
// the way POSIX sigaction state is.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1529 | |
1530 static char saved_jvm_path[MAX_PATH] = {0}; | |
1531 | |
1532 // Find the full path to the current module, jvm.dll or jvm_g.dll | |
1533 void os::jvm_path(char *buf, jint buflen) { | |
1534 // Error checking. | |
1535 if (buflen < MAX_PATH) { | |
1536 assert(false, "must use a large-enough buffer"); | |
1537 buf[0] = '\0'; | |
1538 return; | |
1539 } | |
1540 // Lazy resolve the path to current module. | |
1541 if (saved_jvm_path[0] != 0) { | |
1542 strcpy(buf, saved_jvm_path); | |
1543 return; | |
1544 } | |
1545 | |
1546 GetModuleFileName(vm_lib_handle, buf, buflen); | |
1547 strcpy(saved_jvm_path, buf); | |
1548 } | |
1549 | |
1550 | |
// Print the platform decoration prepended to __stdcall JNI symbol names:
// a leading underscore on 32-bit Windows, nothing on Win64.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
1556 | |
1557 | |
// Print the platform decoration appended to __stdcall JNI symbol names:
// "@<bytes of arguments>" on 32-bit Windows, nothing on Win64.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}
1563 | |
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
// The SIGBREAK handler is therefore kept here and invoked manually from
// consoleHandler() instead of being registered with the CRT.
static void (*sigbreakHandler)(int) = NULL;
1570 | |
// Common handler for signals exposed through sun.misc.Signal: forwards the
// signal number to the signal-dispatch thread via os::signal_notify.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
1576 | |
// Return the shared user-signal handler installed for sun.misc.Signal.
void* os::user_handler() {
  return (void*) UserHandler;
}
1580 | |
1581 void* os::signal(int signal_number, void* handler) { | |
1582 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { | |
1583 void (*oldHandler)(int) = sigbreakHandler; | |
1584 sigbreakHandler = (void (*)(int)) handler; | |
1585 return (void*) oldHandler; | |
1586 } else { | |
1587 return (void*)::signal(signal_number, (void (*)(int))handler); | |
1588 } | |
1589 } | |
1590 | |
// Deliver `signal_number` to this process via the CRT raise().
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
1594 | |
1595 // The Win32 C runtime library maps all console control events other than ^C | |
1596 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, | |
1597 // logoff, and shutdown events. We therefore install our own console handler | |
1598 // that raises SIGTERM for the latter cases. | |
1599 // | |
1600 static BOOL WINAPI consoleHandler(DWORD event) { | |
1601 switch(event) { | |
1602 case CTRL_C_EVENT: | |
1603 if (is_error_reported()) { | |
1604 // Ctrl-C is pressed during error reporting, likely because the error | |
1605 // handler fails to abort. Let VM die immediately. | |
1606 os::die(); | |
1607 } | |
1608 | |
1609 os::signal_raise(SIGINT); | |
1610 return TRUE; | |
1611 break; | |
1612 case CTRL_BREAK_EVENT: | |
1613 if (sigbreakHandler != NULL) { | |
1614 (*sigbreakHandler)(SIGBREAK); | |
1615 } | |
1616 return TRUE; | |
1617 break; | |
1618 case CTRL_CLOSE_EVENT: | |
1619 case CTRL_LOGOFF_EVENT: | |
1620 case CTRL_SHUTDOWN_EVENT: | |
1621 os::signal_raise(SIGTERM); | |
1622 return TRUE; | |
1623 break; | |
1624 default: | |
1625 break; | |
1626 } | |
1627 return FALSE; | |
1628 } | |
1629 | |
/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}
1640 | |
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// semaphore signalled once per pending signal; waited on in check_pending_signals
static HANDLE sig_sem;
1644 | |
// Platform-specific signal subsystem setup: create the counting semaphore
// used by signal_notify/check_pending_signals, and (unless -Xrs) install
// the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // NOTE(review): the CreateSemaphore result is not checked — confirm a
  // NULL sig_sem is acceptable here (ReleaseSemaphore would then fail).
  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}
1670 | |
// Record one occurrence of `signal_number` and wake the signal-dispatch
// thread by bumping the semaphore (one permit per pending signal).
void os::signal_notify(int signal_number) {
  BOOL ret;

  Atomic::inc(&pending_signals[signal_number]);
  ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
  assert(ret != 0, "ReleaseSemaphore() failed");
}
1678 | |
// Scan pending_signals for a signal with a non-zero count, atomically
// consume one occurrence and return its number. If none is pending:
// return -1 when wait_for_signal is false, otherwise block on sig_sem
// (cooperating with the external-suspension protocol) and rescan.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    // Try to claim one pending occurrence of any signal. cmpxchg guards
    // against concurrent consumers decrementing the same slot.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Return the consumed permit so another waiter (or our rescan
        // after resuming) can still observe the pending signal.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
    // Loop back and rescan: the semaphore permit we consumed corresponds
    // to some pending signal, but another thread may have claimed it.
  }
}
1720 | |
// Non-blocking poll for a pending signal; returns its number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1724 | |
// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1728 | |
// Implicit OS exception handling

// Redirect execution to `handler`: save the faulting pc in the current
// JavaThread and overwrite the context's program counter (per-architecture
// register) so that resuming the thread enters the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // IA-64: instruction pointer is StIIP.
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
#elif _M_AMD64
  // x86-64: instruction pointer is Rip.
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // x86-32: instruction pointer is Eip.
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (LONG)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
1751 | |
1752 | |
1753 // Used for PostMortemDump | |
1754 extern "C" void safepoints(); | |
1755 extern "C" void find(int x); | |
1756 extern "C" void events(); | |
1757 | |
// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
1762 | |
1763 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E | |
1764 | |
1765 // From "Execution Protection in the Windows Operating System" draft 0.35 | |
1766 // Once a system header becomes available, the "real" define should be | |
1767 // included or copied here. | |
1768 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 | |
1769 | |
// def_excpt(FOO) expands to the two initializers  "FOO", FOO  for the
// name/number table below.
#define def_excpt(val) #val, val

// Pairs a Win32 exception code with its symbolic name, for error reporting.
struct siglabel {
  char *name;
  int number;
};

// Name table consulted by os::exception_name() below; terminated by an
// entry with a NULL name.  EXCEPTION_ILLEGAL_INSTRUCTION_2 is the
// undocumented alternate illegal-instruction code described above.
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    NULL, 0
};
1803 | |
1804 const char* os::exception_name(int exception_code, char *buf, size_t size) { | |
1805 for (int i = 0; exceptlabels[i].name != NULL; i++) { | |
1806 if (exceptlabels[i].number == exception_code) { | |
1807 jio_snprintf(buf, size, "%s", exceptlabels[i].name); | |
1808 return buf; | |
1809 } | |
1810 } | |
1811 | |
1812 return NULL; | |
1813 } | |
1814 | |
1815 //----------------------------------------------------------------------------- | |
1816 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { | |
1817 // handle exception caused by idiv; should only happen for -MinInt/-1 | |
1818 // (division by zero is handled explicitly) | |
1819 #ifdef _M_IA64 | |
1820 assert(0, "Fix Handle_IDiv_Exception"); | |
1821 #elif _M_AMD64 | |
1822 PCONTEXT ctx = exceptionInfo->ContextRecord; | |
1823 address pc = (address)ctx->Rip; | |
1824 NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc)); | |
1825 assert(pc[0] == 0xF7, "not an idiv opcode"); | |
1826 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); | |
1827 assert(ctx->Rax == min_jint, "unexpected idiv exception"); | |
1828 // set correct result values and continue after idiv instruction | |
1829 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes | |
1830 ctx->Rax = (DWORD)min_jint; // result | |
1831 ctx->Rdx = (DWORD)0; // remainder | |
1832 // Continue the execution | |
1833 #else | |
1834 PCONTEXT ctx = exceptionInfo->ContextRecord; | |
1835 address pc = (address)ctx->Eip; | |
1836 NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc)); | |
1837 assert(pc[0] == 0xF7, "not an idiv opcode"); | |
1838 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); | |
1839 assert(ctx->Eax == min_jint, "unexpected idiv exception"); | |
1840 // set correct result values and continue after idiv instruction | |
1841 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes | |
1842 ctx->Eax = (DWORD)min_jint; // result | |
1843 ctx->Edx = (DWORD)0; // remainder | |
1844 // Continue the execution | |
1845 #endif | |
1846 return EXCEPTION_CONTINUE_EXECUTION; | |
1847 } | |
1848 | |
#ifndef _WIN64
//-----------------------------------------------------------------------------
// If a floating point exception fired because a native method changed the
// x87 control word (unmasking exceptions the VM expects to stay masked),
// restore the VM's standard control word, clear the pending exception bits
// and resume; otherwise let the exception propagate.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  // All FLT exception codes deliberately fall through to the shared
  // control-word check below.
  switch (exception_code) {
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
      if (fp_control_word != ctx->FloatSave.ControlWord) {
        // Restore FPCW and mask out FLT exceptions
        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
        // Mask out pending FLT exceptions
        ctx->FloatSave.StatusWord &= 0xffffff00;
        return EXCEPTION_CONTINUE_EXECUTION;
      }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
        ctx->MxCsr = MxCsr;
        return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif //_WIN64
1893 | |
1894 | |
1895 // Fatal error reporting is single threaded so we can make this a | |
1896 // static and preallocated. If it's more than MAX_PATH silently ignore | |
1897 // it. | |
1898 static char saved_error_file[MAX_PATH] = {0}; | |
1899 | |
1900 void os::set_error_file(const char *logfile) { | |
1901 if (strlen(logfile) <= MAX_PATH) { | |
1902 strncpy(saved_error_file, logfile, MAX_PATH); | |
1903 } | |
1904 } | |
1905 | |
// Reports a fatal VM error for the given thread/exception and normally does
// not return (VMError::report_and_die aborts the process).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
1914 | |
1915 //----------------------------------------------------------------------------- | |
1916 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { | |
1917 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; | |
1918 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; | |
1919 #ifdef _M_IA64 | |
1920 address pc = (address) exceptionInfo->ContextRecord->StIIP; | |
1921 #elif _M_AMD64 | |
1922 address pc = (address) exceptionInfo->ContextRecord->Rip; | |
1923 #else | |
1924 address pc = (address) exceptionInfo->ContextRecord->Eip; | |
1925 #endif | |
1926 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady | |
1927 | |
1928 #ifndef _WIN64 | |
1929 // Execution protection violation - win32 running on AMD64 only | |
1930 // Handled first to avoid misdiagnosis as a "normal" access violation; | |
1931 // This is safe to do because we have a new/unique ExceptionInformation | |
1932 // code for this condition. | |
1933 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { | |
1934 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; | |
1935 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; | |
1936 address addr = (address) exceptionRecord->ExceptionInformation[1]; | |
1937 | |
1938 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { | |
1939 int page_size = os::vm_page_size(); | |
1940 | |
1941 // Make sure the pc and the faulting address are sane. | |
1942 // | |
1943 // If an instruction spans a page boundary, and the page containing | |
1944 // the beginning of the instruction is executable but the following | |
1945 // page is not, the pc and the faulting address might be slightly | |
1946 // different - we still want to unguard the 2nd page in this case. | |
1947 // | |
1948 // 15 bytes seems to be a (very) safe value for max instruction size. | |
1949 bool pc_is_near_addr = | |
1950 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); | |
1951 bool instr_spans_page_boundary = | |
1952 (align_size_down((intptr_t) pc ^ (intptr_t) addr, | |
1953 (intptr_t) page_size) > 0); | |
1954 | |
1955 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { | |
1956 static volatile address last_addr = | |
1957 (address) os::non_memory_address_word(); | |
1958 | |
1959 // In conservative mode, don't unguard unless the address is in the VM | |
1960 if (UnguardOnExecutionViolation > 0 && addr != last_addr && | |
1961 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { | |
1962 | |
1963 // Unguard and retry | |
1964 address page_start = | |
1965 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); | |
1966 bool res = os::unguard_memory((char*) page_start, page_size); | |
1967 | |
1968 if (PrintMiscellaneous && Verbose) { | |
1969 char buf[256]; | |
1970 jio_snprintf(buf, sizeof(buf), "Execution protection violation " | |
1971 "at " INTPTR_FORMAT | |
1972 ", unguarding " INTPTR_FORMAT ": %s", addr, | |
1973 page_start, (res ? "success" : strerror(errno))); | |
1974 tty->print_raw_cr(buf); | |
1975 } | |
1976 | |
1977 // Set last_addr so if we fault again at the same address, we don't | |
1978 // end up in an endless loop. | |
1979 // | |
1980 // There are two potential complications here. Two threads trapping | |
1981 // at the same address at the same time could cause one of the | |
1982 // threads to think it already unguarded, and abort the VM. Likely | |
1983 // very rare. | |
1984 // | |
1985 // The other race involves two threads alternately trapping at | |
1986 // different addresses and failing to unguard the page, resulting in | |
1987 // an endless loop. This condition is probably even more unlikely | |
1988 // than the first. | |
1989 // | |
1990 // Although both cases could be avoided by using locks or thread | |
1991 // local last_addr, these solutions are unnecessary complication: | |
1992 // this handler is a best-effort safety net, not a complete solution. | |
1993 // It is disabled by default and should only be used as a workaround | |
1994 // in case we missed any no-execute-unsafe VM code. | |
1995 | |
1996 last_addr = addr; | |
1997 | |
1998 return EXCEPTION_CONTINUE_EXECUTION; | |
1999 } | |
2000 } | |
2001 | |
2002 // Last unguard failed or not unguarding | |
2003 tty->print_raw_cr("Execution protection violation"); | |
2004 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, | |
2005 exceptionInfo->ContextRecord); | |
2006 return EXCEPTION_CONTINUE_SEARCH; | |
2007 } | |
2008 } | |
2009 #endif // _WIN64 | |
2010 | |
2011 // Check to see if we caught the safepoint code in the | |
2012 // process of write protecting the memory serialization page. | |
2013 // It write enables the page immediately after protecting it | |
2014 // so just return. | |
2015 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { | |
2016 JavaThread* thread = (JavaThread*) t; | |
2017 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; | |
2018 address addr = (address) exceptionRecord->ExceptionInformation[1]; | |
2019 if ( os::is_memory_serialize_page(thread, addr) ) { | |
2020 // Block current thread until the memory serialize page permission restored. | |
2021 os::block_on_serialize_page_trap(); | |
2022 return EXCEPTION_CONTINUE_EXECUTION; | |
2023 } | |
2024 } | |
2025 | |
2026 | |
2027 if (t != NULL && t->is_Java_thread()) { | |
2028 JavaThread* thread = (JavaThread*) t; | |
2029 bool in_java = thread->thread_state() == _thread_in_Java; | |
2030 | |
2031 // Handle potential stack overflows up front. | |
2032 if (exception_code == EXCEPTION_STACK_OVERFLOW) { | |
2033 if (os::uses_stack_guard_pages()) { | |
2034 #ifdef _M_IA64 | |
2035 // | |
2036 // If it's a legal stack address continue, Windows will map it in. | |
2037 // | |
2038 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; | |
2039 address addr = (address) exceptionRecord->ExceptionInformation[1]; | |
2040 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) | |
2041 return EXCEPTION_CONTINUE_EXECUTION; | |
2042 | |
2043 // The register save area is the same size as the memory stack | |
2044 // and starts at the page just above the start of the memory stack. | |
2045 // If we get a fault in this area, we've run out of register | |
2046 // stack. If we are in java, try throwing a stack overflow exception. | |
2047 if (addr > thread->stack_base() && | |
2048 addr <= (thread->stack_base()+thread->stack_size()) ) { | |
2049 char buf[256]; | |
2050 jio_snprintf(buf, sizeof(buf), | |
2051 "Register stack overflow, addr:%p, stack_base:%p\n", | |
2052 addr, thread->stack_base() ); | |
2053 tty->print_raw_cr(buf); | |
2054 // If not in java code, return and hope for the best. | |
2055 return in_java ? Handle_Exception(exceptionInfo, | |
2056 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) | |
2057 : EXCEPTION_CONTINUE_EXECUTION; | |
2058 } | |
2059 #endif | |
2060 if (thread->stack_yellow_zone_enabled()) { | |
2061 // Yellow zone violation. The o/s has unprotected the first yellow | |
2062 // zone page for us. Note: must call disable_stack_yellow_zone to | |
2063 // update the enabled status, even if the zone contains only one page. | |
2064 thread->disable_stack_yellow_zone(); | |
2065 // If not in java code, return and hope for the best. | |
2066 return in_java ? Handle_Exception(exceptionInfo, | |
2067 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) | |
2068 : EXCEPTION_CONTINUE_EXECUTION; | |
2069 } else { | |
2070 // Fatal red zone violation. | |
2071 thread->disable_stack_red_zone(); | |
2072 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); | |
2073 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2074 exceptionInfo->ContextRecord); | |
2075 return EXCEPTION_CONTINUE_SEARCH; | |
2076 } | |
2077 } else if (in_java) { | |
2078 // JVM-managed guard pages cannot be used on win95/98. The o/s provides | |
2079 // a one-time-only guard page, which it has released to us. The next | |
2080 // stack overflow on this thread will result in an ACCESS_VIOLATION. | |
2081 return Handle_Exception(exceptionInfo, | |
2082 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); | |
2083 } else { | |
2084 // Can only return and hope for the best. Further stack growth will | |
2085 // result in an ACCESS_VIOLATION. | |
2086 return EXCEPTION_CONTINUE_EXECUTION; | |
2087 } | |
2088 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { | |
2089 // Either stack overflow or null pointer exception. | |
2090 if (in_java) { | |
2091 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; | |
2092 address addr = (address) exceptionRecord->ExceptionInformation[1]; | |
2093 address stack_end = thread->stack_base() - thread->stack_size(); | |
2094 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { | |
2095 // Stack overflow. | |
2096 assert(!os::uses_stack_guard_pages(), | |
2097 "should be caught by red zone code above."); | |
2098 return Handle_Exception(exceptionInfo, | |
2099 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); | |
2100 } | |
2101 // | |
2102 // Check for safepoint polling and implicit null | |
2103 // We only expect null pointers in the stubs (vtable) | |
2104 // the rest are checked explicitly now. | |
2105 // | |
2106 CodeBlob* cb = CodeCache::find_blob(pc); | |
2107 if (cb != NULL) { | |
2108 if (os::is_poll_address(addr)) { | |
2109 address stub = SharedRuntime::get_poll_stub(pc); | |
2110 return Handle_Exception(exceptionInfo, stub); | |
2111 } | |
2112 } | |
2113 { | |
2114 #ifdef _WIN64 | |
2115 // | |
2116 // If it's a legal stack address map the entire region in | |
2117 // | |
2118 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; | |
2119 address addr = (address) exceptionRecord->ExceptionInformation[1]; | |
2120 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) { | |
2121 addr = (address)((uintptr_t)addr & | |
2122 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); | |
2123 os::commit_memory( (char *)addr, thread->stack_base() - addr ); | |
2124 return EXCEPTION_CONTINUE_EXECUTION; | |
2125 } | |
2126 else | |
2127 #endif | |
2128 { | |
2129 // Null pointer exception. | |
2130 #ifdef _M_IA64 | |
2131 // We catch register stack overflows in compiled code by doing | |
2132 // an explicit compare and executing a st8(G0, G0) if the | |
2133 // BSP enters into our guard area. We test for the overflow | |
2134 // condition and fall into the normal null pointer exception | |
2135 // code if BSP hasn't overflowed. | |
2136 if ( in_java ) { | |
2137 if(thread->register_stack_overflow()) { | |
2138 assert((address)exceptionInfo->ContextRecord->IntS3 == | |
2139 thread->register_stack_limit(), | |
2140 "GR7 doesn't contain register_stack_limit"); | |
2141 // Disable the yellow zone which sets the state that | |
2142 // we've got a stack overflow problem. | |
2143 if (thread->stack_yellow_zone_enabled()) { | |
2144 thread->disable_stack_yellow_zone(); | |
2145 } | |
2146 // Give us some room to process the exception | |
2147 thread->disable_register_stack_guard(); | |
2148 // Update GR7 with the new limit so we can continue running | |
2149 // compiled code. | |
2150 exceptionInfo->ContextRecord->IntS3 = | |
2151 (ULONGLONG)thread->register_stack_limit(); | |
2152 return Handle_Exception(exceptionInfo, | |
2153 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); | |
2154 } else { | |
2155 // | |
2156 // Check for implicit null | |
2157 // We only expect null pointers in the stubs (vtable) | |
2158 // the rest are checked explicitly now. | |
2159 // | |
2160 CodeBlob* cb = CodeCache::find_blob(pc); | |
2161 if (cb != NULL) { | |
2162 if (VtableStubs::stub_containing(pc) != NULL) { | |
2163 if (((uintptr_t)addr) < os::vm_page_size() ) { | |
2164 // an access to the first page of VM--assume it is a null pointer | |
2165 return Handle_Exception(exceptionInfo, | |
2166 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); | |
2167 } | |
2168 } | |
2169 } | |
2170 } | |
2171 } // in_java | |
2172 | |
2173 // IA64 doesn't use implicit null checking yet. So we shouldn't | |
2174 // get here. | |
2175 tty->print_raw_cr("Access violation, possible null pointer exception"); | |
2176 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2177 exceptionInfo->ContextRecord); | |
2178 return EXCEPTION_CONTINUE_SEARCH; | |
2179 #else /* !IA64 */ | |
2180 | |
2181 // Windows 98 reports faulting addresses incorrectly | |
2182 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || | |
2183 !os::win32::is_nt()) { | |
2184 return Handle_Exception(exceptionInfo, | |
2185 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); | |
2186 } | |
2187 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2188 exceptionInfo->ContextRecord); | |
2189 return EXCEPTION_CONTINUE_SEARCH; | |
2190 #endif | |
2191 } | |
2192 } | |
2193 } | |
2194 | |
2195 #ifdef _WIN64 | |
2196 // Special care for fast JNI field accessors. | |
2197 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks | |
2198 // in and the heap gets shrunk before the field access. | |
2199 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { | |
2200 address addr = JNI_FastGetField::find_slowcase_pc(pc); | |
2201 if (addr != (address)-1) { | |
2202 return Handle_Exception(exceptionInfo, addr); | |
2203 } | |
2204 } | |
2205 #endif | |
2206 | |
2207 #ifdef _WIN64 | |
2208 // Windows will sometimes generate an access violation | |
2209 // when we call malloc. Since we use VectoredExceptions | |
2210 // on 64 bit platforms, we see this exception. We must | |
2211 // pass this exception on so Windows can recover. | |
2212 // We check to see if the pc of the fault is in NTDLL.DLL | |
2213 // if so, we pass control on to Windows for handling. | |
2214 if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH; | |
2215 #endif | |
2216 | |
2217 // Stack overflow or null pointer exception in native code. | |
2218 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2219 exceptionInfo->ContextRecord); | |
2220 return EXCEPTION_CONTINUE_SEARCH; | |
2221 } | |
2222 | |
2223 if (in_java) { | |
2224 switch (exception_code) { | |
2225 case EXCEPTION_INT_DIVIDE_BY_ZERO: | |
2226 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); | |
2227 | |
2228 case EXCEPTION_INT_OVERFLOW: | |
2229 return Handle_IDiv_Exception(exceptionInfo); | |
2230 | |
2231 } // switch | |
2232 } | |
2233 #ifndef _WIN64 | |
2234 if ((thread->thread_state() == _thread_in_Java) || | |
2235 (thread->thread_state() == _thread_in_native) ) | |
2236 { | |
2237 LONG result=Handle_FLT_Exception(exceptionInfo); | |
2238 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; | |
2239 } | |
2240 #endif //_WIN64 | |
2241 } | |
2242 | |
2243 if (exception_code != EXCEPTION_BREAKPOINT) { | |
2244 #ifndef _WIN64 | |
2245 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2246 exceptionInfo->ContextRecord); | |
2247 #else | |
2248 // Itanium Windows uses a VectoredExceptionHandler | |
2249 // Which means that C++ programatic exception handlers (try/except) | |
2250 // will get here. Continue the search for the right except block if | |
2251 // the exception code is not a fatal code. | |
2252 switch ( exception_code ) { | |
2253 case EXCEPTION_ACCESS_VIOLATION: | |
2254 case EXCEPTION_STACK_OVERFLOW: | |
2255 case EXCEPTION_ILLEGAL_INSTRUCTION: | |
2256 case EXCEPTION_ILLEGAL_INSTRUCTION_2: | |
2257 case EXCEPTION_INT_OVERFLOW: | |
2258 case EXCEPTION_INT_DIVIDE_BY_ZERO: | |
2259 { report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, | |
2260 exceptionInfo->ContextRecord); | |
2261 } | |
2262 break; | |
2263 default: | |
2264 break; | |
2265 } | |
2266 #endif | |
2267 } | |
2268 return EXCEPTION_CONTINUE_SEARCH; | |
2269 } | |
2270 | |
2271 #ifndef _WIN64 | |
2272 // Special care for fast JNI accessors. | |
2273 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and | |
2274 // the heap gets shrunk before the field access. | |
2275 // Need to install our own structured exception handler since native code may | |
2276 // install its own. | |
2277 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { | |
2278 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; | |
2279 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { | |
2280 address pc = (address) exceptionInfo->ContextRecord->Eip; | |
2281 address addr = JNI_FastGetField::find_slowcase_pc(pc); | |
2282 if (addr != (address)-1) { | |
2283 return Handle_Exception(exceptionInfo, addr); | |
2284 } | |
2285 } | |
2286 return EXCEPTION_CONTINUE_SEARCH; | |
2287 } | |
2288 | |
// Generates a wrapper around the fast JNI Get<Primitive>Field accessor of
// the given type.  The wrapper installs fastJNIAccessorExceptionFilter as a
// structured exception handler so that a trap inside the accessor (see the
// comment above the filter) either resumes in the slow-case stub or falls
// back to returning 0.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

// One wrapper per primitive type.
DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)
2306 | |
// Returns the address of the __try/__except-protected wrapper (generated by
// DEFINE_FAST_GETFIELD above) for the fast JNI accessor of the given basic
// type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
    case T_INT: return (address)jni_fast_GetIntField_wrapper;
    case T_LONG: return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
    default: ShouldNotReachHere();
  }
  // Not reached; keeps the compiler happy.
  return (address)-1;
}
2321 #endif | |
2322 | |
// Virtual Memory

// System page size, forwarded from the os::win32 layer (cached at VM
// startup -- presumably from GetSystemInfo; confirm in os::win32 init).
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which address space can be reserved, forwarded from the
// os::win32 layer (typically 64K on Windows -- confirm in os::win32 init).
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2329 | |
2330 // Windows large page support is available on Windows 2003. In order to use | |
2331 // large page memory, the administrator must first assign additional privilege | |
2332 // to the user: | |
2333 // + select Control Panel -> Administrative Tools -> Local Security Policy | |
2334 // + select Local Policies -> User Rights Assignment | |
2335 // + double click "Lock pages in memory", add users and/or groups | |
2336 // + reboot | |
2337 // Note the above steps are needed for administrator as well, as administrators | |
2338 // by default do not have the privilege to lock pages in memory. | |
2339 // | |
2340 // Note about Windows 2003: although the API supports committing large page | |
2341 // memory on a page-by-page basis and VirtualAlloc() returns success under this | |
2342 // scenario, I found through experiment it only uses large page if the entire | |
2343 // memory region is reserved and committed in a single VirtualAlloc() call. | |
2344 // This makes Windows large page support more or less like Solaris ISM, in | |
2345 // that the entire heap must be committed upfront. This probably will change | |
2346 // in the future, if so the code below needs to be revisited. | |
2347 | |
2348 #ifndef MEM_LARGE_PAGES | |
2349 #define MEM_LARGE_PAGES 0x20000000 | |
2350 #endif | |
2351 | |
// GetLargePageMinimum is only available on Windows 2003. The other functions
// are available on NT but not on Windows 98/Me. We have to resolve them at
// runtime.
typedef SIZE_T (WINAPI *GetLargePageMinimum_func_type) (void);
typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type)
             (HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_func_type) (HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_func_type) (LPCTSTR, LPCTSTR, PLUID);

// Entry points resolved by resolve_functions_for_large_page_init().
static GetLargePageMinimum_func_type _GetLargePageMinimum;
static AdjustTokenPrivileges_func_type _AdjustTokenPrivileges;
static OpenProcessToken_func_type _OpenProcessToken;
static LookupPrivilegeValue_func_type _LookupPrivilegeValue;

// Library and token handles held only during large page initialization;
// released by cleanup_after_large_page_init().
static HINSTANCE _kernel32;
static HINSTANCE _advapi32;
static HANDLE _hProcess;
static HANDLE _hToken;

// Large page size in bytes; stays 0 unless os::large_page_init() succeeds.
static size_t _large_page_size = 0;
2372 | |
2373 static bool resolve_functions_for_large_page_init() { | |
2374 _kernel32 = LoadLibrary("kernel32.dll"); | |
2375 if (_kernel32 == NULL) return false; | |
2376 | |
2377 _GetLargePageMinimum = CAST_TO_FN_PTR(GetLargePageMinimum_func_type, | |
2378 GetProcAddress(_kernel32, "GetLargePageMinimum")); | |
2379 if (_GetLargePageMinimum == NULL) return false; | |
2380 | |
2381 _advapi32 = LoadLibrary("advapi32.dll"); | |
2382 if (_advapi32 == NULL) return false; | |
2383 | |
2384 _AdjustTokenPrivileges = CAST_TO_FN_PTR(AdjustTokenPrivileges_func_type, | |
2385 GetProcAddress(_advapi32, "AdjustTokenPrivileges")); | |
2386 _OpenProcessToken = CAST_TO_FN_PTR(OpenProcessToken_func_type, | |
2387 GetProcAddress(_advapi32, "OpenProcessToken")); | |
2388 _LookupPrivilegeValue = CAST_TO_FN_PTR(LookupPrivilegeValue_func_type, | |
2389 GetProcAddress(_advapi32, "LookupPrivilegeValueA")); | |
2390 return _AdjustTokenPrivileges != NULL && | |
2391 _OpenProcessToken != NULL && | |
2392 _LookupPrivilegeValue != NULL; | |
2393 } | |
2394 | |
2395 static bool request_lock_memory_privilege() { | |
2396 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, | |
2397 os::current_process_id()); | |
2398 | |
2399 LUID luid; | |
2400 if (_hProcess != NULL && | |
2401 _OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && | |
2402 _LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { | |
2403 | |
2404 TOKEN_PRIVILEGES tp; | |
2405 tp.PrivilegeCount = 1; | |
2406 tp.Privileges[0].Luid = luid; | |
2407 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; | |
2408 | |
2409 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the | |
2410 // privilege. Check GetLastError() too. See MSDN document. | |
2411 if (_AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && | |
2412 (GetLastError() == ERROR_SUCCESS)) { | |
2413 return true; | |
2414 } | |
2415 } | |
2416 | |
2417 return false; | |
2418 } | |
2419 | |
2420 static void cleanup_after_large_page_init() { | |
2421 _GetLargePageMinimum = NULL; | |
2422 _AdjustTokenPrivileges = NULL; | |
2423 _OpenProcessToken = NULL; | |
2424 _LookupPrivilegeValue = NULL; | |
2425 if (_kernel32) FreeLibrary(_kernel32); | |
2426 _kernel32 = NULL; | |
2427 if (_advapi32) FreeLibrary(_advapi32); | |
2428 _advapi32 = NULL; | |
2429 if (_hProcess) CloseHandle(_hProcess); | |
2430 _hProcess = NULL; | |
2431 if (_hToken) CloseHandle(_hToken); | |
2432 _hToken = NULL; | |
2433 } | |
2434 | |
// Initializes Windows large page support: resolves the needed API entry
// points, acquires SeLockMemoryPrivilege, queries the minimum large page
// size and registers the resulting sizes in _page_sizes.  Returns true iff
// large pages can be used.  Warnings are only printed when a large-page
// flag was set explicitly on the command line.
bool os::large_page_init() {
  if (!UseLargePages) return false;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = _GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          // Honor a user-specified size if it is a multiple of the
          // processor's minimum large page size.
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  // Register the large page size ahead of the default page size.
  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  return success;
}
2483 | |
// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::split_reserved_memory(char *base, size_t size, size_t split,
                              bool realloc) {
  if (size > 0) {
    // Release the whole region, then immediately re-reserve it as two
    // independent reservations on either side of 'split'.  The lower
    // part is only re-reserved when the caller asks (realloc).
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}
2499 | |
2500 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { | |
2501 assert((size_t)addr % os::vm_allocation_granularity() == 0, | |
2502 "reserve alignment"); | |
2503 assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); | |
2504 char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, | |
2505 PAGE_EXECUTE_READWRITE); | |
2506 assert(res == NULL || addr == NULL || addr == res, | |
2507 "Unexpected address from reserve."); | |
2508 return res; | |
2509 } | |
2510 | |
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so a plain reservation attempt already has the
  // required "only if available" semantics.
  return reserve_memory(bytes, requested_addr);
}
2518 | |
// Size in bytes of the large page chosen by large_page_init(), or 0 if
// large pages are not in use.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
2529 | |
79
82db0859acbe
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
62
diff
changeset
|
2530 bool os::can_execute_large_page_memory() { |
82db0859acbe
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
62
diff
changeset
|
2531 return true; |
82db0859acbe
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
62
diff
changeset
|
2532 } |
82db0859acbe
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
62
diff
changeset
|
2533 |
0 | 2534 char* os::reserve_memory_special(size_t bytes) { |
2535 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; | |
79
82db0859acbe
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
62
diff
changeset
|
2536 char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE); |
0 | 2537 return res; |
2538 } | |
2539 | |
// Release a region obtained from reserve_memory_special().  On Windows a
// large-page region is freed the same way as an ordinary reservation.
bool os::release_memory_special(char* base, size_t bytes) {
  return release_memory(base, bytes);
}

// No Windows-specific OS statistics to print.
void os::print_statistics() {
}
2546 | |
// Commit page-aligned, previously reserved memory.  Returns true on
// success (and trivially for a zero-byte request).
bool os::commit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.
  return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL;
}

// The alignment hint is not needed on Windows; delegate to the
// two-argument form.
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
  return commit_memory(addr, size);
}
2562 | |
// Decommit page-aligned memory, returning the pages to reserved state.
bool os::uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
}

// Release an entire reservation.  VirtualFree with MEM_RELEASE requires
// a size of 0 and frees the whole region starting at 'addr', so the
// 'bytes' argument is necessarily unused here.
bool os::release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}
2576 | |
// Write-protect [addr, addr+bytes): pages become read-only.
bool os::protect_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0;
}

// Arm guard pages on the range: the PAGE_GUARD attribute makes the next
// access fault, which the VM uses for stack overflow detection.
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

// Remove the guard attribute, restoring ordinary full access.
bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &old_status) != 0;
}
2591 | |
// NUMA and page-placement hooks: Windows gets no special handling here,
// so these are no-ops or single-node answers.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::free_memory(char *addr, size_t bytes) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return 1; }
int os::numa_get_group_id() { return 0; }
// Report a single leaf locality group (id 0) if the caller provided room;
// returns the number of ids written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}
2606 | |
// Per-page placement queries are not supported on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page scanning support; report the scan as having covered the whole
// range by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}
2621 | |
#define MAX_ERROR_COUNT 100
// ResumeThread/SuspendThread signal failure with (DWORD)-1.
#define SYS_THREAD_ERROR 0xffffffffUL

// Kick off a thread that was created in the suspended state by resuming
// its Win32 thread handle.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0: Thread was not suspended
  // 1: Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
2633 | |
// Thin wrapper over the C runtime ::read(); returns its result directly.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2637 | |
2638 class HighResolutionInterval { | |
2639 // The default timer resolution seems to be 10 milliseconds. | |
2640 // (Where is this written down?) | |
2641 // If someone wants to sleep for only a fraction of the default, | |
2642 // then we set the timer resolution down to 1 millisecond for | |
2643 // the duration of their interval. | |
2644 // We carefully set the resolution back, since otherwise we | |
2645 // seem to incur an overhead (3%?) that we don't need. | |
2646 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. | |
2647 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). | |
2648 // Alternatively, we could compute the relative error (503/500 = .6%) and only use | |
2649 // timeBeginPeriod() if the relative error exceeded some threshold. | |
2650 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and | |
2651 // to decreased efficiency related to increased timer "tick" rates. We want to minimize | |
2652 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high | |
2653 // resolution timers running. | |
2654 private: | |
2655 jlong resolution; | |
2656 public: | |
2657 HighResolutionInterval(jlong ms) { | |
2658 resolution = ms % 10L; | |
2659 if (resolution != 0) { | |
2660 MMRESULT result = timeBeginPeriod(1L); | |
2661 } | |
2662 } | |
2663 ~HighResolutionInterval() { | |
2664 if (resolution != 0) { | |
2665 MMRESULT result = timeEndPeriod(1L); | |
2666 } | |
2667 resolution = 0L; | |
2668 } | |
2669 }; | |
2670 | |
// Sleep for 'ms' milliseconds.  Interruptible sleeps (Java threads only)
// wait on the thread's interrupt event and return OS_INTRPT when it is
// signalled, consuming the interrupt; otherwise OS_TIMEOUT is returned.
// Non-interruptible sleeps (VM threads) just call Sleep().
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // WaitForMultipleObjects/Sleep take a DWORD timeout, so chop very long
  // sleeps into MAXDWORD-sized pieces.  An early (interrupted) return
  // from any piece ends the whole sleep.
  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    // Temporarily raise the timer resolution for sub-10ms accuracy,
    // unless the user forced high resolution globally.
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // Interrupt event fired: consume it (reset event + clear flag).
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
2717 | |
2718 // Sleep forever; naked call to OS-specific sleep; use with CAUTION | |
2719 void os::infinite_sleep() { | |
2720 while (true) { // sleep forever ... | |
2721 Sleep(100000); // ... 100 seconds at a time | |
2722 } | |
2723 } | |
2724 | |
// Signature of kernel32!SwitchToThread, resolved lazily below.
typedef BOOL (WINAPI * STTSignature)(void) ;

// Yield the CPU once without any VM state transitions.
os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  // We use GetProcAddress() as ancient Win9X versions of windows don't support SwitchToThread.
  // In that case we revert to Sleep(0).
  // (STTSignature)1 is a sentinel meaning "not yet resolved".
  static volatile STTSignature stt = (STTSignature) 1 ;

  if (stt == ((STTSignature) 1)) {
    stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ;
    // It's OK if threads race during initialization as the operation above is idempotent.
  }
  if (stt != NULL) {
    return (*stt)() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    Sleep (0) ;
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() {  os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}
2752 | |
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[MaxPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST                       // 10 MaxPriority
};

// Alternative, more aggressive mapping installed over java_to_os_priority
// by prio_init() when ThreadPriorityPolicy == 1; top priority becomes
// TIME_CRITICAL.
int prio_policy1[MaxPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 10 MaxPriority
};
2784 | |
2785 static int prio_init() { | |
2786 // If ThreadPriorityPolicy is 1, switch tables | |
2787 if (ThreadPriorityPolicy == 1) { | |
2788 int i; | |
2789 for (i = 0; i < MaxPriority + 1; i++) { | |
2790 os::java_to_os_priority[i] = prio_policy1[i]; | |
2791 } | |
2792 } | |
2793 return 0; | |
2794 } | |
2795 | |
// Apply 'priority' (a Win32 THREAD_PRIORITY_* value, see
// java_to_os_priority above) to the native thread.  A no-op returning
// OS_OK when UseThreadPriorities is off.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}
2801 | |
2802 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) { | |
2803 if ( !UseThreadPriorities ) { | |
2804 *priority_ptr = java_to_os_priority[NormPriority]; | |
2805 return OS_OK; | |
2806 } | |
2807 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); | |
2808 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { | |
2809 assert(false, "GetThreadPriority failed"); | |
2810 return OS_ERR; | |
2811 } | |
2812 *priority_ptr = os_prio; | |
2813 return OS_OK; | |
2814 } | |
2815 | |
2816 | |
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Interrupt 'thread': set its interrupted flag, signal its interrupt
// event (which wakes os::sleep), and unpark it for both JSR-166 parkers
// and the ParkEvent used by Object.wait / synchronization.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
2841 | |
2842 | |
2843 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { | |
2844 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), | |
2845 "possibility of dangling Thread pointer"); | |
2846 | |
2847 OSThread* osthread = thread->osthread(); | |
2848 bool interrupted; | |
2849 interrupted = osthread->interrupted(); | |
2850 if (clear_interrupted == true) { | |
2851 osthread->set_interrupted(false); | |
2852 ResetEvent(osthread->interrupt_event()); | |
2853 } // Otherwise leave the interrupted state alone | |
2854 | |
2855 return interrupted; | |
2856 } | |
2857 | |
// Gets a pc (hint) for a running thread. Currently used only for profiling.
// Returns ExtendedPC(NULL) if the context cannot be fetched (or always,
// on IA64, where this is not yet implemented).
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}
2878 | |
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()         { return GetCurrentThreadId(); }

// Pid captured once in os::init(); fall back to _getpid() when queried
// before initialization.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}
2888 | |
// Cached system characteristics, filled in by initialize_system_info().
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

// Thread accounting used to anticipate address-space exhaustion
// (see the _os_thread_limit calculation in os::init_2).
intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

// True on NT-family kernels, false on Win9x-family systems.
bool os::win32::_is_nt = false;
2902 | |
// Populate the cached os::win32 system characteristics (page size,
// allocation granularity, processor info, physical memory, platform
// family, default stack size) and start the performance counter.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  _processor_count = si.dwNumberOfProcessors;

  MEMORYSTATUS ms;
  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatus(&ms);
  _physical_memory = ms.dwTotalPhys;

  OSVERSIONINFO oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
  GetVersionEx(&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT: _is_nt = true; break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
    "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues thread operations at
  // too high a frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks have been noticed on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}
2943 | |
2944 | |
// Put the standard C streams into binary mode so no CR/LF translation
// occurs on reads and writes.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}
2950 | |
2951 | |
2952 int os::message_box(const char* title, const char* message) { | |
2953 int result = MessageBox(NULL, message, title, | |
2954 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); | |
2955 return result == IDYES; | |
2956 } | |
2957 | |
// Thread-local storage: thin wrappers over the Win32 TLS slot API.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}
2977 | |
2978 | |
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled

// SEH filter: handle only access violations whose first exception
// parameter marks the fault as an instruction-fetch (execute) violation.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Probe for NX by executing a one-byte 'ret' placed on the stack; if NX
// is on, the call faults and the filter above reports it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT
3004 | |
// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // GetCurrentThread() also returns a pseudo handle; duplicate it into a
  // real handle that remains valid when used from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}
3029 | |
// To install functions for atexit processing
extern "C" {
  // C-linkage trampoline so perfMemory_exit() can be registered with atexit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3036 | |
3037 | |
3038 // this is called _after_ the global arguments have been parsed | |
3039 jint os::init_2(void) { | |
3040 // Allocate a single page and mark it as readable for safepoint polling | |
3041 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); | |
3042 guarantee( polling_page != NULL, "Reserve Failed for polling page"); | |
3043 | |
3044 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); | |
3045 guarantee( return_page != NULL, "Commit Failed for polling page"); | |
3046 | |
3047 os::set_polling_page( polling_page ); | |
3048 | |
3049 #ifndef PRODUCT | |
3050 if( Verbose && PrintMiscellaneous ) | |
3051 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); | |
3052 #endif | |
3053 | |
3054 if (!UseMembar) { | |
3055 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_EXECUTE_READWRITE); | |
3056 guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); | |
3057 | |
3058 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_EXECUTE_READWRITE); | |
3059 guarantee( return_page != NULL, "Commit Failed for memory serialize page"); | |
3060 | |
3061 os::set_memory_serialize_page( mem_serialize_page ); | |
3062 | |
3063 #ifndef PRODUCT | |
3064 if(Verbose && PrintMiscellaneous) | |
3065 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); | |
3066 #endif | |
3067 } | |
3068 | |
3069 FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); | |
3070 | |
3071 // Setup Windows Exceptions | |
3072 | |
3073 // On Itanium systems, Structured Exception Handling does not | |
3074 // work since stack frames must be walkable by the OS. Since | |
3075 // much of our code is dynamically generated, and we do not have | |
3076 // proper unwind .xdata sections, the system simply exits | |
3077 // rather than delivering the exception. To work around | |
3078 // this we use VectorExceptions instead. | |
3079 #ifdef _WIN64 | |
3080 if (UseVectoredExceptions) { | |
3081 topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter); | |
3082 } | |
3083 #endif | |
3084 | |
3085 // for debugging float code generation bugs | |
3086 if (ForceFloatExceptions) { | |
3087 #ifndef _WIN64 | |
3088 static long fp_control_word = 0; | |
3089 __asm { fstcw fp_control_word } | |
3090 // see Intel PPro Manual, Vol. 2, p 7-16 | |
3091 const long precision = 0x20; | |
3092 const long underflow = 0x10; | |
3093 const long overflow = 0x08; | |
3094 const long zero_div = 0x04; | |
3095 const long denorm = 0x02; | |
3096 const long invalid = 0x01; | |
3097 fp_control_word |= invalid; | |
3098 __asm { fldcw fp_control_word } | |
3099 #endif | |
3100 } | |
3101 | |
3102 // Initialize HPI. | |
3103 jint hpi_result = hpi::initialize(); | |
3104 if (hpi_result != JNI_OK) { return hpi_result; } | |
3105 | |
3106 // If stack_commit_size is 0, windows will reserve the default size, | |
3107 // but only commit a small portion of it. | |
3108 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); | |
3109 size_t default_reserve_size = os::win32::default_stack_size(); | |
3110 size_t actual_reserve_size = stack_commit_size; | |
3111 if (stack_commit_size < default_reserve_size) { | |
3112 // If stack_commit_size == 0, we want this too | |
3113 actual_reserve_size = default_reserve_size; | |
3114 } | |
3115 | |
3116 JavaThread::set_stack_size_at_create(stack_commit_size); | |
3117 | |
3118 // Calculate theoretical max. size of Threads to guard gainst artifical | |
3119 // out-of-memory situations, where all available address-space has been | |
3120 // reserved by thread stacks. | |
3121 assert(actual_reserve_size != 0, "Must have a stack"); | |
3122 | |
3123 // Calculate the thread limit when we should start doing Virtual Memory | |
3124 // banging. Currently when the threads will have used all but 200Mb of space. | |
3125 // | |
3126 // TODO: consider performing a similar calculation for commit size instead | |
3127 // as reserve size, since on a 64-bit platform we'll run into that more | |
3128 // often than running out of virtual memory space. We can use the | |
3129 // lower value of the two calculations as the os_thread_limit. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
79
diff
changeset
|
3130 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); |
0 | 3131 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); |
3132 | |
3133 // at exit methods are called in the reverse order of their registration. | |
3134 // there is no limit to the number of functions registered. atexit does | |
3135 // not set errno. | |
3136 | |
3137 if (PerfAllowAtExitRegistration) { | |
3138 // only register atexit functions if PerfAllowAtExitRegistration is set. | |
3139 // atexit functions can be delayed until process exit time, which | |
3140 // can be problematic for embedded VM situations. Embedded VMs should | |
3141 // call DestroyJavaVM() to assure that VM resources are released. | |
3142 | |
3143 // note: perfMemory_exit_helper atexit function may be removed in | |
3144 // the future if the appropriate cleanup code can be added to the | |
3145 // VM_Exit VMOperation's doit method. | |
3146 if (atexit(perfMemory_exit_helper) != 0) { | |
3147 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); | |
3148 } | |
3149 } | |
3150 | |
3151 // initialize PSAPI or ToolHelp for fatal error handler | |
3152 if (win32::is_nt()) _init_psapi(); | |
3153 else _init_toolhelp(); | |
3154 | |
3155 #ifndef _WIN64 | |
3156 // Print something if NX is enabled (win32 on AMD64) | |
3157 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); | |
3158 #endif | |
3159 | |
3160 // initialize thread priority policy | |
3161 prio_init(); | |
3162 | |
3163 return JNI_OK; | |
3164 } | |
3165 | |
3166 | |
// Mark the polling page as unreadable (so safepoint polls fault)
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable (normal running, polls succeed)
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
    fatal("Could not enable polling page");
};
3180 | |
3181 | |
// stat() wrapper: normalizes the path for Windows and optionally adjusts
// st_mtime to a timezone-independent (UTC-based) value.  Returns the
// underlying ::stat() result, or -1 with errno == ENAMETOOLONG for
// over-long paths.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  hpi::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is an insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}
3218 | |
3219 | |
// Pack a FILETIME's two 32-bit halves into a single jlong (100ns units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
3245 | |
// Return the CPU time consumed by 'thread', in nanoseconds.  When
// user_sys_cpu_time is true the result is user + kernel time, otherwise
// user time only.  Returns -1 if GetThreadTimes() fails.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copied from the classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(thread->osthread()->thread_handle(),
                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return -1;
    else
      if (user_sys_cpu_time) {
        // FILETIME units are 100ns; multiply by 100 to get nanoseconds.
        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
      } else {
        return FT2INT64(UserTime) * 100;
      }
  } else {
    // Non-NT: no GetThreadTimes; fall back to elapsed wall-clock
    // milliseconds converted to nanoseconds.
    return (jlong) timeGetTime() * 1000000;
  }
}
3268 | |
3269 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { | |
3270 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits | |
3271 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time | |
3272 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time | |
3273 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned | |
3274 } | |
3275 | |
3276 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { | |
3277 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits | |
3278 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time | |
3279 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time | |
3280 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned | |
3281 } | |
3282 | |
3283 bool os::is_thread_cpu_time_supported() { | |
3284 // see os::thread_cpu_time | |
3285 if (os::win32::is_nt()) { | |
3286 FILETIME CreationTime; | |
3287 FILETIME ExitTime; | |
3288 FILETIME KernelTime; | |
3289 FILETIME UserTime; | |
3290 | |
3291 if ( GetThreadTimes(GetCurrentThread(), | |
3292 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) | |
3293 return false; | |
3294 else | |
3295 return true; | |
3296 } else { | |
3297 return false; | |
3298 } | |
3299 } | |
3300 | |
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
3302 // It does have primitives (PDH API) to get CPU usage and run queue length. | |
3303 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" | |
3304 // If we wanted to implement loadavg on Windows, we have a few options: | |
3305 // | |
3306 // a) Query CPU usage and run queue length and "fake" an answer by | |
3307 // returning the CPU usage if it's under 100%, and the run queue | |
3308 // length otherwise. It turns out that querying is pretty slow | |
3309 // on Windows, on the order of 200 microseconds on a fast machine. | |
3310 // Note that on the Windows the CPU usage value is the % usage | |
3311 // since the last time the API was called (and the first call | |
3312 // returns 100%), so we'd have to deal with that as well. | |
3313 // | |
3314 // b) Sample the "fake" answer using a sampling thread and store | |
3315 // the answer in a global variable. The call to loadavg would | |
3316 // just return the value of the global, avoiding the slow query. | |
3317 // | |
3318 // c) Sample a better answer using exponential decay to smooth the | |
3319 // value. This is basically the algorithm used by UNIX kernels. | |
3320 // | |
3321 // Note that sampling thread starvation could affect both (b) and (c). | |
// Not implemented on Windows (see the discussion above); always fails.
int os::loadavg(double loadavg[], int nelem) {
  return -1;  // -1 signals "load average unavailable" to callers
}
3325 | |
3326 | |
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  // Entirely controlled by the DontYieldALot command-line flag.
  return DontYieldALot;
}
3331 | |
3332 // Is a (classpath) directory empty? | |
3333 bool os::dir_is_empty(const char* path) { | |
3334 WIN32_FIND_DATA fd; | |
3335 HANDLE f = FindFirstFile(path, &fd); | |
3336 if (f == INVALID_HANDLE_VALUE) { | |
3337 return true; | |
3338 } | |
3339 FindClose(f); | |
3340 return false; | |
3341 } | |
3342 | |
3343 // create binary file, rewriting existing file if required | |
3344 int os::create_binary_file(const char* path, bool rewrite_existing) { | |
3345 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; | |
3346 if (!rewrite_existing) { | |
3347 oflags |= _O_EXCL; | |
3348 } | |
3349 return ::open(path, oflags, _S_IREAD | _S_IWRITE); | |
3350 } | |
3351 | |
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // Seeking 0 bytes from SEEK_CUR reports the position without moving it.
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
3356 | |
// move file pointer to the specified offset
// Returns the resulting offset, or -1 on error (per _lseeki64).
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
3361 | |
3362 | |
3363 // Map a block of memory. | |
3364 char* os::map_memory(int fd, const char* file_name, size_t file_offset, | |
3365 char *addr, size_t bytes, bool read_only, | |
3366 bool allow_exec) { | |
3367 HANDLE hFile; | |
3368 char* base; | |
3369 | |
3370 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, | |
3371 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); | |
3372 if (hFile == NULL) { | |
3373 if (PrintMiscellaneous && Verbose) { | |
3374 DWORD err = GetLastError(); | |
3375 tty->print_cr("CreateFile() failed: GetLastError->%ld."); | |
3376 } | |
3377 return NULL; | |
3378 } | |
3379 | |
3380 if (allow_exec) { | |
3381 // CreateFileMapping/MapViewOfFileEx can't map executable memory | |
3382 // unless it comes from a PE image (which the shared archive is not.) | |
3383 // Even VirtualProtect refuses to give execute access to mapped memory | |
3384 // that was not previously executable. | |
3385 // | |
3386 // Instead, stick the executable region in anonymous memory. Yuck. | |
3387 // Penalty is that ~4 pages will not be shareable - in the future | |
3388 // we might consider DLLizing the shared archive with a proper PE | |
3389 // header so that mapping executable + sharing is possible. | |
3390 | |
3391 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, | |
3392 PAGE_READWRITE); | |
3393 if (base == NULL) { | |
3394 if (PrintMiscellaneous && Verbose) { | |
3395 DWORD err = GetLastError(); | |
3396 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); | |
3397 } | |
3398 CloseHandle(hFile); | |
3399 return NULL; | |
3400 } | |
3401 | |
3402 DWORD bytes_read; | |
3403 OVERLAPPED overlapped; | |
3404 overlapped.Offset = (DWORD)file_offset; | |
3405 overlapped.OffsetHigh = 0; | |
3406 overlapped.hEvent = NULL; | |
3407 // ReadFile guarantees that if the return value is true, the requested | |
3408 // number of bytes were read before returning. | |
3409 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; | |
3410 if (!res) { | |
3411 if (PrintMiscellaneous && Verbose) { | |
3412 DWORD err = GetLastError(); | |
3413 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); | |
3414 } | |
3415 release_memory(base, bytes); | |
3416 CloseHandle(hFile); | |
3417 return NULL; | |
3418 } | |
3419 } else { | |
3420 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, | |
3421 NULL /*file_name*/); | |
3422 if (hMap == NULL) { | |
3423 if (PrintMiscellaneous && Verbose) { | |
3424 DWORD err = GetLastError(); | |
3425 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld."); | |
3426 } | |
3427 CloseHandle(hFile); | |
3428 return NULL; | |
3429 } | |
3430 | |
3431 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; | |
3432 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, | |
3433 (DWORD)bytes, addr); | |
3434 if (base == NULL) { | |
3435 if (PrintMiscellaneous && Verbose) { | |
3436 DWORD err = GetLastError(); | |
3437 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); | |
3438 } | |
3439 CloseHandle(hMap); | |
3440 CloseHandle(hFile); | |
3441 return NULL; | |
3442 } | |
3443 | |
3444 if (CloseHandle(hMap) == 0) { | |
3445 if (PrintMiscellaneous && Verbose) { | |
3446 DWORD err = GetLastError(); | |
3447 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); | |
3448 } | |
3449 CloseHandle(hFile); | |
3450 return base; | |
3451 } | |
3452 } | |
3453 | |
3454 if (allow_exec) { | |
3455 DWORD old_protect; | |
3456 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; | |
3457 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; | |
3458 | |
3459 if (!res) { | |
3460 if (PrintMiscellaneous && Verbose) { | |
3461 DWORD err = GetLastError(); | |
3462 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); | |
3463 } | |
3464 // Don't consider this a hard error, on IA32 even if the | |
3465 // VirtualProtect fails, we should still be able to execute | |
3466 CloseHandle(hFile); | |
3467 return base; | |
3468 } | |
3469 } | |
3470 | |
3471 if (CloseHandle(hFile) == 0) { | |
3472 if (PrintMiscellaneous && Verbose) { | |
3473 DWORD err = GetLastError(); | |
3474 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); | |
3475 } | |
3476 return base; | |
3477 } | |
3478 | |
3479 return base; | |
3480 } | |
3481 | |
3482 | |
// Remap a block of memory.
// Re-establishes a file mapping over a range previously mapped with
// map_memory().  Returns the new base address, or NULL on failure.
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
3500 | |
3501 | |
3502 // Unmap a block of memory. | |
3503 // Returns true=success, otherwise false. | |
3504 | |
3505 bool os::unmap_memory(char* addr, size_t bytes) { | |
3506 BOOL result = UnmapViewOfFile(addr); | |
3507 if (result == 0) { | |
3508 if (PrintMiscellaneous && Verbose) { | |
3509 DWORD err = GetLastError(); | |
3510 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); | |
3511 } | |
3512 return false; | |
3513 } | |
3514 return true; | |
3515 } | |
3516 | |
3517 void os::pause() { | |
3518 char filename[MAX_PATH]; | |
3519 if (PauseAtStartupFile && PauseAtStartupFile[0]) { | |
3520 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); | |
3521 } else { | |
3522 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); | |
3523 } | |
3524 | |
3525 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); | |
3526 if (fd != -1) { | |
3527 struct stat buf; | |
3528 close(fd); | |
3529 while (::stat(filename, &buf) == 0) { | |
3530 Sleep(100); | |
3531 } | |
3532 } else { | |
3533 jio_fprintf(stderr, | |
3534 "Could not open pause file '%s', continuing immediately.\n", filename); | |
3535 } | |
3536 } | |
3537 | |
3538 // An Event wraps a win32 "CreateEvent" kernel handle. | |
3539 // | |
3540 // We have a number of choices regarding "CreateEvent" win32 handle leakage: | |
3541 // | |
3542 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle | |
3543 // field, and call CloseHandle() on the win32 event handle. Unpark() would | |
3544 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. | |
3545 // In addition, an unpark() operation might fetch the handle field, but the | |
3546 // event could recycle between the fetch and the SetEvent() operation. | |
3547 // SetEvent() would either fail because the handle was invalid, or inadvertently work, | |
3548 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() | |
3549 // on an stale but recycled handle would be harmless, but in practice this might | |
3550 // confuse other non-Sun code, so it's not a viable approach. | |
3551 // | |
3552 // 2: Once a win32 event handle is associated with an Event, it remains associated | |
3553 // with the Event. The event handle is never closed. This could be construed | |
3554 // as handle leakage, but only up to the maximum # of threads that have been extant | |
3555 // at any one time. This shouldn't be an issue, as windows platforms typically | |
3556 // permit a process to have hundreds of thousands of open handles. | |
3557 // | |
3558 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList | |
3559 // and release unused handles. | |
3560 // | |
3561 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. | |
3562 // It's not clear, however, that we wouldn't be trading one type of leak for another. | |
3563 // | |
3564 // 5. Use an RCU-like mechanism (Read-Copy Update). | |
3565 // Or perhaps something similar to Maged Michael's "Hazard pointers". | |
3566 // | |
3567 // We use (2). | |
3568 // | |
3569 // TODO-FIXME: | |
3570 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. | |
3571 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks | |
3572 // to recover from (or at least detect) the dreaded Windows 841176 bug. | |
3573 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent | |
3574 // into a single win32 CreateEvent() handle. | |
3575 // | |
3576 // _Event transitions in park() | |
3577 // -1 => -1 : illegal | |
3578 // 1 => 0 : pass - return immediately | |
3579 // 0 => -1 : block | |
3580 // | |
3581 // _Event serves as a restricted-range semaphore : | |
3582 // -1 : thread is blocked | |
3583 // 0 : neutral - thread is running or ready | |
3584 // 1 : signaled - thread is running or ready | |
3585 // | |
3586 // Another possible encoding of _Event would be | |
3587 // with explicit "PARKED" and "SIGNALED" bits. | |
3588 | |
// Block the calling thread for at most Millis milliseconds, or until
// unpark()ed.  Returns OS_OK when awoken by unpark() (or when a permit
// was already pending), OS_TIMEOUT when the wait timed out.
int os::PlatformEvent::park (jlong Millis) {
    guarantee (_ParkHandle != NULL , "Invariant") ;
    guarantee (Millis > 0          , "Invariant") ;
    int v ;

    // CONSIDER: defer assigning a CreateEvent() handle to the Event until
    // the initial park() operation.

    // Atomically decrement _Event: 1=>0 consumes a pending unpark() permit,
    // 0=>-1 records that this thread is about to block.
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return OS_OK ;      // a permit was pending - no need to block

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    //
    // We decompose long timeouts into series of shorter timed waits.
    // Evidently large timo values passed in WaitForSingleObject() are problematic on some
    // versions of Windows. See EventWait() for details. This may be superstition. Or not.
    // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
    // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
    // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
    // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
    // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
    // for the already waited time. This policy does not admit any new outcomes.
    // In the future, however, we might want to track the accumulated wait time and
    // adjust Millis accordingly if we encounter a spurious wakeup.

    const int MAXTIMEOUT = 0x10000000 ;
    DWORD rv = WAIT_TIMEOUT ;
    while (_Event < 0 && Millis > 0) {
       DWORD prd = Millis ;     // set prd = MIN (Millis, MAXTIMEOUT)
       if (Millis > MAXTIMEOUT) {
          prd = MAXTIMEOUT ;
       }
       rv = ::WaitForSingleObject (_ParkHandle, prd) ;
       assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
       if (rv == WAIT_TIMEOUT) {
           Millis -= prd ;
       }
    }
    v = _Event ;
    _Event = 0 ;
    OrderAccess::fence() ;
    // If we encounter a nearly simultaneous timeout expiry and unpark()
    // we return OS_OK indicating we awoke via unpark().
    // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
    return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}
3641 | |
// Block the calling thread until unpark()ed (or return immediately if a
// permit is already pending).  Untimed variant of park(jlong).
void os::PlatformEvent::park () {
    guarantee (_ParkHandle != NULL, "Invariant") ;
    // Invariant: Only the thread associated with the Event/PlatformEvent
    // may call park().
    // Atomically decrement _Event: 1=>0 consumes a pending unpark() permit,
    // 0=>-1 records that this thread is about to block.
    int v ;
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return ;

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    // Re-check _Event after every wakeup to tolerate spurious returns
    // from WaitForSingleObject().
    while (_Event < 0) {
       DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
       assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
    }

    // Usually we'll find _Event == 0 at this point, but as
    // an optional optimization we clear it, just in case
    // multiple unpark() operations drove _Event up to 1.
    _Event = 0 ;
    OrderAccess::fence() ;
    guarantee (_Event >= 0, "invariant") ;
}
3669 | |
// Make a permit available: raise _Event towards 1 and, if a thread was
// blocked (_Event was negative), signal the win32 event to wake it.
void os::PlatformEvent::unpark() {
    guarantee (_ParkHandle != NULL, "Invariant") ;
    int v ;
    for (;;) {
        v = _Event ;      // Increment _Event if it's < 1.
        if (v > 0) {
           // If it's already signaled just return.
           // The LD of _Event could have reordered or be satisfied
           // by a read-aside from this processor's write buffer.
           // To avoid problems execute a barrier and then
           // ratify the value.  A degenerate CAS() would also work.
           // Viz., CAS (v+0, &_Event, v) == v).
           OrderAccess::fence() ;
           if (_Event == v) return ;
           continue ;
        }
        if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
    }
    // Only signal the kernel event when a thread is actually blocked
    // (v was negative); otherwise the increment alone suffices.
    if (v < 0) {
       ::SetEvent (_ParkHandle) ;
    }
}
3692 | |
3693 | |
3694 // JSR166 | |
3695 // ------------------------------------------------------- | |
3696 | |
3697 /* | |
3698 * The Windows implementation of Park is very straightforward: Basic | |
3699 * operations on Win32 Events turn out to have the right semantics to | |
3700 * use them directly. We opportunistically resuse the event inherited | |
3701 * from Monitor. | |
3702 */ | |
3703 | |
3704 | |
// Park the current thread per the java.util.concurrent.LockSupport
// contract.  time == 0 means wait until unparked/interrupted; otherwise
// isAbsolute selects between a deadline in millis-since-epoch and a
// relative timeout in nanoseconds.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0) {
    time = INFINITE;
  }
  else if  (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume the pending permit and return without blocking.
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    // Enter the blocked thread state (cooperates with safepoints) for
    // the duration of the wait.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent,  time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}
3749 | |
// Unblock (or pre-permit) the thread associated with this Parker by
// signaling its win32 event.
void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}
3754 | |
3755 // Run the specified command in a separate process. Return its exit value, | |
3756 // or -1 on failure (e.g. can't create a new process). | |
3757 int os::fork_and_exec(char* cmd) { | |
3758 STARTUPINFO si; | |
3759 PROCESS_INFORMATION pi; | |
3760 | |
3761 memset(&si, 0, sizeof(si)); | |
3762 si.cb = sizeof(si); | |
3763 memset(&pi, 0, sizeof(pi)); | |
3764 BOOL rslt = CreateProcess(NULL, // executable name - use command line | |
3765 cmd, // command line | |
3766 NULL, // process security attribute | |
3767 NULL, // thread security attribute | |
3768 TRUE, // inherits system handles | |
3769 0, // no creation flags | |
3770 NULL, // use parent's environment block | |
3771 NULL, // use parent's starting directory | |
3772 &si, // (in) startup information | |
3773 &pi); // (out) process information | |
3774 | |
3775 if (rslt) { | |
3776 // Wait until child process exits. | |
3777 WaitForSingleObject(pi.hProcess, INFINITE); | |
3778 | |
3779 DWORD exit_code; | |
3780 GetExitCodeProcess(pi.hProcess, &exit_code); | |
3781 | |
3782 // Close process and thread handles. | |
3783 CloseHandle(pi.hProcess); | |
3784 CloseHandle(pi.hThread); | |
3785 | |
3786 return (int)exit_code; | |
3787 } else { | |
3788 return -1; | |
3789 } | |
3790 } | |
3791 | |
3792 //-------------------------------------------------------------------------------------------------- | |
3793 // Non-product code | |
3794 | |
// Calls into os::check_heap() since the last full heap walk.
static int mallocDebugIntervalCounter = 0;
// Total calls into os::check_heap() (i.e. allocations checked).
static int mallocDebugCounter = 0;

// Debug-build C-heap verification: once mallocDebugCounter passes
// MallocVerifyStart, every MallocVerifyInterval-th call (or any call with
// 'force' set) walks the CRT process heap and validates each busy block,
// calling fatal() on corruption.  Always returns true otherwise.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    { HeapLock(heap);
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      // HeapWalk terminates with ERROR_NO_MORE_ITEMS on a clean finish.
      int err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal1("heap walk aborted with error %d", err);
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}
3825 | |
3826 | |
#ifndef PRODUCT
// Debugging aid: describe the given address.  Not implemented on
// Windows; always reports failure.
bool os::find(address addr) {
  // Nothing yet
  return false;
}
#endif
3833 | |
// Exception filter for faults on the memory-serialize page: if the
// access violation hit that page, simply resume execution (the fault
// itself provided the required serialization); otherwise let other
// handlers see the exception.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] holds the faulting access address.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
3848 | |
// Copy a human-readable description of the most recent error into 'buf'
// (capacity 'len' bytes): first consult GetLastError(), falling back to
// the C runtime's errno.  Returns the number of characters stored, or 0
// when neither source has a pending error.
static int getLastErrorString(char *buf, size_t len)
{
    long errval;

    if ((errval = GetLastError()) != 0)
    {
      /* DOS error */
      size_t n = (size_t)FormatMessage(
            FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
            NULL,
            errval,
            0,
            buf,
            (DWORD)len,
            NULL);
      if (n > 3) {
        /* Drop final '.', CR, LF */
        if (buf[n - 1] == '\n') n--;
        if (buf[n - 1] == '\r') n--;
        if (buf[n - 1] == '.') n--;
        buf[n] = '\0';
      }
      /* NOTE(review): if FormatMessage() itself fails, n == 0 and buf is
         left untouched -- callers should check the return value before
         reading buf. */
      return (int)n;
    }

    if (errno != 0)
    {
      /* C runtime error that has no corresponding DOS error code */
      const char *s = strerror(errno);
      size_t n = strlen(s);
      if (n >= len) n = len - 1;
      strncpy(buf, s, n);
      buf[n] = '\0';
      return (int)n;
    }
    return 0;
}