/*
 * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_windows_x86.cpp.incl"
# include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS*);

// Install a win32 structured exception handler around the Java call.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to. This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning: This routine must NEVER be inlined since we'd end up with
    //          multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if (ThreadLocalStorage::get_thread_ptr_offset() == 0) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
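    // Note: on win32, FS:[0] holds the address of the topmost structured
    // exception registration record, here the record pushed by the __try
    // above, so the computed offset is the same in every thread as long as
    // this frame layout never changes. Generated get_thread code can then
    // load FS:[0], add the captured offset, and read the Thread* from
    // wrapperthread. (This sketch of the consumer side is an assumption
    // for context, not taken from this file.)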
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We had better only get a CONTINUE_EXECUTION from our handler
  // since we don't have unwind information registered.

  guarantee(result == EXCEPTION_CONTINUE_EXECUTION,
            "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}
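// Note (assumption, for context): ExceptionContinueExecution makes the OS
// dispatcher resume at ContextRecord->Rip, so the filter above is expected
// to have redirected the pc in ContextRecord to the appropriate handler
// code before returning EXCEPTION_CONTINUE_EXECUTION.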


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires that all
// addresses in these structures be relative to the Code area
// registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
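// Note (assumption, for context): UNWIND_INFO's ExceptionHandler field is a
// 32-bit offset from the base address registered with RtlAddFunctionTable,
// so the registered handler has to live inside the code cache itself. The
// 16-byte ExceptionHandlerInstr slot holds a jmp that forwards to
// HandleExceptionFromCodeCache, which may live anywhere in the address space.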

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments: low and high are the addresses of the full reserved
// codeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  // If we are using Vectored Exceptions we don't need this registration
  if (UseVectoredExceptions) return true;

  BufferBlob* b = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(b->instructions_begin(), b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

void os::initialize_thread() {
  // Nothing to do.
}

// Atomics and Stub Functions

typedef jint     xchg_func_t         (jint,     volatile jint*);
typedef intptr_t xchg_ptr_func_t     (intptr_t, volatile intptr_t*);
typedef jint     cmpxchg_func_t      (jint,     volatile jint*,  jint);
typedef jlong    cmpxchg_long_func_t (jlong,    volatile jlong*, jlong);
typedef jint     add_func_t          (jint,     volatile jint*);
typedef intptr_t add_ptr_func_t      (intptr_t, volatile intptr_t*);
typedef void     fence_func_t        ();
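// Each os::atomic_* function pointer below starts out pointing at its
// *_bootstrap version. On the first call the bootstrap version installs the
// generated stub from StubRoutines if it exists; until then a plain C++
// fallback is used, which is safe only because a single thread is running
// during bootstrap (hence the asserts on Threads::number_of_threads()).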

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

void os::fence_bootstrap() {
  // try to use the stub:
  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());

  if (func != NULL) {
    os::fence_func = func;
    (*func)();
    return;
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  // don't have to do anything for a single thread
}


xchg_func_t*     os::atomic_xchg_func     = os::atomic_xchg_bootstrap;
xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*  os::atomic_cmpxchg_func  = os::atomic_cmpxchg_bootstrap;
add_func_t*      os::atomic_add_func      = os::atomic_add_bootstrap;
add_ptr_func_t*  os::atomic_add_ptr_func  = os::atomic_add_ptr_bootstrap;
fence_func_t*    os::fence_func           = os::fence_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds;
// frame pointer omission can be turned off with /Oy-. If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}


#ifndef AMD64
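// Returns the frame pointer of our caller (os::current_frame) by reading
// this function's saved ebp. Note (assumption): this relies on the compiler
// emitting a conventional ebp-based frame here and not inlining the routine.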
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  return *frameptr;
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // apparently _asm not supported on windows amd64
  typedef intptr_t* get_fp_func ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame(NULL, NULL, NULL);
  intptr_t* fp = (*func)();
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
  st->cr();
}

extern "C" int SafeFetch32 (int * adr, int Err) {
  int rv = Err;
  __try {
    rv = *((volatile int *) adr);
  } __except(EXCEPTION_EXECUTE_HANDLER) {
  }
  return rv;
}

extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
  intptr_t rv = Err;
  __try {
    rv = *((volatile intptr_t *) adr);
  } __except(EXCEPTION_EXECUTE_HANDLER) {
  }
  return rv;
}
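// SafeFetch32/SafeFetchN probe a possibly unmapped address: they return the
// value at adr, or Err if the read faults. The empty __except block simply
// swallows the access violation.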
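// Note (assumption): SpinPause's return value reports whether a pause
// instruction was actually executed: 0 on win64, where inline asm is not
// available and no pause is issued, and 1 on x86 after the pause below.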
extern "C" int SpinPause () {
#ifdef AMD64
  return 0;
#else
  // pause == rep:nop
  // On systems that don't support pause, a rep:nop
  // is executed as a nop. The rep: prefix is ignored.
  _asm {
    pause;
  };
  return 1;
#endif // AMD64
}
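// Note (assumption, for context): on 32-bit x86 this loads the VM's standard
// x87 control word (exceptions masked, round to nearest) so x87 code matches
// the floating-point behavior the runtime expects; on win64 floating point
// uses SSE, so there is nothing to set up.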
void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}