src/os_cpu/linux_x86/vm/os_linux_x86.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children e195fe4c40c7 485d403e94e1
/*
 * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_linux_x86.cpp.incl"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>

#ifdef AMD64
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_SP REG_UESP
#define REG_PC REG_EIP
#define REG_FP REG_EBP
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
#endif // AMD64

address os::current_stack_pointer() {
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) -1;
}

void os::initialize_thread() {
  // Nothing to do.
}

address os::Linux::ucontext_get_pc(ucontext_t * uc) {
  return (address)uc->uc_mcontext.gregs[REG_PC];
}

intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}
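
// For illustration only: a minimal sketch of how a SA_SIGINFO signal handler
// receives the ucontext_t that the accessors above decode. The handler name is
// hypothetical and nothing below is installed by the VM; it only shows that the
// third handler argument is a ucontext_t* whose gregs[] array holds the
// interrupted pc and sp under the REG_* indices defined above.
static void illustrate_siginfo_handler(int sig, siginfo_t* info, void* ucVoid) {
  ucontext_t* uc = (ucontext_t*) ucVoid;
  address   pc = (address)   uc->uc_mcontext.gregs[REG_PC];  // interrupted instruction
  intptr_t* sp = (intptr_t*) uc->uc_mcontext.gregs[REG_SP];  // interrupted stack pointer
  (void) sig; (void) info; (void) pc; (void) sp;
}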

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC epc;
  ucontext_t* uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
// It may be turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

intptr_t* _get_previous_fp() {
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
  return (intptr_t*) *ebp;   // we want what it points to.
}


frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}
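
// For illustration only: a sketch of the frame layout that _get_previous_fp()
// and get_sender_for_C_frame() rely on when the frame pointer is not omitted.
// With a conventional prologue (push %ebp/%rbp; mov %esp,%ebp), each frame
// pointer slot holds the caller's saved frame pointer and the slot just above
// it holds the return address, so the chain can be walked with two loads per
// frame. The helper name is hypothetical and is not used by the VM; real code
// must also sanity-check each fp (as is_first_C_frame() does) before following it.
static int illustrate_walk_fp_chain(intptr_t* fp, address* pcs, int max_frames) {
  int n = 0;
  while (fp != NULL && n < max_frames) {
    pcs[n++] = (address) fp[1];   // saved return address of this frame
    fp = (intptr_t*) fp[0];       // saved frame pointer of the caller
  }
  return n;
}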


// Utility functions

julong os::allocatable_physical_memory(julong size) {
#ifdef AMD64
  return size;
#else
  julong result = MIN2(size, (julong)3800*M);
  if (!is_allocatable(result)) {
    // See comments under solaris for alignment considerations
    julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
    result = MIN2(size, reasonable_size);
  }
  return result;
#endif // AMD64
}
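
// For illustration only: the 32-bit clamping above written with plain integer
// arithmetic instead of the VM's M/G constants and MIN2 macro. A 32-bit user
// process has at most a 4GB address space (typically 3GB usable under Linux),
// so the request is first capped at 3800MB and, if that cap still is not
// allocatable, at 2GB minus two pages. The helper is hypothetical: it takes the
// page size and the result of the allocatability probe as parameters instead of
// calling os::vm_page_size() and is_allocatable().
static julong illustrate_cap_32bit_request(julong size, julong page_size, bool first_cap_fits) {
  const julong MB = 1024 * 1024;
  const julong GB = 1024 * MB;
  julong result = size < 3800 * MB ? size : 3800 * MB;     // MIN2(size, 3800*M)
  if (!first_cap_fits) {
    julong reasonable_size = 2 * GB - 2 * page_size;       // fallback limit
    result = size < reasonable_size ? size : reasonable_size;
  }
  return result;
}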

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE
};

extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64

extern "C" int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install and
  // then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid an unnecessary crash when libjsig is not preloaded, try to handle
  // signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL ){
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if(t->is_VM_thread()){
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    if (pc == (address) Fetch32PFI) {
      uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
      return 1 ;
    }
#ifdef AMD64
    if (pc == (address) FetchNPFI) {
      uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
      return 1 ;
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception. Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code. Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return 1;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        // find_blob_unsafe() may return NULL, so check before asking is_nmethod()
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = StubRoutines::handler_for_unsafe_access();
        }
      }
      else

#ifdef AMD64
      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on linux 2.2.12-20!!!
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in i486.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          // that can generate SIGFPE signal on linux.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if (sig == SIGSEGV &&
                 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      stub = StubRoutines::handler_for_unsafe_access();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block the current thread until the memory serialize page permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage. We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here because
  // this si_code is so generic that it is almost meaningless, and
  // the si_code for this condition may change in the future.
  // Furthermore, a false positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Linux::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    // (The page-boundary test below is spelled out in plain arithmetic in the
    // illustrative sketch following this handler.)
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Unguard and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::unguard_memory((char*) page_start, page_size);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here. Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think the page has already been unguarded, and abort the VM. Likely
        // very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop. This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions add unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution. It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}
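
// For illustration only: a standalone sketch of the page-boundary test used by
// the execution-protection triage above, written with plain arithmetic instead
// of the VM's pointer_delta()/align_size_down() helpers. Two addresses lie on
// the same page exactly when they agree on all bits above the page offset, i.e.
// when (pc ^ addr) masked down to a page boundary is zero. The helper name is
// hypothetical and is not used by the VM.
static bool illustrate_instr_spans_page_boundary(uintptr_t pc, uintptr_t addr,
                                                 uintptr_t page_size) {
  // The faulting address must be within one maximal instruction (15 bytes) of pc.
  bool pc_is_near_addr = (addr - pc) < 15;
  // Non-zero here means pc and addr round down to different pages.
  bool different_pages = ((pc ^ addr) & ~(page_size - 1)) != 0;
  return pc_is_near_addr && different_pages;
}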

void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
  set_fpu_control_word(0x27f);
#endif // !AMD64
}

int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}

void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}
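
// For illustration only: a minimal sketch of reading and decoding the x87
// control word with the <fpu_control.h> macros used above. 0x27f is the Linux
// default control word: all six exception bits masked and the precision-control
// field set to _FPU_DOUBLE (53-bit mantissa), which matches Java's double
// semantics. The helper name is hypothetical and is not used by the VM.
static void illustrate_fpu_control_word() {
  fpu_control_t cw;
  _FPU_GETCW(cw);                                        // read the current control word
  bool is_53_bit = (cw & _FPU_EXTENDED) == _FPU_DOUBLE;  // precision-control field
  fpu_control_t exception_masks = cw & 0x3f;             // low six bits mask FP exceptions
  (void) is_53_bit; (void) exception_masks;
}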

// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  struct utsname uts;
  if( uname(&uts) != 0 ) return false; // uname fails?
  char *minor_string;
  int major = strtol(uts.release,&minor_string,10);
  int minor = strtol(minor_string+1,NULL,10);
  bool result = (major > 2 || (major==2 && minor >= 4));
#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
               major,minor, result ? "DOES" : "does NOT");
  }
#endif
  return result;
#endif // AMD64
}
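
// For illustration only: the uts.release parsing done above, applied to a
// caller-supplied sample string instead of a live uname() call. For a release
// such as "2.6.18-128.el5", strtol() stops at the first '.', so major = 2 and
// minor_string points at ".6..."; skipping the '.' yields minor = 6 and SSE is
// reported as supported. The helper name is hypothetical and unused by the VM.
static bool illustrate_kernel_supports_sse(const char* release /* e.g. "2.6.18-128.el5" */) {
  char* minor_string;
  int major = strtol(release, &minor_string, 10);   // parses "2", stops at '.'
  int minor = strtol(minor_string + 1, NULL, 10);   // skips '.', parses "6"
  return major > 2 || (major == 2 && minor >= 4);   // 2.4+ kernels support SSE
}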

bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  // unused on amd64?
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

#ifdef AMD64
size_t os::Linux::min_stack_allowed = 64 * K;

// amd64: pthread on amd64 is always in floating stack mode
bool os::Linux::supports_variable_stack_size() { return true; }
#else
size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;

#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})

// Test if pthread library can support variable thread stack size. LinuxThreads
// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Linux::supports_variable_stack_size() {
  if (os::Linux::is_NPTL()) {
    // NPTL, yes
    return true;

  } else {
    // Note: We can't control default stack size when creating a thread.
    // If we use non-default stack size (pthread_attr_setstacksize), both
    // floating stack and non-floating stack LinuxThreads will return the
    // same value. This makes it impossible to implement this function by
    // detecting thread stack size directly.
    //
    // An alternative approach is to check %gs. Fixed-stack LinuxThreads
    // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
    // %gs (either as LDT selector or GDT selector, depending on kernel)
    // to access thread specific data.
    //
    // Note that %gs is a reserved glibc register since early 2001, so
    // applications are not allowed to change its value (Ulrich Drepper from
    // Redhat confirmed that all known offenders have been modified to use
    // either %fs or TSD). In the worst case scenario, when the VM is embedded in
    // a native application that plays with %gs, we might see a non-zero %gs
    // even if LinuxThreads is running in fixed stack mode. As a result, we'll
    // return true and skip _thread_safety_check(), so we may not be able to
    // detect stack-heap collisions. But otherwise it's harmless.
    //
    return (GET_GS() != 0);
  }
}
#endif // AMD64

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have the HotSpot
  // guard pages, so only enable the glibc guard page for non-Java threads.
  return (thr_type == java_thread ? 0 : page_size());
}

// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | -  guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |  glibc guard page      | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size ( P2 = P1 + size ) are the address and stack size
//    returned from pthread_attr_getstack()

static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
    // the initial thread needs special handling because pthread_getattr_np()
    // may return a bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // The JVM needs to know the exact stack location; abort if the call fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        vm_exit_out_of_memory(0, "pthread_getattr_np");
      } else {
        fatal1("pthread_getattr_np failed with errno = %d", rslt);
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Can not locate current stack attributes!");
    }

    pthread_attr_destroy(&attr);

  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
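
// For illustration only: a self-contained version of the pthread_getattr_np() /
// pthread_attr_getstack() query performed above, without the VM's error
// handling. pthread_attr_getstack() reports the lowest stack address (the
// "bottom"/P1 in the diagram above) plus the size, so the stack base is
// bottom + size. The helper name is hypothetical and is not used by the VM.
static bool illustrate_query_stack(void** bottom, size_t* size) {
  pthread_attr_t attr;
  if (pthread_getattr_np(pthread_self(), &attr) != 0) {
    return false;                       // e.g. ENOMEM
  }
  int rslt = pthread_attr_getstack(&attr, bottom, size);
  pthread_attr_destroy(&attr);          // always release the attr object
  return rslt == 0;
}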

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print( "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print( "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print( "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFL=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print(" TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
#else
  st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  st->print( "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
}

void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile ( "fldcw (%0)" :
                     : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}