comparison src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp @ 3960:f08d439fab8c

7089790: integrate bsd-port changes Reviewed-by: kvn, twisti, jrose Contributed-by: Kurt Miller <kurt@intricatesoftware.com>, Greg Lewis <glewis@eyesbeyond.com>, Jung-uk Kim <jkim@freebsd.org>, Christos Zoulas <christos@zoulas.com>, Landon Fuller <landonf@plausible.coop>, The FreeBSD Foundation <board@freebsdfoundation.org>, Michael Franz <mvfranz@gmail.com>, Roger Hoover <rhoover@apple.com>, Alexander Strange <astrange@apple.com>
author never
date Sun, 25 Sep 2011 16:03:29 -0700
parents 3959:eda6988c0d81
children 436b4a3231bf
1 /*
2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "assembler_x86.inline.hpp"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "jvm_bsd.h"
34 #include "memory/allocation.inline.hpp"
35 #include "mutex_bsd.inline.hpp"
36 #include "nativeInst_x86.hpp"
37 #include "os_share_bsd.hpp"
38 #include "prims/jniFastGetField.hpp"
39 #include "prims/jvm.h"
40 #include "prims/jvm_misc.hpp"
41 #include "runtime/arguments.hpp"
42 #include "runtime/extendedPC.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/interfaceSupport.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/javaCalls.hpp"
47 #include "runtime/mutexLocker.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "runtime/stubRoutines.hpp"
51 #include "runtime/timer.hpp"
52 #include "thread_bsd.inline.hpp"
53 #include "utilities/events.hpp"
54 #include "utilities/vmError.hpp"
55 #ifdef COMPILER1
56 #include "c1/c1_Runtime1.hpp"
57 #endif
58 #ifdef COMPILER2
59 #include "opto/runtime.hpp"
60 #endif
61
62 // put OS-includes here
63 # include <sys/types.h>
64 # include <sys/mman.h>
65 # include <pthread.h>
66 # include <signal.h>
67 # include <errno.h>
68 # include <dlfcn.h>
69 # include <stdlib.h>
70 # include <stdio.h>
71 # include <unistd.h>
72 # include <sys/resource.h>
73 # include <pthread.h>
74 # include <sys/stat.h>
75 # include <sys/time.h>
76 # include <sys/utsname.h>
77 # include <sys/socket.h>
78 # include <sys/wait.h>
79 # include <pwd.h>
80 # include <poll.h>
81 #ifndef __OpenBSD__
82 # include <ucontext.h>
83 #endif
84
85 #if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
86 # include <pthread_np.h>
87 #endif
88
89 #ifdef AMD64
90 #define SPELL_REG_SP "rsp"
91 #define SPELL_REG_FP "rbp"
92 #else
93 #define SPELL_REG_SP "esp"
94 #define SPELL_REG_FP "ebp"
95 #endif // AMD64
96
97 #ifdef __FreeBSD__
98 # define context_trapno uc_mcontext.mc_trapno
99 # ifdef AMD64
100 # define context_pc uc_mcontext.mc_rip
101 # define context_sp uc_mcontext.mc_rsp
102 # define context_fp uc_mcontext.mc_rbp
103 # define context_rip uc_mcontext.mc_rip
104 # define context_rsp uc_mcontext.mc_rsp
105 # define context_rbp uc_mcontext.mc_rbp
106 # define context_rax uc_mcontext.mc_rax
107 # define context_rbx uc_mcontext.mc_rbx
108 # define context_rcx uc_mcontext.mc_rcx
109 # define context_rdx uc_mcontext.mc_rdx
110 # define context_rsi uc_mcontext.mc_rsi
111 # define context_rdi uc_mcontext.mc_rdi
112 # define context_r8 uc_mcontext.mc_r8
113 # define context_r9 uc_mcontext.mc_r9
114 # define context_r10 uc_mcontext.mc_r10
115 # define context_r11 uc_mcontext.mc_r11
116 # define context_r12 uc_mcontext.mc_r12
117 # define context_r13 uc_mcontext.mc_r13
118 # define context_r14 uc_mcontext.mc_r14
119 # define context_r15 uc_mcontext.mc_r15
120 # define context_flags uc_mcontext.mc_flags
121 # define context_err uc_mcontext.mc_err
122 # else
123 # define context_pc uc_mcontext.mc_eip
124 # define context_sp uc_mcontext.mc_esp
125 # define context_fp uc_mcontext.mc_ebp
126 # define context_eip uc_mcontext.mc_eip
127 # define context_esp uc_mcontext.mc_esp
128 # define context_eax uc_mcontext.mc_eax
129 # define context_ebx uc_mcontext.mc_ebx
130 # define context_ecx uc_mcontext.mc_ecx
131 # define context_edx uc_mcontext.mc_edx
132 # define context_ebp uc_mcontext.mc_ebp
133 # define context_esi uc_mcontext.mc_esi
134 # define context_edi uc_mcontext.mc_edi
135 # define context_eflags uc_mcontext.mc_eflags
136 # define context_trapno uc_mcontext.mc_trapno
137 # endif
138 #endif
139
140 #ifdef __APPLE__
141 # if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5)
142 // 10.5 UNIX03 member name prefixes
143 #define DU3_PREFIX(s, m) __ ## s.__ ## m
144 # else
145 #define DU3_PREFIX(s, m) s ## . ## m
146 # endif
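// For illustration: with the 10.5 UNIX03 prefixes, DU3_PREFIX(ss, rip)
// expands to __ss.__rip, so context_rip below reads uc->uc_mcontext->__ss.__rip;
// with the pre-10.5 names the same macro yields uc->uc_mcontext->ss.rip.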
147
148 # ifdef AMD64
149 # define context_pc context_rip
150 # define context_sp context_rsp
151 # define context_fp context_rbp
152 # define context_rip uc_mcontext->DU3_PREFIX(ss,rip)
153 # define context_rsp uc_mcontext->DU3_PREFIX(ss,rsp)
154 # define context_rax uc_mcontext->DU3_PREFIX(ss,rax)
155 # define context_rbx uc_mcontext->DU3_PREFIX(ss,rbx)
156 # define context_rcx uc_mcontext->DU3_PREFIX(ss,rcx)
157 # define context_rdx uc_mcontext->DU3_PREFIX(ss,rdx)
158 # define context_rbp uc_mcontext->DU3_PREFIX(ss,rbp)
159 # define context_rsi uc_mcontext->DU3_PREFIX(ss,rsi)
160 # define context_rdi uc_mcontext->DU3_PREFIX(ss,rdi)
161 # define context_r8 uc_mcontext->DU3_PREFIX(ss,r8)
162 # define context_r9 uc_mcontext->DU3_PREFIX(ss,r9)
163 # define context_r10 uc_mcontext->DU3_PREFIX(ss,r10)
164 # define context_r11 uc_mcontext->DU3_PREFIX(ss,r11)
165 # define context_r12 uc_mcontext->DU3_PREFIX(ss,r12)
166 # define context_r13 uc_mcontext->DU3_PREFIX(ss,r13)
167 # define context_r14 uc_mcontext->DU3_PREFIX(ss,r14)
168 # define context_r15 uc_mcontext->DU3_PREFIX(ss,r15)
169 # define context_flags uc_mcontext->DU3_PREFIX(ss,rflags)
170 # define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
171 # define context_err uc_mcontext->DU3_PREFIX(es,err)
172 # else
173 # define context_pc context_eip
174 # define context_sp context_esp
175 # define context_fp context_ebp
176 # define context_eip uc_mcontext->DU3_PREFIX(ss,eip)
177 # define context_esp uc_mcontext->DU3_PREFIX(ss,esp)
178 # define context_eax uc_mcontext->DU3_PREFIX(ss,eax)
179 # define context_ebx uc_mcontext->DU3_PREFIX(ss,ebx)
180 # define context_ecx uc_mcontext->DU3_PREFIX(ss,ecx)
181 # define context_edx uc_mcontext->DU3_PREFIX(ss,edx)
182 # define context_ebp uc_mcontext->DU3_PREFIX(ss,ebp)
183 # define context_esi uc_mcontext->DU3_PREFIX(ss,esi)
184 # define context_edi uc_mcontext->DU3_PREFIX(ss,edi)
185 # define context_eflags uc_mcontext->DU3_PREFIX(ss,eflags)
186 # define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
187 # endif
188 #endif
189
190 #ifdef __OpenBSD__
191 # define context_trapno sc_trapno
192 # ifdef AMD64
193 # define context_pc sc_rip
194 # define context_sp sc_rsp
195 # define context_fp sc_rbp
196 # define context_rip sc_rip
197 # define context_rsp sc_rsp
198 # define context_rbp sc_rbp
199 # define context_rax sc_rax
200 # define context_rbx sc_rbx
201 # define context_rcx sc_rcx
202 # define context_rdx sc_rdx
203 # define context_rsi sc_rsi
204 # define context_rdi sc_rdi
205 # define context_r8 sc_r8
206 # define context_r9 sc_r9
207 # define context_r10 sc_r10
208 # define context_r11 sc_r11
209 # define context_r12 sc_r12
210 # define context_r13 sc_r13
211 # define context_r14 sc_r14
212 # define context_r15 sc_r15
213 # define context_flags sc_rflags
214 # define context_err sc_err
215 # else
216 # define context_pc sc_eip
217 # define context_sp sc_esp
218 # define context_fp sc_ebp
219 # define context_eip sc_eip
220 # define context_esp sc_esp
221 # define context_eax sc_eax
222 # define context_ebx sc_ebx
223 # define context_ecx sc_ecx
224 # define context_edx sc_edx
225 # define context_ebp sc_ebp
226 # define context_esi sc_esi
227 # define context_edi sc_edi
228 # define context_eflags sc_eflags
229 # define context_trapno sc_trapno
230 # endif
231 #endif
232
233 #ifdef __NetBSD__
234 # define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
235 # ifdef AMD64
236 # define __register_t __greg_t
237 # define context_pc uc_mcontext.__gregs[_REG_RIP]
238 # define context_sp uc_mcontext.__gregs[_REG_URSP]
239 # define context_fp uc_mcontext.__gregs[_REG_RBP]
240 # define context_rip uc_mcontext.__gregs[_REG_RIP]
241 # define context_rsp uc_mcontext.__gregs[_REG_URSP]
242 # define context_rax uc_mcontext.__gregs[_REG_RAX]
243 # define context_rbx uc_mcontext.__gregs[_REG_RBX]
244 # define context_rcx uc_mcontext.__gregs[_REG_RCX]
245 # define context_rdx uc_mcontext.__gregs[_REG_RDX]
246 # define context_rbp uc_mcontext.__gregs[_REG_RBP]
247 # define context_rsi uc_mcontext.__gregs[_REG_RSI]
248 # define context_rdi uc_mcontext.__gregs[_REG_RDI]
249 # define context_r8 uc_mcontext.__gregs[_REG_R8]
250 # define context_r9 uc_mcontext.__gregs[_REG_R9]
251 # define context_r10 uc_mcontext.__gregs[_REG_R10]
252 # define context_r11 uc_mcontext.__gregs[_REG_R11]
253 # define context_r12 uc_mcontext.__gregs[_REG_R12]
254 # define context_r13 uc_mcontext.__gregs[_REG_R13]
255 # define context_r14 uc_mcontext.__gregs[_REG_R14]
256 # define context_r15 uc_mcontext.__gregs[_REG_R15]
257 # define context_flags uc_mcontext.__gregs[_REG_RFL]
258 # define context_err uc_mcontext.__gregs[_REG_ERR]
259 # else
260 # define context_pc uc_mcontext.__gregs[_REG_EIP]
261 # define context_sp uc_mcontext.__gregs[_REG_UESP]
262 # define context_fp uc_mcontext.__gregs[_REG_EBP]
263 # define context_eip uc_mcontext.__gregs[_REG_EIP]
264 # define context_esp uc_mcontext.__gregs[_REG_UESP]
265 # define context_eax uc_mcontext.__gregs[_REG_EAX]
266 # define context_ebx uc_mcontext.__gregs[_REG_EBX]
267 # define context_ecx uc_mcontext.__gregs[_REG_ECX]
268 # define context_edx uc_mcontext.__gregs[_REG_EDX]
269 # define context_ebp uc_mcontext.__gregs[_REG_EBP]
270 # define context_esi uc_mcontext.__gregs[_REG_ESI]
271 # define context_edi uc_mcontext.__gregs[_REG_EDI]
272 # define context_eflags uc_mcontext.__gregs[_REG_EFL]
273 # define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
274 # endif
275 #endif
276
277 address os::current_stack_pointer() {
278 #ifdef SPARC_WORKS
279 register void *esp;
280 __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
281 return (address) ((char*)esp + sizeof(long)*2);
282 #else
283 register void *esp __asm__ (SPELL_REG_SP);
284 return (address) esp;
285 #endif
286 }
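// Note: the non-SPARC_WORKS path relies on GCC's explicit-register variable
// extension, binding the local "esp" to SPELL_REG_SP (%esp/%rsp), so the
// current stack pointer is read directly without an inline-assembly mov.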
287
288 char* os::non_memory_address_word() {
289 // Must never look like an address returned by reserve_memory,
290 // even in its subfields (as defined by the CPU immediate fields,
291 // if the CPU splits constants across multiple instructions).
292
293 return (char*) -1;
294 }
295
296 void os::initialize_thread() {
297 // Nothing to do.
298 }
299
300 address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
301 return (address)uc->context_pc;
302 }
303
304 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
305 return (intptr_t*)uc->context_sp;
306 }
307
308 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
309 return (intptr_t*)uc->context_fp;
310 }
311
312 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
313 // is currently interrupted by SIGPROF.
314 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
315 // frames. Currently we don't do that on Bsd, so it's the same as
316 // os::fetch_frame_from_context().
317 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
318 ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
319
320 assert(thread != NULL, "just checking");
321 assert(ret_sp != NULL, "just checking");
322 assert(ret_fp != NULL, "just checking");
323
324 return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
325 }
326
327 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
328 intptr_t** ret_sp, intptr_t** ret_fp) {
329
330 ExtendedPC epc;
331 ucontext_t* uc = (ucontext_t*)ucVoid;
332
333 if (uc != NULL) {
334 epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
335 if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
336 if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
337 } else {
338 // construct empty ExtendedPC for return value checking
339 epc = ExtendedPC(NULL);
340 if (ret_sp) *ret_sp = (intptr_t *)NULL;
341 if (ret_fp) *ret_fp = (intptr_t *)NULL;
342 }
343
344 return epc;
345 }
346
347 frame os::fetch_frame_from_context(void* ucVoid) {
348 intptr_t* sp;
349 intptr_t* fp;
350 ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
351 return frame(sp, fp, epc.pc());
352 }
353
354 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
355 // It may be omitted with -fomit-frame-pointer.
356 frame os::get_sender_for_C_frame(frame* fr) {
357 return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
358 }
359
360 intptr_t* _get_previous_fp() {
361 #ifdef SPARC_WORKS
362 register intptr_t **ebp;
363 __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
364 #else
365 register intptr_t **ebp __asm__ (SPELL_REG_FP);
366 #endif
367 return (intptr_t*) *ebp; // we want what it points to.
368 }
369
370
371 frame os::current_frame() {
372 intptr_t* fp = _get_previous_fp();
373 frame myframe((intptr_t*)os::current_stack_pointer(),
374 (intptr_t*)fp,
375 CAST_FROM_FN_PTR(address, os::current_frame));
376 if (os::is_first_C_frame(&myframe)) {
377 // stack is not walkable
378 return frame(NULL, NULL, NULL);
379 } else {
380 return os::get_sender_for_C_frame(&myframe);
381 }
382 }
383
384 // Utility functions
385
386 // From IA32 System Programming Guide
387 enum {
388 trap_page_fault = 0xE
389 };
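// Trap number 0xE is the x86 page-fault exception (#PF). It is compared
// against context_trapno in the execution-protection-violation handling
// further below (32-bit only) to restrict that path to genuine page faults.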
390
391 extern "C" void Fetch32PFI () ;
392 extern "C" void Fetch32Resume () ;
393 #ifdef AMD64
394 extern "C" void FetchNPFI () ;
395 extern "C" void FetchNResume () ;
396 #endif // AMD64
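// Fetch32PFI/FetchNPFI and Fetch32Resume/FetchNResume are code labels used by
// the signal handler below: if a fault occurs exactly at a *PFI label, the
// handler rewrites the saved pc to the matching *Resume label so execution
// continues past the faulting access.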
397
398 extern "C" JNIEXPORT int
399 JVM_handle_bsd_signal(int sig,
400 siginfo_t* info,
401 void* ucVoid,
402 int abort_if_unrecognized) {
403 ucontext_t* uc = (ucontext_t*) ucVoid;
404
405 Thread* t = ThreadLocalStorage::get_thread_slow();
406
407 SignalHandlerMark shm(t);
408
409 // Note: it's not uncommon for JNI code to use signal/sigset to install and
410 // then restore a signal handler (e.g. to temporarily block SIGPIPE, or to
411 // have a SIGILL handler when detecting CPU type). When that happens,
412 // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To avoid
413 // an unnecessary crash when libjsig is not preloaded, first try to handle
414 // signals that do not require siginfo/ucontext.
415
416 if (sig == SIGPIPE || sig == SIGXFSZ) {
417 // allow chained handler to go first
418 if (os::Bsd::chained_handler(sig, info, ucVoid)) {
419 return true;
420 } else {
421 if (PrintMiscellaneous && (WizardMode || Verbose)) {
422 char buf[64];
423 warning("Ignoring %s - see bugs 4229104 or 646499219",
424 os::exception_name(sig, buf, sizeof(buf)));
425 }
426 return true;
427 }
428 }
429
430 JavaThread* thread = NULL;
431 VMThread* vmthread = NULL;
432 if (os::Bsd::signal_handlers_are_installed) {
433 if (t != NULL ){
434 if(t->is_Java_thread()) {
435 thread = (JavaThread*)t;
436 }
437 else if(t->is_VM_thread()){
438 vmthread = (VMThread *)t;
439 }
440 }
441 }
442 /*
443 NOTE: does not seem to work on bsd.
444 if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
445 // can't decode this kind of signal
446 info = NULL;
447 } else {
448 assert(sig == info->si_signo, "bad siginfo");
449 }
450 */
451 // decide if this trap can be handled by a stub
452 address stub = NULL;
453
454 address pc = NULL;
455
456 //%note os_trap_1
457 if (info != NULL && uc != NULL && thread != NULL) {
458 pc = (address) os::Bsd::ucontext_get_pc(uc);
459
460 if (pc == (address) Fetch32PFI) {
461 uc->context_pc = intptr_t(Fetch32Resume) ;
462 return 1 ;
463 }
464 #ifdef AMD64
465 if (pc == (address) FetchNPFI) {
466 uc->context_pc = intptr_t (FetchNResume) ;
467 return 1 ;
468 }
469 #endif // AMD64
470
471 // Handle ALL stack overflow variations here
472 if (sig == SIGSEGV || sig == SIGBUS) {
473 address addr = (address) info->si_addr;
474
475 // check if fault address is within thread stack
476 if (addr < thread->stack_base() &&
477 addr >= thread->stack_base() - thread->stack_size()) {
478 // stack overflow
479 if (thread->in_stack_yellow_zone(addr)) {
480 thread->disable_stack_yellow_zone();
481 if (thread->thread_state() == _thread_in_Java) {
482 // Throw a stack overflow exception. Guard pages will be reenabled
483 // while unwinding the stack.
484 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
485 } else {
486 // Thread was in the vm or native code. Return and try to finish.
487 return 1;
488 }
489 } else if (thread->in_stack_red_zone(addr)) {
490 // Fatal red zone violation. Disable the guard pages and fall through
491 // to handle_unexpected_exception way down below.
492 thread->disable_stack_red_zone();
493 tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
494 #ifndef _ALLBSD_SOURCE
495 } else {
496 // Accessing stack address below sp may cause SEGV if current
497 // thread has MAP_GROWSDOWN stack. This should only happen when
498 // current thread was created by user code with MAP_GROWSDOWN flag
499 // and then attached to VM. See notes in os_bsd.cpp.
500 if (thread->osthread()->expanding_stack() == 0) {
501 thread->osthread()->set_expanding_stack();
502 if (os::Bsd::manually_expand_stack(thread, addr)) {
503 thread->osthread()->clear_expanding_stack();
504 return 1;
505 }
506 thread->osthread()->clear_expanding_stack();
507 } else {
508 fatal("recursive segv. expanding stack.");
509 }
510 #endif
511 }
512 }
513 }
514
515 if (thread->thread_state() == _thread_in_Java) {
516 // Java thread running in Java code => find exception handler if any
517 // a fault inside compiled code, the interpreter, or a stub
518
519 if ((sig == SIGSEGV || sig == SIGBUS) && os::is_poll_address((address)info->si_addr)) {
520 stub = SharedRuntime::get_poll_stub(pc);
521 #if defined(__APPLE__) && !defined(AMD64)
522 // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions.
523 // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from
524 // being called, so only do so if the implicit NULL check is not necessary.
525 } else if (sig == SIGBUS && MacroAssembler::needs_explicit_null_check((int)info->si_addr)) {
526 #else
527 } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
528 #endif
529 // BugId 4454115: A read from a MappedByteBuffer can fault
530 // here if the underlying file has been truncated.
531 // Do not crash the VM in such a case.
532 CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
533 nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
534 if (nm != NULL && nm->has_unsafe_access()) {
535 stub = StubRoutines::handler_for_unsafe_access();
536 }
537 }
538 else
539
540 #ifdef AMD64
541 if (sig == SIGFPE &&
542 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
543 stub =
544 SharedRuntime::
545 continuation_for_implicit_exception(thread,
546 pc,
547 SharedRuntime::
548 IMPLICIT_DIVIDE_BY_ZERO);
549 #ifdef __APPLE__
550 } else if (sig == SIGFPE && info->si_code == FPE_NOOP) {
551 int op = pc[0];
552
553 // Skip REX
554 if ((pc[0] & 0xf0) == 0x40) {
555 op = pc[1];
556 } else {
557 op = pc[0];
558 }
559
560 // Check for IDIV
561 if (op == 0xF7) {
562 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime:: IMPLICIT_DIVIDE_BY_ZERO);
563 } else {
564 // TODO: handle more cases if we are using other x86 instructions
565 // that can generate SIGFPE signal.
566 tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
567 fatal("please update this code.");
568 }
569 #endif /* __APPLE__ */
570
571 #else
572 if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
573 // HACK: si_code does not work on bsd 2.2.12-20!!!
574 int op = pc[0];
575 if (op == 0xDB) {
576 // FIST
577 // TODO: The encoding of D2I in i486.ad can cause an exception
578 // prior to the fist instruction if there was an invalid operation
579 // pending. We want to dismiss that exception. From the win_32
580 // side it also seems that if it really was the fist causing
581 // the exception that we do the d2i by hand with different
582 // rounding. Seems kind of weird.
583 // NOTE: that we take the exception at the NEXT floating point instruction.
584 assert(pc[0] == 0xDB, "not a FIST opcode");
585 assert(pc[1] == 0x14, "not a FIST opcode");
586 assert(pc[2] == 0x24, "not a FIST opcode");
587 return true;
588 } else if (op == 0xF7) {
589 // IDIV
590 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
591 } else {
592 // TODO: handle more cases if we are using other x86 instructions
593 // that can generate SIGFPE signal on bsd.
594 tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
595 fatal("please update this code.");
596 }
597 #endif // AMD64
598 } else if ((sig == SIGSEGV || sig == SIGBUS) &&
599 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
600 // Determination of interpreter/vtable stub/compiled code null exception
601 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
602 }
603 } else if (thread->thread_state() == _thread_in_vm &&
604 sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
605 thread->doing_unsafe_access()) {
606 stub = StubRoutines::handler_for_unsafe_access();
607 }
608
609 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
610 // and the heap gets shrunk before the field access.
611 if ((sig == SIGSEGV) || (sig == SIGBUS)) {
612 address addr = JNI_FastGetField::find_slowcase_pc(pc);
613 if (addr != (address)-1) {
614 stub = addr;
615 }
616 }
617
618 // Check to see if we caught the safepoint code in the
619 // process of write protecting the memory serialization page.
620 // It write enables the page immediately after protecting it
621 // so we can just return to retry the write.
622 if ((sig == SIGSEGV || sig == SIGBUS) &&
623 os::is_memory_serialize_page(thread, (address) info->si_addr)) {
624 // Block current thread until the memory serialize page permission restored.
625 os::block_on_serialize_page_trap();
626 return true;
627 }
628 }
629
630 #ifndef AMD64
631 // Execution protection violation
632 //
633 // This should be kept as the last step in the triage. We don't
634 // have a dedicated trap number for a no-execute fault, so be
635 // conservative and allow other handlers the first shot.
636 //
637 // Note: We don't test that info->si_code == SEGV_ACCERR here.
638 // This si_code is so generic that it is almost meaningless; and
639 // the si_code for this condition may change in the future.
640 // Furthermore, a false-positive should be harmless.
641 if (UnguardOnExecutionViolation > 0 &&
642 (sig == SIGSEGV || sig == SIGBUS) &&
643 uc->context_trapno == trap_page_fault) {
644 int page_size = os::vm_page_size();
645 address addr = (address) info->si_addr;
646 address pc = os::Bsd::ucontext_get_pc(uc);
647 // Make sure the pc and the faulting address are sane.
648 //
649 // If an instruction spans a page boundary, and the page containing
650 // the beginning of the instruction is executable but the following
651 // page is not, the pc and the faulting address might be slightly
652 // different - we still want to unguard the 2nd page in this case.
653 //
654 // 15 bytes seems to be a (very) safe value for max instruction size.
655 bool pc_is_near_addr =
656 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
657 bool instr_spans_page_boundary =
658 (align_size_down((intptr_t) pc ^ (intptr_t) addr,
659 (intptr_t) page_size) > 0);
660
661 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
662 static volatile address last_addr =
663 (address) os::non_memory_address_word();
664
665 // In conservative mode, don't unguard unless the address is in the VM
666 if (addr != last_addr &&
667 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
668
669 // Set memory to RWX and retry
670 address page_start =
671 (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
672 bool res = os::protect_memory((char*) page_start, page_size,
673 os::MEM_PROT_RWX);
674
675 if (PrintMiscellaneous && Verbose) {
676 char buf[256];
677 jio_snprintf(buf, sizeof(buf), "Execution protection violation "
678 "at " INTPTR_FORMAT
679 ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
680 page_start, (res ? "success" : "failed"), errno);
681 tty->print_raw_cr(buf);
682 }
683 stub = pc;
684
685 // Set last_addr so if we fault again at the same address, we don't end
686 // up in an endless loop.
687 //
688 // There are two potential complications here. Two threads trapping at
689 // the same address at the same time could cause one of the threads to
690 // think it already unguarded, and abort the VM. Likely very rare.
691 //
692 // The other race involves two threads alternately trapping at
693 // different addresses and failing to unguard the page, resulting in
694 // an endless loop. This condition is probably even more unlikely than
695 // the first.
696 //
697 // Although both cases could be avoided by using locks or thread local
698 // last_addr, these solutions are unnecessary complication: this
699 // handler is a best-effort safety net, not a complete solution. It is
700 // disabled by default and should only be used as a workaround in case
701 // we missed any no-execute-unsafe VM code.
702
703 last_addr = addr;
704 }
705 }
706 }
707 #endif // !AMD64
708
709 if (stub != NULL) {
710 // save all thread context in case we need to restore it
711 if (thread != NULL) thread->set_saved_exception_pc(pc);
712
713 uc->context_pc = (intptr_t)stub;
714 return true;
715 }
716
717 // signal-chaining
718 if (os::Bsd::chained_handler(sig, info, ucVoid)) {
719 return true;
720 }
721
722 if (!abort_if_unrecognized) {
723 // caller wants another chance, so give it to him
724 return false;
725 }
726
727 if (pc == NULL && uc != NULL) {
728 pc = os::Bsd::ucontext_get_pc(uc);
729 }
730
731 // unmask current signal
732 sigset_t newset;
733 sigemptyset(&newset);
734 sigaddset(&newset, sig);
735 sigprocmask(SIG_UNBLOCK, &newset, NULL);
736
737 VMError err(t, sig, pc, info, ucVoid);
738 err.report_and_die();
739
740 ShouldNotReachHere();
741 }
742
743 #ifdef _ALLBSD_SOURCE
744 // From solaris_i486.s ported to bsd_i486.s
745 extern "C" void fixcw();
746 #endif
747
748 void os::Bsd::init_thread_fpu_state(void) {
749 #ifndef AMD64
750 # ifdef _ALLBSD_SOURCE
751 // Set fpu to 53 bit precision. This happens too early to use a stub.
752 fixcw();
753 # else
754 // set fpu to 53 bit precision
755 set_fpu_control_word(0x27f);
756 # endif
757 #endif // !AMD64
758 }
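// The control word 0x27f used above selects round-to-nearest, 53-bit (double)
// precision, with all x87 exceptions masked.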
759
760 #ifndef _ALLBSD_SOURCE
761 int os::Bsd::get_fpu_control_word(void) {
762 #ifdef AMD64
763 return 0;
764 #else
765 int fpu_control;
766 _FPU_GETCW(fpu_control);
767 return fpu_control & 0xffff;
768 #endif // AMD64
769 }
770
771 void os::Bsd::set_fpu_control_word(int fpu_control) {
772 #ifndef AMD64
773 _FPU_SETCW(fpu_control);
774 #endif // !AMD64
775 }
776 #endif
777
778 // Check that the bsd kernel version is 2.4 or higher since earlier
779 // versions do not support SSE without patches.
780 bool os::supports_sse() {
781 #if defined(AMD64) || defined(_ALLBSD_SOURCE)
782 return true;
783 #else
784 struct utsname uts;
785 if( uname(&uts) != 0 ) return false; // uname fails?
786 char *minor_string;
787 int major = strtol(uts.release,&minor_string,10);
788 int minor = strtol(minor_string+1,NULL,10);
789 bool result = (major > 2 || (major==2 && minor >= 4));
790 #ifndef PRODUCT
791 if (PrintMiscellaneous && Verbose) {
792 tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
793 major,minor, result ? "DOES" : "does NOT");
794 }
795 #endif
796 return result;
797 #endif // AMD64
798 }
799
800 bool os::is_allocatable(size_t bytes) {
801 #ifdef AMD64
802 // unused on amd64?
803 return true;
804 #else
805
806 if (bytes < 2 * G) {
807 return true;
808 }
809
810 char* addr = reserve_memory(bytes, NULL);
811
812 if (addr != NULL) {
813 release_memory(addr, bytes);
814 }
815
816 return addr != NULL;
817 #endif // AMD64
818 }
819
820 ////////////////////////////////////////////////////////////////////////////////
821 // thread stack
822
823 #ifdef AMD64
824 size_t os::Bsd::min_stack_allowed = 64 * K;
825
826 // amd64: pthread on amd64 is always in floating stack mode
827 bool os::Bsd::supports_variable_stack_size() { return true; }
828 #else
829 size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
830
831 #ifdef __GNUC__
832 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
833 #endif
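// GET_GS() reads the current %gs segment selector with a movw;
// supports_variable_stack_size() below treats a non-zero value as evidence of
// floating-stack BsdThreads (see the comment inside that function).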
834
835 #ifdef _ALLBSD_SOURCE
836 bool os::Bsd::supports_variable_stack_size() { return true; }
837 #else
838 // Test if pthread library can support variable thread stack size. BsdThreads
839 // in fixed stack mode allocates 2M fixed slot for each thread. BsdThreads
840 // in floating stack mode and NPTL support variable stack size.
841 bool os::Bsd::supports_variable_stack_size() {
842 if (os::Bsd::is_NPTL()) {
843 // NPTL, yes
844 return true;
845
846 } else {
847 // Note: We can't control default stack size when creating a thread.
848 // If we use non-default stack size (pthread_attr_setstacksize), both
849 // floating stack and non-floating stack BsdThreads will return the
850 // same value. This makes it impossible to implement this function by
851 // detecting thread stack size directly.
852 //
853 // An alternative approach is to check %gs. Fixed-stack BsdThreads
854 // do not use %gs, so its value is 0. Floating-stack BsdThreads use
855 // %gs (either as LDT selector or GDT selector, depending on kernel)
856 // to access thread specific data.
857 //
858 // Note that %gs is a reserved glibc register since early 2001, so
859 // applications are not allowed to change its value (Ulrich Drepper from
860 // Redhat confirmed that all known offenders have been modified to use
861 // either %fs or TSD). In the worst case scenario, when VM is embedded in
862 // a native application that plays with %gs, we might see non-zero %gs
863 // even if BsdThreads is running in fixed stack mode. As a result, we'll
864 // return true and skip _thread_safety_check(), so we may not be able to
865 // detect stack-heap collisions. But otherwise it's harmless.
866 //
867 #ifdef __GNUC__
868 return (GET_GS() != 0);
869 #else
870 return false;
871 #endif
872 }
873 }
874 #endif
875 #endif // AMD64
876
877 // return default stack size for thr_type
878 size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
879 // default stack size (compiler thread needs larger stack)
880 #ifdef AMD64
881 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
882 #else
883 size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
884 #endif // AMD64
885 return s;
886 }
887
888 size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
889 // Creating a guard page is very expensive. A Java thread already has a HotSpot
890 // guard page, so only enable the glibc guard page for non-Java threads.
891 return (thr_type == java_thread ? 0 : page_size());
892 }
893
894 // Java thread:
895 //
896 // Low memory addresses
897 // +------------------------+
898 // | |\ JavaThread created by VM does not have glibc
899 // | glibc guard page | - guard, attached Java thread usually has
900 // | |/ 1 page glibc guard.
901 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
902 // | |\
903 // | HotSpot Guard Pages | - red and yellow pages
904 // | |/
905 // +------------------------+ JavaThread::stack_yellow_zone_base()
906 // | |\
907 // | Normal Stack | -
908 // | |/
909 // P2 +------------------------+ Thread::stack_base()
910 //
911 // Non-Java thread:
912 //
913 // Low memory addresses
914 // +------------------------+
915 // | |\
916 // | glibc guard page | - usually 1 page
917 // | |/
918 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
919 // | |\
920 // | Normal Stack | -
921 // | |/
922 // P2 +------------------------+ Thread::stack_base()
923 //
924 // ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size returned from
925 // pthread_attr_getstack()
926
927 static void current_stack_region(address * bottom, size_t * size) {
928 #ifdef __APPLE__
929 pthread_t self = pthread_self();
930 void *stacktop = pthread_get_stackaddr_np(self);
931 *size = pthread_get_stacksize_np(self);
932 *bottom = (address) stacktop - *size;
933 #elif defined(__OpenBSD__)
934 stack_t ss;
935 int rslt = pthread_stackseg_np(pthread_self(), &ss);
936
937 if (rslt != 0)
938 fatal(err_msg("pthread_stackseg_np failed with err = %d", rslt));
939
940 *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
941 *size = ss.ss_size;
942 #elif defined(_ALLBSD_SOURCE)
943 pthread_attr_t attr;
944
945 int rslt = pthread_attr_init(&attr);
946
947 // JVM needs to know exact stack location, abort if it fails
948 if (rslt != 0)
949 fatal(err_msg("pthread_attr_init failed with err = %d", rslt));
950
951 rslt = pthread_attr_get_np(pthread_self(), &attr);
952
953 if (rslt != 0)
954 fatal(err_msg("pthread_attr_get_np failed with err = %d", rslt));
955
956 if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
957 pthread_attr_getstacksize(&attr, size) != 0) {
958 fatal("Can not locate current stack attributes!");
959 }
960
961 pthread_attr_destroy(&attr);
962 #else
963 if (os::Bsd::is_initial_thread()) {
964 // initial thread needs special handling because pthread_getattr_np()
965 // may return bogus value.
966 *bottom = os::Bsd::initial_thread_stack_bottom();
967 *size = os::Bsd::initial_thread_stack_size();
968 } else {
969 pthread_attr_t attr;
970
971 int rslt = pthread_getattr_np(pthread_self(), &attr);
972
973 // JVM needs to know exact stack location, abort if it fails
974 if (rslt != 0) {
975 if (rslt == ENOMEM) {
976 vm_exit_out_of_memory(0, "pthread_getattr_np");
977 } else {
978 fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
979 }
980 }
981
982 if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
983 fatal("Can not locate current stack attributes!");
984 }
985
986 pthread_attr_destroy(&attr);
987
988 }
989 #endif
990 assert(os::current_stack_pointer() >= *bottom &&
991 os::current_stack_pointer() < *bottom + *size, "just checking");
992 }
993
994 address os::current_stack_base() {
995 address bottom;
996 size_t size;
997 current_stack_region(&bottom, &size);
998 return (bottom + size);
999 }
1000
1001 size_t os::current_stack_size() {
1002 // stack size includes normal stack and HotSpot guard pages
1003 address bottom;
1004 size_t size;
1005 current_stack_region(&bottom, &size);
1006 return size;
1007 }
1008
1009 /////////////////////////////////////////////////////////////////////////////
1010 // helper functions for fatal error handler
1011
1012 void os::print_context(outputStream *st, void *context) {
1013 if (context == NULL) return;
1014
1015 ucontext_t *uc = (ucontext_t*)context;
1016 st->print_cr("Registers:");
1017 #ifdef AMD64
1018 st->print( "RAX=" INTPTR_FORMAT, uc->context_rax);
1019 st->print(", RBX=" INTPTR_FORMAT, uc->context_rbx);
1020 st->print(", RCX=" INTPTR_FORMAT, uc->context_rcx);
1021 st->print(", RDX=" INTPTR_FORMAT, uc->context_rdx);
1022 st->cr();
1023 st->print( "RSP=" INTPTR_FORMAT, uc->context_rsp);
1024 st->print(", RBP=" INTPTR_FORMAT, uc->context_rbp);
1025 st->print(", RSI=" INTPTR_FORMAT, uc->context_rsi);
1026 st->print(", RDI=" INTPTR_FORMAT, uc->context_rdi);
1027 st->cr();
1028 st->print( "R8 =" INTPTR_FORMAT, uc->context_r8);
1029 st->print(", R9 =" INTPTR_FORMAT, uc->context_r9);
1030 st->print(", R10=" INTPTR_FORMAT, uc->context_r10);
1031 st->print(", R11=" INTPTR_FORMAT, uc->context_r11);
1032 st->cr();
1033 st->print( "R12=" INTPTR_FORMAT, uc->context_r12);
1034 st->print(", R13=" INTPTR_FORMAT, uc->context_r13);
1035 st->print(", R14=" INTPTR_FORMAT, uc->context_r14);
1036 st->print(", R15=" INTPTR_FORMAT, uc->context_r15);
1037 st->cr();
1038 st->print( "RIP=" INTPTR_FORMAT, uc->context_rip);
1039 st->print(", EFLAGS=" INTPTR_FORMAT, uc->context_flags);
1040 st->print(", ERR=" INTPTR_FORMAT, uc->context_err);
1041 st->cr();
1042 st->print(" TRAPNO=" INTPTR_FORMAT, uc->context_trapno);
1043 #else
1044 st->print( "EAX=" INTPTR_FORMAT, uc->context_eax);
1045 st->print(", EBX=" INTPTR_FORMAT, uc->context_ebx);
1046 st->print(", ECX=" INTPTR_FORMAT, uc->context_ecx);
1047 st->print(", EDX=" INTPTR_FORMAT, uc->context_edx);
1048 st->cr();
1049 st->print( "ESP=" INTPTR_FORMAT, uc->context_esp);
1050 st->print(", EBP=" INTPTR_FORMAT, uc->context_ebp);
1051 st->print(", ESI=" INTPTR_FORMAT, uc->context_esi);
1052 st->print(", EDI=" INTPTR_FORMAT, uc->context_edi);
1053 st->cr();
1054 st->print( "EIP=" INTPTR_FORMAT, uc->context_eip);
1055 st->print(", EFLAGS=" INTPTR_FORMAT, uc->context_eflags);
1056 #endif // AMD64
1057 st->cr();
1058 st->cr();
1059
1060 intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc);
1061 st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
1062 print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
1063 st->cr();
1064
1065 // Note: it may be unsafe to inspect memory near pc. For example, pc may
1066 // point to garbage if entry point in an nmethod is corrupted. Leave
1067 // this at the end, and hope for the best.
1068 address pc = os::Bsd::ucontext_get_pc(uc);
1069 st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
1070 print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
1071 }
1072
1073 void os::print_register_info(outputStream *st, void *context) {
1074 if (context == NULL) return;
1075
1076 ucontext_t *uc = (ucontext_t*)context;
1077
1078 st->print_cr("Register to memory mapping:");
1079 st->cr();
1080
1081 // this is horrendously verbose but the layout of the registers in the
1082 // context does not match how we defined our abstract Register set, so
1083 // we can't just iterate through the gregs area
1084
1085 // this is only for the "general purpose" registers
1086
1087 #ifdef AMD64
1088 st->print("RAX="); print_location(st, uc->context_rax);
1089 st->print("RBX="); print_location(st, uc->context_rbx);
1090 st->print("RCX="); print_location(st, uc->context_rcx);
1091 st->print("RDX="); print_location(st, uc->context_rdx);
1092 st->print("RSP="); print_location(st, uc->context_rsp);
1093 st->print("RBP="); print_location(st, uc->context_rbp);
1094 st->print("RSI="); print_location(st, uc->context_rsi);
1095 st->print("RDI="); print_location(st, uc->context_rdi);
1096 st->print("R8 ="); print_location(st, uc->context_r8);
1097 st->print("R9 ="); print_location(st, uc->context_r9);
1098 st->print("R10="); print_location(st, uc->context_r10);
1099 st->print("R11="); print_location(st, uc->context_r11);
1100 st->print("R12="); print_location(st, uc->context_r12);
1101 st->print("R13="); print_location(st, uc->context_r13);
1102 st->print("R14="); print_location(st, uc->context_r14);
1103 st->print("R15="); print_location(st, uc->context_r15);
1104 #else
1105 st->print("EAX="); print_location(st, uc->context_eax);
1106 st->print("EBX="); print_location(st, uc->context_ebx);
1107 st->print("ECX="); print_location(st, uc->context_ecx);
1108 st->print("EDX="); print_location(st, uc->context_edx);
1109 st->print("ESP="); print_location(st, uc->context_esp);
1110 st->print("EBP="); print_location(st, uc->context_ebp);
1111 st->print("ESI="); print_location(st, uc->context_esi);
1112 st->print("EDI="); print_location(st, uc->context_edi);
1113 #endif // AMD64
1114
1115 st->cr();
1116 }
1117
1118 void os::setup_fpu() {
1119 #ifndef AMD64
1120 address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
1121 __asm__ volatile ( "fldcw (%0)" :
1122 : "r" (fpu_cntrl) : "memory");
1123 #endif // !AMD64
1124 }