changeset 146:7cce9e4e0f7c

Merge
author rasbold
date Fri, 09 May 2008 05:26:59 -0700
parents 8bd1e4487c18 (diff) f3de1255b035 (current diff)
children 83c868b757c0
diffstat 48 files changed, 2119 insertions(+), 159 deletions(-)
--- a/make/linux/makefiles/mapfile-vers-debug	Wed May 07 08:06:46 2008 -0700
+++ b/make/linux/makefiles/mapfile-vers-debug	Fri May 09 05:26:59 2008 -0700
@@ -279,7 +279,9 @@
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-		fork1;
+                fork1;
+                numa_warn;
+                numa_error;
 
                 # Needed because there is no JVM interface for this.
                 sysThreadAvailableStackWithSlack;
--- a/make/linux/makefiles/mapfile-vers-product	Wed May 07 08:06:46 2008 -0700
+++ b/make/linux/makefiles/mapfile-vers-product	Fri May 09 05:26:59 2008 -0700
@@ -274,7 +274,9 @@
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-		fork1;
+                fork1;
+                numa_warn;
+                numa_error;
 
                 # Needed because there is no JVM interface for this.
                 sysThreadAvailableStackWithSlack;
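
Both mapfiles now export numa_warn and numa_error. This is what makes the empty definitions added to os_linux.cpp later in this changeset take effect: because the JVM itself exports the symbols, the dynamic linker binds libnuma's internal calls to the JVM's versions rather than libnuma's defaults, which write to stderr and, depending on configuration, may terminate the process. The interposed pair, as defined in the patch below:

    extern "C" void numa_warn(int number, char *where, ...) { }  // silently drop warnings
    extern "C" void numa_error(char *where) { }                  // silently drop errors
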
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/platform_sparcv9	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,15 @@
+os_family = linux
+
+arch = sparc
+
+arch_model = sparc
+
+os_arch = linux_sparc
+
+os_arch_model = linux_sparc
+
+lib_arch = sparcv9
+
+compiler = gcc
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DSPARC
--- a/src/os/linux/vm/os_linux.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Fri May 09 05:26:59 2008 -0700
@@ -2228,20 +2228,42 @@
 }
 
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
-void os::free_memory(char *addr, size_t bytes)         { }
+
+void os::free_memory(char *addr, size_t bytes) {
+  uncommit_memory(addr, bytes);
+}
+
 void os::numa_make_global(char *addr, size_t bytes)    { }
-void os::numa_make_local(char *addr, size_t bytes)     { }
-bool os::numa_topology_changed()                       { return false; }
-size_t os::numa_get_groups_num()                       { return 1; }
-int os::numa_get_group_id()                            { return 0; }
-size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-  if (size > 0) {
-    ids[0] = 0;
-    return 1;
+
+void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
+}
+
+bool os::numa_topology_changed()   { return false; }
+
+size_t os::numa_get_groups_num() {
+  int max_node = Linux::numa_max_node();
+  return max_node > 0 ? max_node + 1 : 1;
+}
+
+int os::numa_get_group_id() {
+  int cpu_id = Linux::sched_getcpu();
+  if (cpu_id != -1) {
+    int lgrp_id = Linux::get_node_by_cpu(cpu_id);
+    if (lgrp_id != -1) {
+      return lgrp_id;
+    }
   }
   return 0;
 }
 
+size_t os::numa_get_leaf_groups(int *ids, size_t size) {
+  for (size_t i = 0; i < size; i++) {
+    ids[i] = i;
+  }
+  return size;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
@@ -2250,6 +2272,74 @@
   return end;
 }
 
+extern "C" void numa_warn(int number, char *where, ...) { }
+extern "C" void numa_error(char *where) { }
+
+void os::Linux::libnuma_init() {
+  // sched_getcpu() should be in libc.
+  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
+                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));
+
+  if (sched_getcpu() != -1) { // Does it work?
+    void *handle = dlopen("libnuma.so", RTLD_LAZY);
+    if (handle != NULL) {
+      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
+                                           dlsym(handle, "numa_node_to_cpus")));
+      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
+                                       dlsym(handle, "numa_max_node")));
+      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
+                                        dlsym(handle, "numa_available")));
+      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
+                                            dlsym(handle, "numa_tonode_memory")));
+      if (numa_available() != -1) {
+        // Create a cpu -> node mapping
+        _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
+        rebuild_cpu_to_node_map();
+      }
+    }
+  }
+}
+
+// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
+// The table is later used in get_node_by_cpu().
+void os::Linux::rebuild_cpu_to_node_map() {
+  int cpu_num = os::active_processor_count();
+  cpu_to_node()->clear();
+  cpu_to_node()->at_grow(cpu_num - 1);
+  int node_num = numa_get_groups_num();
+  int cpu_map_size = (cpu_num + BitsPerLong - 1) / BitsPerLong;
+  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
+  for (int i = 0; i < node_num; i++) {
+    if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
+      for (int j = 0; j < cpu_map_size; j++) {
+        if (cpu_map[j] != 0) {
+          for (int k = 0; k < BitsPerLong; k++) {
+            if (cpu_map[j] & (1UL << k)) {
+              cpu_to_node()->at_put(j * BitsPerLong + k, i);
+            }
+          }
+        }
+      }
+    }
+  }
+  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
+}
+
+int os::Linux::get_node_by_cpu(int cpu_id) {
+  if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
+    return cpu_to_node()->at(cpu_id);
+  }
+  return -1;
+}
+
+GrowableArray<int>* os::Linux::_cpu_to_node;
+os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
+os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
+os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
+os::Linux::numa_available_func_t os::Linux::_numa_available;
+os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
+
+
 bool os::uncommit_memory(char* addr, size_t size) {
   return ::mmap(addr, size,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -3552,6 +3642,10 @@
           Linux::is_floating_stack() ? "floating stack" : "fixed stack");
   }
 
+  if (UseNUMA) {
+    Linux::libnuma_init();
+  }
+
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
     // if getrlimit/setrlimit fails but continue regardless.
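
os::Linux::libnuma_init() above binds to libnuma optionally: sched_getcpu() is probed in libc first, then libnuma.so is dlopen()ed and each entry point is resolved with dlsym(), so a system without the library simply leaves the function pointers NULL and the VM keeps its non-NUMA fallback behavior. A self-contained sketch of the same pattern, with illustrative names that are not part of the patch (compile with -ldl):

    #include <dlfcn.h>
    #include <cstdio>

    typedef int (*numa_max_node_func_t)(void);

    int main() {
      void* handle = dlopen("libnuma.so", RTLD_LAZY);   // failure just means "no NUMA"
      numa_max_node_func_t numa_max_node = (handle != NULL)
          ? (numa_max_node_func_t) dlsym(handle, "numa_max_node")
          : NULL;
      // Guarded call, mirroring the os::Linux wrappers: -1 means unavailable.
      int max_node = (numa_max_node != NULL) ? numa_max_node() : -1;
      printf("lgroups: %d\n", max_node > 0 ? max_node + 1 : 1);
      return 0;
    }
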
--- a/src/os/linux/vm/os_linux.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/linux/vm/os_linux.hpp	Fri May 09 05:26:59 2008 -0700
@@ -59,6 +59,8 @@
   static bool _is_NPTL;
   static bool _supports_fast_thread_cpu_time;
 
+  static GrowableArray<int>* _cpu_to_node;
+
  protected:
 
   static julong _physical_memory;
@@ -79,8 +81,9 @@
   static void set_is_LinuxThreads()           { _is_NPTL = false; }
   static void set_is_floating_stack()         { _is_floating_stack = true; }
 
+  static void rebuild_cpu_to_node_map();
+  static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
  public:
-
   static void init_thread_fpu_state();
   static int  get_fpu_control_word();
   static void set_fpu_control_word(int fpu_control);
@@ -143,6 +146,7 @@
   static bool is_floating_stack()             { return _is_floating_stack; }
 
   static void libpthread_init();
+  static void libnuma_init();
 
   // Minimum stack size a thread can be created with (allowing
   // the VM to completely create the thread and enter user code)
@@ -229,6 +233,38 @@
 
     #undef SR_SUSPENDED
   };
+
+private:
+  typedef int (*sched_getcpu_func_t)(void);
+  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
+  typedef int (*numa_max_node_func_t)(void);
+  typedef int (*numa_available_func_t)(void);
+  typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
+
+
+  static sched_getcpu_func_t _sched_getcpu;
+  static numa_node_to_cpus_func_t _numa_node_to_cpus;
+  static numa_max_node_func_t _numa_max_node;
+  static numa_available_func_t _numa_available;
+  static numa_tonode_memory_func_t _numa_tonode_memory;
+
+  static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
+  static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
+  static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
+  static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
+  static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
+
+public:
+  static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
+  static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
+    return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
+  }
+  static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
+  static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
+  static int numa_tonode_memory(void *start, size_t size, int node) {
+    return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
+  }
+  static int get_node_by_cpu(int cpu_id);
 };
 
 
--- a/src/os/linux/vm/os_linux.inline.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/linux/vm/os_linux.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -120,3 +120,6 @@
   RESTARTABLE(_cmd, _result); \
   return _result; \
 } while(false)
+
+inline bool os::numa_has_static_binding()   { return true; }
+inline bool os::numa_has_group_homing()     { return false;  }
--- a/src/os/solaris/vm/os_solaris.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Fri May 09 05:26:59 2008 -0700
@@ -2602,7 +2602,7 @@
 }
 
 // Tell the OS to make the range local to the first-touching LWP
-void os::numa_make_local(char *addr, size_t bytes) {
+void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
     debug_only(warning("MADV_ACCESS_LWP failed."));
--- a/src/os/solaris/vm/os_solaris.inline.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/solaris/vm/os_solaris.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -204,3 +204,6 @@
   RESTARTABLE(_cmd, _result); \
   return _result; \
 } while(false)
+
+inline bool os::numa_has_static_binding()   { return false; }
+inline bool os::numa_has_group_homing()     { return true;  }
--- a/src/os/windows/vm/os_windows.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Fri May 09 05:26:59 2008 -0700
@@ -2581,7 +2581,7 @@
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
 void os::free_memory(char *addr, size_t bytes)         { }
 void os::numa_make_global(char *addr, size_t bytes)    { }
-void os::numa_make_local(char *addr, size_t bytes)     { }
+void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
 bool os::numa_topology_changed()                       { return false; }
 size_t os::numa_get_groups_num()                       { return 1; }
 int os::numa_get_group_id()                            { return 0; }
--- a/src/os/windows/vm/os_windows.inline.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/os/windows/vm/os_windows.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -69,3 +69,6 @@
     *((int *)(sp - (pages * vm_page_size()))) = 0;
   }
 }
+
+inline bool os::numa_has_static_binding()   { return true;   }
+inline bool os::numa_has_group_homing()     { return false;  }
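
Taken together, the three os_*.inline.hpp additions describe each platform's NUMA placement model: Linux and Windows report static binding (a range can be bound to a node explicitly, before it is touched), while Solaris reports group homing (pages are homed to the lgroup of the first-touching LWP, which is what the MADV_ACCESS_LWP advice above arranges). A hypothetical caller would adapt roughly as follows (sketch, not code from this changeset):

    void place_chunk(char* addr, size_t bytes, int lgrp) {
      os::numa_make_local(addr, bytes, lgrp);  // bind (Linux/Windows) or advise (Solaris)
      if (!os::numa_has_static_binding()) {
        // Group homing: the hint only takes effect on first touch, so the
        // range must be touched from a thread running in the target lgroup.
        memset(addr, 0, bytes);
      }
    }
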
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_assembler_linux_sparc.cpp.incl"
+
+#include <asm-sparc/traps.h>
+
+bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
+  // Since the linux kernel resides at the low end of
+  // user address space, no null pointer check is needed.
+  return offset < 0 || offset >= 0x100000;
+}
+
+void MacroAssembler::read_ccr_trap(Register ccr_save) {
+  // No implementation
+  breakpoint_trap();
+}
+
+void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
+  // No implementation
+  breakpoint_trap();
+}
+
+void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
+void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
+
+// Use software breakpoint trap until we figure out how to do this on Linux
+void MacroAssembler::get_psr_trap()       { trap(SP_TRAP_SBPT); }
+void MacroAssembler::set_psr_trap()       { trap(SP_TRAP_SBPT); }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,206 @@
+/*
+ * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline void Atomic::inc    (volatile jint*     dest) { (void)add    (1, dest); }
+inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); }
+inline void Atomic::inc_ptr(volatile void*     dest) { (void)add_ptr(1, dest); }
+
+inline void Atomic::dec    (volatile jint*     dest) { (void)add    (-1, dest); }
+inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
+inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  intptr_t rv;
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+  return rv;
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    "1: \n\t"
+    " ldx    [%2], %%o2\n\t"
+    " add    %0, %%o2, %%o3\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " add    %0, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else
+  __asm__ volatile(
+    "1: \n\t"
+    " ld     [%2], %%o2\n\t"
+    " add    %1, %%o2, %%o3\n\t"
+    " cas    [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    1b\n\t"
+    "  nop\n\t"
+    " add    %1, %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (add_value), "r" (dest)
+    : "memory", "o2", "o3");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  intptr_t rv = exchange_value;
+  __asm__ volatile(
+    " swap   [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+  return rv;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  intptr_t rv = exchange_value;
+#ifdef _LP64
+  __asm__ volatile(
+    "1:\n\t"
+    " mov    %1, %%o3\n\t"
+    " ldx    [%2], %%o2\n\t"
+    " casx   [%2], %%o2, %%o3\n\t"
+    " cmp    %%o2, %%o3\n\t"
+    " bne    %%xcc, 1b\n\t"
+    "  nop\n\t"
+    " mov    %%o2, %0\n\t"
+    : "=r" (rv)
+    : "r" (exchange_value), "r" (dest)
+    : "memory", "o2", "o3");
+#else
+  __asm__ volatile(
+    "swap    [%2],%1\n\t"
+    : "=r" (rv)
+    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
+  jint rv;
+  __asm__ volatile(
+    " cas    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value) {
+#ifdef _LP64
+  jlong rv;
+  __asm__ volatile(
+    " casx   [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+  return rv;
+#else
+  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
+  volatile jlong_accessor evl, cvl, rv;
+  evl.long_value = exchange_value;
+  cvl.long_value = compare_value;
+
+  __asm__ volatile(
+    " sllx   %2, 32, %2\n\t"
+    " srl    %3, 0,  %3\n\t"
+    " or     %2, %3, %2\n\t"
+    " sllx   %5, 32, %5\n\t"
+    " srl    %6, 0,  %6\n\t"
+    " or     %5, %6, %5\n\t"
+    " casx   [%4], %5, %2\n\t"
+    " srl    %2, 0, %1\n\t"
+    " srlx   %2, 32, %0\n\t"
+    : "=r" (rv.words[0]), "=r" (rv.words[1])
+    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
+    : "memory");
+
+  return rv.long_value;
+#endif
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+  intptr_t rv;
+#ifdef _LP64
+  __asm__ volatile(
+    " casx    [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#else
+  __asm__ volatile(
+    " cas     [%2], %3, %0"
+    : "=r" (rv)
+    : "0" (exchange_value), "r" (dest), "r" (compare_value)
+    : "memory");
+#endif // _LP64
+  return rv;
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value) {
+  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value);
+}
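
Each Atomic::add variant above is the classic SPARC load/compute/cas retry loop: read the old value, form old + delta in a scratch register, cas it against memory, and branch back if another CPU changed the location in between. The same control flow in portable C++, using a GCC __sync builtin purely for illustration:

    // Sketch: portable analogue of the ld/add/cas/bne loop in Atomic::add.
    inline int atomic_add_sketch(int add_value, volatile int* dest) {
      for (;;) {
        int old_val = *dest;                   // ld  [%2], %o2
        int new_val = old_val + add_value;     // add %1, %o2, %o3
        if (__sync_val_compare_and_swap(dest, old_val, new_val) == old_val) {
          return new_val;                      // Atomic::add returns the new value
        }
        // cas lost the race (cmp/bne 1b): retry with a fresh value.
      }
    }
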
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// Sets the default values for platform dependent flags used by the
+// runtime system.  (see globals.hpp)
+//
+
+define_pd_global(uintx, JVMInvokeMethodSlack,    12288);
+define_pd_global(intx, CompilerThreadStackSize,  0);
+
+// Only used on 64 bit Windows platforms
+define_pd_global(bool, UseVectoredExceptions, false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/linux_sparc.ad	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,27 @@
+//
+// Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+// CA 95054 USA or visit www.sun.com if you need additional information or
+// have any questions.
+//
+
+//
+//
+
+// SPARC Linux Architecture Description File
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/linux_sparc.s	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,105 @@
+#
+# Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+
+    # Prototype: int SafeFetch32 (int * adr, int ErrValue)
+    # The "ld" at Fetch32 is a potentially faulting instruction.
+    # If the instruction traps, the trap handler will arrange
+    # for control to resume at Fetch32Resume.
+    # By convention with the trap handler, we ensure there is a non-CTI
+    # instruction in the trap shadow.
+        
+
+    .globl  SafeFetch32, Fetch32PFI, Fetch32Resume
+    .globl  SafeFetchN
+    .align  32
+    .type    SafeFetch32,@function
+SafeFetch32:        
+    mov     %o0, %g1
+    mov     %o1, %o0
+Fetch32PFI:
+    # <-- Potentially faulting instruction
+    ld      [%g1], %o0         
+Fetch32Resume:
+    nop
+    retl
+    nop
+
+    .globl  SafeFetchN, FetchNPFI, FetchNResume
+    .type    SafeFetchN,@function
+    .align  32
+SafeFetchN:
+    mov     %o0, %g1
+    mov     %o1, %o0
+FetchNPFI:
+    ldn     [%g1], %o0
+FetchNResume:
+    nop
+    retl
+    nop
+
+    # Possibilities:
+    # -- membar
+    # -- CAS (SP + BIAS, G0, G0)
+    # -- wr %g0, %asi
+
+    .globl SpinPause
+    .type   SpinPause,@function
+    .align  32
+SpinPause:      
+    retl
+    mov %g0, %o0
+
+   .globl _Copy_conjoint_jlongs_atomic
+   .type   _Copy_conjoint_jlongs_atomic,@function
+_Copy_conjoint_jlongs_atomic:
+        cmp     %o0, %o1
+	bleu    4f
+	sll     %o2, 3, %o4
+        ba      2f
+   1:
+	subcc   %o4, 8, %o4
+	std     %o2, [%o1]
+	add     %o0, 8, %o0
+	add     %o1, 8, %o1
+   2:
+	bge,a   1b
+	ldd     [%o0], %o2
+	ba      5f
+        nop
+   3:
+	std     %o2, [%o1+%o4]
+   4:
+	subcc   %o4, 8, %o4
+	bge,a   3b
+	ldd     [%o0+%o4], %o2
+   5:      
+	retl
+	nop
+
+
+    .globl _flush_reg_windows
+    .align 32
+ _flush_reg_windows:
+        ta 0x03
+        retl
+        mov     %fp, %o0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/orderAccess_linux_sparc.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Implementation of class OrderAccess.
+
+// Assume TSO.
+
+inline void OrderAccess::loadload()   { acquire(); }
+inline void OrderAccess::storestore() { release(); }
+inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::storeload()  { fence(); }
+
+inline void OrderAccess::acquire() {
+  __asm__ volatile ("nop" : : :);
+}
+
+inline void OrderAccess::release() {
+  jint* dummy = (jint*)&dummy;
+  __asm__ volatile("stw %%g0, [%0]" : : "r" (dummy) : "memory");
+}
+
+inline void OrderAccess::fence() {
+  __asm__ volatile ("membar  #StoreLoad" : : :);
+}
+
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
+
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
+
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
+
+inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
+inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; fence(); }
+
+inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { *(void* volatile *)p = v; fence(); }
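
The whole file leans on SPARC TSO: loads already behave as acquires and stores as releases, so loadload/loadstore reduce to acquire() (a nop), storestore to release() (a store to a dummy stack slot), and only the store-then-load edge needs a real membar #StoreLoad in fence(). That is why load_acquire and release_store above are plain memory accesses. The one pattern that genuinely pays for the barrier is a Dekker-style handshake (sketch in terms of the primitives defined above):

    volatile jint interested_a = 0;
    volatile jint interested_b = 0;

    void thread_a_enter() {
      OrderAccess::release_store(&interested_a, 1);  // plain store under TSO
      OrderAccess::fence();                          // membar #StoreLoad before the read
      if (OrderAccess::load_acquire(&interested_b) == 0) {
        // safe: thread B cannot also have read interested_a == 0
      }
    }
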
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,648 @@
+/*
+ * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// do not include  precompiled  header file
+
+#include "incls/_os_linux_sparc.cpp.incl"
+
+// Linux/Sparc has rather obscure naming of registers in sigcontext,
+// and it differs between 32 and 64 bits
+#ifdef _LP64
+#define SIG_PC(x) ((x)->sigc_regs.tpc)
+#define SIG_NPC(x) ((x)->sigc_regs.tnpc)
+#define SIG_REGS(x) ((x)->sigc_regs)
+#else
+#define SIG_PC(x) ((x)->si_regs.pc)
+#define SIG_NPC(x) ((x)->si_regs.npc)
+#define SIG_REGS(x) ((x)->si_regs)
+#endif
+
+// these are used to reference registers in sigcontext
+enum {
+  CON_G0 = 0,
+  CON_G1,
+  CON_G2,
+  CON_G3,
+  CON_G4,
+  CON_G5,
+  CON_G6,
+  CON_G7,
+  CON_O0,
+  CON_O1,
+  CON_O2,
+  CON_O3,
+  CON_O4,
+  CON_O5,
+  CON_O6,
+  CON_O7,
+};
+
+static inline void set_cont_address(sigcontext* ctx, address addr) {
+  SIG_PC(ctx)  = (intptr_t)addr;
+  SIG_NPC(ctx) = (intptr_t)(addr+4);
+}
+
+// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
+// currently interrupted by SIGPROF.
+// os::Solaris::fetch_frame_from_ucontext() tries to skip nested
+// signal frames. Currently we don't do that on Linux, so it's the
+// same as os::fetch_frame_from_context().
+ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
+                                                ucontext_t* uc,
+                                                intptr_t** ret_sp,
+                                                intptr_t** ret_fp) {
+  assert(thread != NULL, "just checking");
+  assert(ret_sp != NULL, "just checking");
+  assert(ret_fp != NULL, "just checking");
+
+  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
+}
+
+ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+                                        intptr_t** ret_sp,
+                                        intptr_t** ret_fp) {
+  ucontext_t* uc = (ucontext_t*) ucVoid;
+  ExtendedPC  epc;
+
+  if (uc != NULL) {
+    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
+    if (ret_sp) {
+      *ret_sp = os::Linux::ucontext_get_sp(uc);
+    }
+    if (ret_fp) {
+      *ret_fp = os::Linux::ucontext_get_fp(uc);
+    }
+  } else {
+    // construct empty ExtendedPC for return value checking
+    epc = ExtendedPC(NULL);
+    if (ret_sp) {
+      *ret_sp = (intptr_t*) NULL;
+    }
+    if (ret_fp) {
+      *ret_fp = (intptr_t*) NULL;
+    }
+  }
+
+  return epc;
+}
+
+frame os::fetch_frame_from_context(void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+frame os::get_sender_for_C_frame(frame* fr) {
+  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
+}
+
+frame os::current_frame() {
+  fprintf(stderr, "current_frame()");
+
+  intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
+  frame myframe(sp, frame::unpatchable,
+                CAST_FROM_FN_PTR(address, os::current_frame));
+  if (os::is_first_C_frame(&myframe)) {
+    // stack is not walkable
+    return frame(NULL, frame::unpatchable, NULL);
+  } else {
+    return os::get_sender_for_C_frame(&myframe);
+  }
+}
+
+address os::current_stack_pointer() {
+  register void *sp __asm__ ("sp");
+  return (address)sp;
+}
+
+static void current_stack_region(address* bottom, size_t* size) {
+  if (os::Linux::is_initial_thread()) {
+    // the initial thread needs special handling because pthread_getattr_np()
+    // may return a bogus value.
+    *bottom = os::Linux::initial_thread_stack_bottom();
+    *size = os::Linux::initial_thread_stack_size();
+  } else {
+    pthread_attr_t attr;
+
+    int rslt = pthread_getattr_np(pthread_self(), &attr);
+
+    // JVM needs to know exact stack location, abort if it fails
+    if (rslt != 0) {
+      if (rslt == ENOMEM) {
+        vm_exit_out_of_memory(0, "pthread_getattr_np");
+      } else {
+        fatal1("pthread_getattr_np failed with errno = %d", rslt);
+      }
+    }
+
+    if (pthread_attr_getstack(&attr, (void**)bottom, size) != 0) {
+      fatal("Can not locate current stack attributes!");
+    }
+
+    pthread_attr_destroy(&attr);
+  }
+  assert(os::current_stack_pointer() >= *bottom &&
+         os::current_stack_pointer() < *bottom + *size, "just checking");
+}
+
+address os::current_stack_base() {
+  address bottom;
+  size_t size;
+  current_stack_region(&bottom, &size);
+  return bottom + size;
+}
+
+size_t os::current_stack_size() {
+  // stack size includes normal stack and HotSpot guard pages
+  address bottom;
+  size_t size;
+  current_stack_region(&bottom, &size);
+  return size;
+}
+
+char* os::non_memory_address_word() {
+  // Must never look like an address returned by reserve_memory,
+  // even in its subfields (as defined by the CPU immediate fields,
+  // if the CPU splits constants across multiple instructions).
+  // On SPARC, 0 != %hi(any real address), because there is no
+  // allocation in the first 1Kb of the virtual address space.
+  return (char*) 0;
+}
+
+void os::initialize_thread() {}
+
+void os::print_context(outputStream *st, void *context) {
+  if (context == NULL) return;
+
+  ucontext_t* uc = (ucontext_t*)context;
+  sigcontext* sc = (sigcontext*)context;
+  st->print_cr("Registers:");
+
+  st->print_cr(" O0=" INTPTR_FORMAT " O1=" INTPTR_FORMAT
+               " O2=" INTPTR_FORMAT " O3=" INTPTR_FORMAT,
+               SIG_REGS(sc).u_regs[CON_O0],
+               SIG_REGS(sc).u_regs[CON_O1],
+               SIG_REGS(sc).u_regs[CON_O2],
+               SIG_REGS(sc).u_regs[CON_O3]);
+  st->print_cr(" O4=" INTPTR_FORMAT " O5=" INTPTR_FORMAT
+               " O6=" INTPTR_FORMAT " O7=" INTPTR_FORMAT,
+               SIG_REGS(sc).u_regs[CON_O4],
+               SIG_REGS(sc).u_regs[CON_O5],
+               SIG_REGS(sc).u_regs[CON_O6],
+               SIG_REGS(sc).u_regs[CON_O7]);
+
+  st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
+               " G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT,
+               SIG_REGS(sc).u_regs[CON_G1],
+               SIG_REGS(sc).u_regs[CON_G2],
+               SIG_REGS(sc).u_regs[CON_G3],
+               SIG_REGS(sc).u_regs[CON_G4]);
+  st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT
+               " G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT,
+               SIG_REGS(sc).u_regs[CON_G5],
+               SIG_REGS(sc).u_regs[CON_G6],
+               SIG_REGS(sc).u_regs[CON_G7],
+               SIG_REGS(sc).y);
+
+  st->print_cr(" PC=" INTPTR_FORMAT " nPC=" INTPTR_FORMAT,
+               SIG_PC(sc),
+               SIG_NPC(sc));
+  st->cr();
+  st->cr();
+
+  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
+  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
+  st->cr();
+
+  // Note: it may be unsafe to inspect memory near pc. For example, pc may
+  // point to garbage if the entry point in an nmethod is corrupted. Leave
+  // this at the end, and hope for the best.
+  address pc = os::Linux::ucontext_get_pc(uc);
+  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
+}
+
+
+address os::Linux::ucontext_get_pc(ucontext_t* uc) {
+  return (address) SIG_PC((sigcontext*)uc);
+}
+
+intptr_t* os::Linux::ucontext_get_sp(ucontext_t *uc) {
+  return (intptr_t*)
+    ((intptr_t)SIG_REGS((sigcontext*)uc).u_regs[CON_O6] + STACK_BIAS);
+}
+
+// not used on Sparc
+intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+// Utility functions
+
+extern "C" void Fetch32PFI();
+extern "C" void Fetch32Resume();
+extern "C" void FetchNPFI();
+extern "C" void FetchNResume();
+
+inline static bool checkPrefetch(sigcontext* uc, address pc) {
+  if (pc == (address) Fetch32PFI) {
+    set_cont_address(uc, address(Fetch32Resume));
+    return true;
+  }
+  if (pc == (address) FetchNPFI) {
+    set_cont_address(uc, address(FetchNResume));
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkOverflow(sigcontext* uc,
+                                 address pc,
+                                 address addr,
+                                 JavaThread* thread,
+                                 address* stub) {
+  // check if fault address is within thread stack
+  if (addr < thread->stack_base() &&
+      addr >= thread->stack_base() - thread->stack_size()) {
+    // stack overflow
+    if (thread->in_stack_yellow_zone(addr)) {
+      thread->disable_stack_yellow_zone();
+      if (thread->thread_state() == _thread_in_Java) {
+        // Throw a stack overflow exception.  Guard pages will be reenabled
+        // while unwinding the stack.
+        *stub =
+          SharedRuntime::continuation_for_implicit_exception(thread,
+                                                             pc,
+                                                             SharedRuntime::STACK_OVERFLOW);
+      } else {
+        // Thread was in the vm or native code.  Return and try to finish.
+        return true;
+      }
+    } else if (thread->in_stack_red_zone(addr)) {
+      // Fatal red zone violation.  Disable the guard pages and fall through
+      // to handle_unexpected_exception way down below.
+      thread->disable_stack_red_zone();
+      tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+    } else {
+      // Accessing stack address below sp may cause SEGV if current
+      // thread has MAP_GROWSDOWN stack. This should only happen when
+      // current thread was created by user code with MAP_GROWSDOWN flag
+      // and then attached to VM. See notes in os_linux.cpp.
+      if (thread->osthread()->expanding_stack() == 0) {
+        thread->osthread()->set_expanding_stack();
+        if (os::Linux::manually_expand_stack(thread, addr)) {
+          thread->osthread()->clear_expanding_stack();
+          return true;
+        }
+        thread->osthread()->clear_expanding_stack();
+      } else {
+        fatal("recursive segv. expanding stack.");
+      }
+    }
+  }
+  return false;
+}
+
+inline static bool checkPollingPage(address pc, address fault, address* stub) {
+  if (fault == os::get_polling_page()) {
+    *stub = SharedRuntime::get_poll_stub(pc);
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkByteBuffer(address pc, address* stub) {
+  // BugId 4454115: A read from a MappedByteBuffer can fault
+  // here if the underlying file has been truncated.
+  // Do not crash the VM in such a case.
+  CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+  nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+  if (nm != NULL && nm->has_unsafe_access()) {
+    *stub = StubRoutines::handler_for_unsafe_access();
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkVerifyOops(address pc, address fault, address* stub) {
+  if (pc >= MacroAssembler::_verify_oop_implicit_branch[0]
+      && pc <  MacroAssembler::_verify_oop_implicit_branch[1] ) {
+    *stub     =  MacroAssembler::_verify_oop_implicit_branch[2];
+    warning("fixed up memory fault in +VerifyOops at address "
+            INTPTR_FORMAT, fault);
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkFPFault(address pc, int code,
+                                JavaThread* thread, address* stub) {
+  if (code == FPE_INTDIV || code == FPE_FLTDIV) {
+    *stub =
+      SharedRuntime::
+      continuation_for_implicit_exception(thread,
+                                          pc,
+                                          SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkNullPointer(address pc, intptr_t fault,
+                                    JavaThread* thread, address* stub) {
+  if (!MacroAssembler::needs_explicit_null_check(fault)) {
+    // Determination of interpreter/vtable stub/compiled code null
+    // exception
+    *stub =
+      SharedRuntime::
+      continuation_for_implicit_exception(thread, pc,
+                                          SharedRuntime::IMPLICIT_NULL);
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkFastJNIAccess(address pc, address* stub) {
+  address addr = JNI_FastGetField::find_slowcase_pc(pc);
+  if (addr != (address)-1) {
+    *stub = addr;
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkSerializePage(JavaThread* thread, address addr) {
+  return os::is_memory_serialize_page(thread, addr);
+}
+
+inline static bool checkZombie(sigcontext* uc, address* pc, address* stub) {
+  if (nativeInstruction_at(*pc)->is_zombie()) {
+    // zombie method (ld [%g0],%o7 instruction)
+    *stub = SharedRuntime::get_handle_wrong_method_stub();
+
+    // At the stub it needs to look like a call from the caller of this
+    // method (not a call from the segv site).
+    *pc = (address)SIG_REGS(uc).u_regs[CON_O7];
+    return true;
+  }
+  return false;
+}
+
+inline static bool checkICMiss(sigcontext* uc, address* pc, address* stub) {
+#ifdef COMPILER2
+  if (nativeInstruction_at(*pc)->is_ic_miss_trap()) {
+#ifdef ASSERT
+#ifdef TIERED
+    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+    assert(cb->is_compiled_by_c2(), "Wrong compiler");
+#endif // TIERED
+#endif // ASSERT
+    // Inline cache missed and user trap "Tne G0+ST_RESERVED_FOR_USER_0+2" taken.
+    *stub = SharedRuntime::get_ic_miss_stub();
+    // At the stub it needs to look like a call from the caller of this
+    // method (not a call from the segv site).
+    *pc = (address)SIG_REGS(uc).u_regs[CON_O7];
+    return true;
+  }
+#endif  // COMPILER2
+  return false;
+}
+
+extern "C" int
+JVM_handle_linux_signal(int sig,
+                        siginfo_t* info,
+                        void* ucVoid,
+                        int abort_if_unrecognized) {
+  // in fact this isn't ucontext_t* at all, but struct sigcontext*
+  // but Linux porting layer uses ucontext_t, so to minimize code change
+  // we cast as needed
+  ucontext_t* ucFake = (ucontext_t*) ucVoid;
+  sigcontext* uc = (sigcontext*)ucVoid;
+
+  Thread* t = ThreadLocalStorage::get_thread_slow();
+
+  SignalHandlerMark shm(t);
+
+  // Note: it's not uncommon that JNI code uses signal/sigset to install
+  // then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
+  // or have a SIGILL handler when detecting CPU type). When that happens,
+  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
+  // avoid unnecessary crash when libjsig is not preloaded, try handle signals
+  // that do not require siginfo/ucontext first.
+
+  if (sig == SIGPIPE || sig == SIGXFSZ) {
+    // allow chained handler to go first
+    if (os::Linux::chained_handler(sig, info, ucVoid)) {
+      return true;
+    } else {
+      if (PrintMiscellaneous && (WizardMode || Verbose)) {
+        char buf[64];
+        warning("Ignoring %s - see bugs 4229104 or 646499219",
+                os::exception_name(sig, buf, sizeof(buf)));
+      }
+      return true;
+    }
+  }
+
+  JavaThread* thread = NULL;
+  VMThread* vmthread = NULL;
+  if (os::Linux::signal_handlers_are_installed) {
+    if (t != NULL ){
+      if(t->is_Java_thread()) {
+        thread = (JavaThread*)t;
+      }
+      else if(t->is_VM_thread()){
+        vmthread = (VMThread *)t;
+      }
+    }
+  }
+
+  // decide if this trap can be handled by a stub
+  address stub = NULL;
+  address pc = NULL;
+  address npc = NULL;
+
+  //%note os_trap_1
+  if (info != NULL && uc != NULL && thread != NULL) {
+    pc = address(SIG_PC(uc));
+    npc = address(SIG_NPC(uc));
+
+    // Check to see if we caught the safepoint code in the
+    // process of write protecting the memory serialization page.
+    // It write enables the page immediately after protecting it
+    // so we can just return to retry the write.
+    if ((sig == SIGSEGV) && checkSerializePage(thread, (address)info->si_addr)) {
+      // Block the current thread until the memory serialize page permission is restored.
+      os::block_on_serialize_page_trap();
+      return 1;
+    }
+
+    if (checkPrefetch(uc, pc)) {
+      return 1;
+    }
+
+    // Handle ALL stack overflow variations here
+    if (sig == SIGSEGV) {
+      if (checkOverflow(uc, pc, (address)info->si_addr, thread, &stub)) {
+        return 1;
+      }
+    }
+
+    if (sig == SIGBUS &&
+        thread->thread_state() == _thread_in_vm &&
+        thread->doing_unsafe_access()) {
+      stub = StubRoutines::handler_for_unsafe_access();
+    }
+
+    if (thread->thread_state() == _thread_in_Java) {
+      do {
+        // Java thread running in Java code => find exception handler if any
+        // a fault inside compiled code, the interpreter, or a stub
+
+        if ((sig == SIGSEGV) && checkPollingPage(pc, (address)info->si_addr, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGBUS) && checkByteBuffer(pc, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGSEGV || sig == SIGBUS) &&
+            checkVerifyOops(pc, (address)info->si_addr, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGSEGV) && checkZombie(uc, &pc, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGILL) && checkICMiss(uc, &pc, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGFPE) && checkFPFault(pc, info->si_code, thread, &stub)) {
+          break;
+        }
+
+        if ((sig == SIGSEGV) &&
+            checkNullPointer(pc, (intptr_t)info->si_addr, thread, &stub)) {
+          break;
+        }
+      } while (0);
+
+      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
+      // and the heap gets shrunk before the field access.
+      if ((sig == SIGSEGV) || (sig == SIGBUS)) {
+        checkFastJNIAccess(pc, &stub);
+      }
+    }
+
+    if (stub != NULL) {
+      // save all thread context in case we need to restore it
+      thread->set_saved_exception_pc(pc);
+      thread->set_saved_exception_npc(npc);
+      set_cont_address(uc, stub);
+      return true;
+    }
+  }
+
+  // signal-chaining
+  if (os::Linux::chained_handler(sig, info, ucVoid)) {
+    return true;
+  }
+
+  if (!abort_if_unrecognized) {
+    // caller wants another chance, so give it to him
+    return false;
+  }
+
+  if (pc == NULL && uc != NULL) {
+    pc = os::Linux::ucontext_get_pc((ucontext_t*)uc);
+  }
+
+  // unmask current signal
+  sigset_t newset;
+  sigemptyset(&newset);
+  sigaddset(&newset, sig);
+  sigprocmask(SIG_UNBLOCK, &newset, NULL);
+
+  VMError err(t, sig, pc, info, ucVoid);
+  err.report_and_die();
+
+  ShouldNotReachHere();
+}
+
+void os::Linux::init_thread_fpu_state(void) {
+  // Nothing to do
+}
+
+int os::Linux::get_fpu_control_word() {
+  return 0;
+}
+
+void os::Linux::set_fpu_control_word(int fpu) {
+  // nothing
+}
+
+bool os::is_allocatable(size_t bytes) {
+#ifdef _LP64
+  return true;
+#else
+  if (bytes < 2 * G) {
+    return true;
+  }
+
+  char* addr = reserve_memory(bytes, NULL);
+
+  if (addr != NULL) {
+    release_memory(addr, bytes);
+  }
+
+  return addr != NULL;
+#endif // _LP64
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+size_t os::Linux::min_stack_allowed  = 128 * K;
+
+// pthread on Ubuntu is always in floating stack mode
+bool os::Linux::supports_variable_stack_size() {  return true; }
+
+// return default stack size for thr_type
+size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+  // default stack size (compiler thread needs larger stack)
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
+  return s;
+}
+
+size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
+  // Creating guard page is very expensive. Java thread has HotSpot
+  // guard page, only enable glibc guard page for non-Java threads.
+  return (thr_type == java_thread ? 0 : page_size());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright 1999-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+  //
+  // NOTE: we are back in class os here, not Linux
+  //
+  static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
+  static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
+  static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
+  static jint  (*atomic_add_func)         (jint,  volatile jint*);
+  static void  (*fence_func)              ();
+
+  static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
+  static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
+  static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
+  static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+  static void  fence_bootstrap              ();
+
+  static void setup_fpu() {}
+
+  static bool is_allocatable(size_t bytes);
+
+  // Used to register dynamic code cache area with the OS
+  // Note: Currently only used in 64 bit Windows implementations
+  static bool register_code_area(char *low, char *high) { return true; }
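
The atomic_*_func pointers and their *_bootstrap counterparts follow HotSpot's usual stub-bootstrap pattern: each pointer initially targets a bootstrap routine, and once a proper stub is available the pointer is repointed so later calls dispatch straight to the stub. A hedged sketch of the idea, with illustrative names rather than the HotSpot code:

    typedef int (*xchg_func_t)(int, volatile int*);

    static int xchg_bootstrap(int x, volatile int* dest);
    static xchg_func_t xchg_func = xchg_bootstrap;     // first call lands here

    // Stand-in for the generated stub; a real one would be atomic.
    static int xchg_stub(int x, volatile int* dest) {
      int old_val = *dest; *dest = x; return old_val;
    }

    static int xchg_bootstrap(int x, volatile int* dest) {
      xchg_func = xchg_stub;    // patch the dispatch pointer exactly once
      return xchg_func(x, dest);
    }
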
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/prefetch_linux_sparc.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#if defined(COMPILER2) || defined(_LP64)
+
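+// SPARC V9 prefetch: fcn 0 = prefetch for several reads, fcn 2 =
+// prefetch for several writes. Plain 32-bit builds may target pre-V9
+// hardware, so prefetching is a no-op there (see below).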
+inline void Prefetch::read(void *loc, intx interval) {
+  __asm__ volatile("prefetch [%0+%1], 0" : : "r" (loc), "r" (interval) : "memory" );
+}
+
+inline void Prefetch::write(void *loc, intx interval) {
+  __asm__ volatile("prefetch [%0+%1], 2" : : "r" (loc), "r" (interval) : "memory" );
+}
+
+#else
+
+inline void Prefetch::read (void *loc, intx interval) {}
+inline void Prefetch::write(void *loc, intx interval) {}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright 1998-2003 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_threadLS_linux_sparc.cpp.incl"
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
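+  // Nothing to generate: Thread::current() goes through the thread-local
+  // storage slot that pd_set_thread() maintains below.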
+}
+
+void ThreadLocalStorage::pd_init() {
+   // Nothing to do
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,28 @@
+/*
+ * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+public:
+  static Thread* thread() {
+    return (Thread*) os::thread_local_storage_at(thread_index());
+  }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_thread_linux_sparc.cpp.incl"
+
+// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
+// currently interrupted by SIGPROF
+bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
+                                                     void* ucontext,
+                                                     bool isInJava) {
+  assert(Thread::current() == this, "caller must be current thread");
+  assert(this->is_Java_thread(), "must be JavaThread");
+
+  JavaThread* jt = (JavaThread *)this;
+
+  if (!isInJava) {
+    // make_walkable() flushes register windows and grabs last_Java_pc,
+    // which cannot be done if the ucontext sp matches last_Java_sp.
+    // The stack walking utilities assume last_Java_pc is set if the
+    // anchor is marked flushed.
+    jt->frame_anchor()->make_walkable(jt);
+  }
+
+  // If we have a walkable last_Java_frame, then we should use it
+  // even if isInJava == true. It should be more reliable than
+  // ucontext info.
+  if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
+    *fr_addr = jt->pd_last_frame();
+    return true;
+  }
+
+  ucontext_t* uc = (ucontext_t*) ucontext;
+
+  // At this point, we don't have a walkable last_Java_frame, so
+  // we try to glean some information out of the ucontext.
+  intptr_t* ret_sp;
+  ExtendedPC addr =
+    os::fetch_frame_from_context(uc, &ret_sp,
+                                 NULL /* ret_fp only used on X86 */);
+  if (addr.pc() == NULL || ret_sp == NULL) {
+    // ucontext wasn't useful
+    return false;
+  }
+
+  // we were running Java code when SIGPROF came in
+  if (isInJava) {
+    // If we have a last_Java_sp, then the SIGPROF signal caught us
+    // right when we were transitioning from _thread_in_Java to a new
+    // JavaThreadState. We use last_Java_sp instead of the sp from
+    // the ucontext since it should be more reliable.
+    if (jt->has_last_Java_frame()) {
+      ret_sp = jt->last_Java_sp();
+    }
+    // Implied else: we don't have a last_Java_sp so we use what we
+    // got from the ucontext.
+
+    frame ret_frame(ret_sp, frame::unpatchable, addr.pc());
+    if (!ret_frame.safe_for_sender(jt)) {
+      // nothing else to try if the frame isn't good
+      return false;
+    }
+    *fr_addr = ret_frame;
+    return true;
+  }
+
+  // At this point, we know we weren't running Java code. We might
+  // have a last_Java_sp, but we don't have a walkable frame.
+  // However, we might still be able to construct something useful
+  // if the thread was running native code.
+  if (jt->has_last_Java_frame()) {
+    assert(!jt->frame_anchor()->walkable(), "case covered above");
+
+    if (jt->thread_state() == _thread_in_native) {
+      frame ret_frame(jt->last_Java_sp(), frame::unpatchable, addr.pc());
+      if (!ret_frame.safe_for_sender(jt)) {
+        // nothing else to try if the frame isn't good
+        return false;
+      }
+      *fr_addr = ret_frame;
+      return true;
+    }
+  }
+
+  // nothing else to try
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/thread_linux_sparc.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+private:
+
+  void pd_initialize() {
+    _anchor.clear();
+    _base_of_stack_pointer        = NULL;
+  }
+
+  frame pd_last_frame() {
+    assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+    assert(_anchor.walkable(), "thread has not dumped its register windows yet");
+
+    assert(_anchor.last_Java_pc() != NULL, "Ack no pc!");
+    return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
+  }
+
+  // Sometimes the trap handler needs to record both PC and NPC.
+  // This is a SPARC-specific companion to Thread::set_saved_exception_pc.
+  address _saved_exception_npc;
+
+  // In polling_page_safepoint_handler_blob(s) we have to tail call other
+  // blobs without blowing any registers.  A tail call requires some
+  // register to jump with and we can't blow any registers, so it must
+  // be restored in the delay slot.  'restore' cannot be used as it
+  // will chop the heads off of 64-bit %o registers in the 32-bit
+  // build.  Instead we reload the registers using G2_thread and this
+  // location.  Must be 64 bits in the 32-bit LION build.
+  jdouble _o_reg_temps[6];
+
+  // A stack pointer older than any Java frame's stack pointer. It is
+  // used to validate stack pointers in frame::next_younger_sp (it
+  // provides the upper bound in the range check). This is necessary
+  // on Solaris/SPARC since the ucontext passed to a signal handler is
+  // sometimes corrupt and we need a way to check the extracted sp.
+  intptr_t* _base_of_stack_pointer;
+
+public:
+
+  static int o_reg_temps_offset_in_bytes() { return offset_of(JavaThread, _o_reg_temps); }
+
+#ifndef _LP64
+  address o_reg_temps(int i) { return (address)&_o_reg_temps[i]; }
+#endif
+
+  static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
+
+  address  saved_exception_npc()             { return _saved_exception_npc; }
+  void set_saved_exception_npc(address a)    { _saved_exception_npc = a; }
+
+
+public:
+
+  intptr_t* base_of_stack_pointer() { return _base_of_stack_pointer; }
+
+  void set_base_of_stack_pointer(intptr_t* base_sp) {
+    _base_of_stack_pointer = base_sp;
+  }
+
+  void record_base_of_stack_pointer() {
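+    // Flush this thread's register windows, then follow the chain of
+    // saved frame pointers to the oldest frame; STACK_BIAS converts the
+    // biased sp values of the 64-bit ABI into real addresses.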
+    intptr_t *sp = (intptr_t *)(((intptr_t)StubRoutines::Sparc::flush_callers_register_windows_func()()));
+    intptr_t *ysp;
+    while((ysp = (intptr_t*)sp[FP->sp_offset_in_saved_window()]) != NULL) {
+      sp = (intptr_t *)((intptr_t)ysp + STACK_BIAS);
+    }
+    _base_of_stack_pointer = sp;
+  }
+
+  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
+    bool isInJava);
+
+  // These routines are only used on cpu architectures that
+  // have separate register stacks (Itanium).
+  static bool register_stack_overflow() { return false; }
+  static void enable_register_stack_guard() {}
+  static void disable_register_stack_guard() {}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/vmStructs_linux_sparc.hpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// These are the OS and CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
+                                                                                                                                     \
+  /******************************/                                                                                                   \
+  /* Threads (NOTE: incomplete) */                                                                                                   \
+  /******************************/                                                                                                   \
+                                                                                                                                     \
+  nonstatic_field(JavaThread,                  _base_of_stack_pointer,                        intptr_t*)                             \
+  nonstatic_field(OSThread,                    _thread_id,                                    pid_t)                                 \
+  nonstatic_field(OSThread,                    _pthread_id,                                   pthread_t)                             \
+  /* This must be the last entry, and must be present */                                                                             \
+  last_entry()
+
+
+#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
+                                                                          \
+  /**********************/                                                \
+  /* POSIX Thread IDs */                                                  \
+  /**********************/                                                \
+                                                                          \
+  declare_integer_type(pid_t)                                             \
+  declare_unsigned_integer_type(pthread_t)                                \
+                                                                          \
+  /* This must be the last entry, and must be present */                  \
+  last_entry()
+
+
+#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+                                                                        \
+  /************************/                                            \
+  /* JavaThread constants */                                            \
+  /************************/                                            \
+                                                                        \
+  declare_constant(JavaFrameAnchor::flushed)                            \
+                                                                        \
+  /* This must be the last entry, and must be present */                \
+  last_entry()
+
+#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+                                                                        \
+  /* This must be the last entry, and must be present */                \
+  last_entry()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri May 09 05:26:59 2008 -0700
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vm_version_linux_sparc.cpp.incl"
+
+static bool detect_niagara() {
+  char cpu[128];
+  bool rv = false;
+
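+  // Look for the "cpu" line of /proc/cpuinfo; on Niagara (UltraSPARC T1)
+  // systems the model string contains "Niagara".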
+  FILE* fp = fopen("/proc/cpuinfo", "r");
+  if (fp == NULL) {
+    return rv;
+  }
+
+  while (!feof(fp)) {
+    if (fscanf(fp, "cpu\t\t: %100[^\n]", cpu) == 1) {
+      if (strstr(cpu, "Niagara") != NULL) {
+        rv = true;
+      }
+      break;
+    } else {
+      // Consume one character so a non-matching line cannot stall the scan.
+      fgetc(fp);
+    }
+  }
+
+  fclose(fp);
+
+  return rv;
+}
+
+int VM_Version::platform_features(int features) {
+  // Default to generic v9
+  features = generic_v9_m;
+
+  if (detect_niagara()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");)
+    features = niagara1_m;
+  }
+
+  return features;
+}
--- a/src/share/vm/classfile/classFileParser.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri May 09 05:26:59 2008 -0700
@@ -34,7 +34,7 @@
 
 #define JAVA_CLASSFILE_MAGIC              0xCAFEBABE
 #define JAVA_MIN_SUPPORTED_VERSION        45
-#define JAVA_MAX_SUPPORTED_VERSION        50
+#define JAVA_MAX_SUPPORTED_VERSION        51
 #define JAVA_MAX_SUPPORTED_MINOR_VERSION  0
 
 // Used for two backward compatibility reasons:
--- a/src/share/vm/classfile/verifier.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/classfile/verifier.cpp	Fri May 09 05:26:59 2008 -0700
@@ -1205,7 +1205,7 @@
         case Bytecodes::_arraylength :
           type = current_frame.pop_stack(
             VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!type.is_array()) {
+          if (!(type.is_null() || type.is_array())) {
             verify_error(bci, bad_type_msg, "arraylength");
           }
           current_frame.push_stack(
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp	Fri May 09 05:26:59 2008 -0700
@@ -44,52 +44,12 @@
   bool lock_owned = lock->owned_by_self();
   if (lock_owned) {
     MutexUnlocker mul(lock);
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   } else {
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   }
 }
 
-HeapWord* CMSPermGen::mem_allocate_work(size_t size) {
-  assert(!_gen->freelistLock()->owned_by_self(), "Potetntial deadlock");
-
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = NULL;
-
-  obj = _gen->allocate(size, false);
-  // Since we want to minimize pause times, we will prefer
-  // expanding the perm gen rather than doing a stop-world
-  // collection to satisfy the allocation request.
-  if (obj == NULL) {
-    // Try to expand the perm gen and allocate space.
-    obj = _gen->expand_and_allocate(size, false, false);
-    if (obj == NULL) {
-      // Let's see if a normal stop-world full collection will
-      // free up enough space.
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // The collection above may have shrunk the space, so try
-        // to expand again and allocate space.
-        obj = _gen->expand_and_allocate(size, false, false);
-      }
-      if (obj == NULL) {
-        // We have not been able to allocate space despite a
-        // full stop-world collection. We now make a last-ditch collection
-        // attempt (in which soft refs are all aggressively freed)
-        // that will try to reclaim as much space as possible.
-        SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-        obj = _gen->allocate(size, false);
-        if (obj == NULL) {
-          // Expand generation in case it was shrunk following the collection.
-          obj = _gen->expand_and_allocate(size, false, false);
-        }
-      }
-    }
-  }
-  return obj;
-}
-
 void CMSPermGen::compute_new_size() {
   _gen->compute_new_size();
 }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp	Fri May 09 05:26:59 2008 -0700
@@ -29,7 +29,6 @@
 class CMSPermGen:  public PermGen {
   friend class VMStructs;
 
-  HeapWord* mem_allocate_work(size_t size);
  protected:
   // The "generation" view.
   ConcurrentMarkSweepGeneration* _gen;
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri May 09 05:26:59 2008 -0700
@@ -590,6 +590,31 @@
       full_gc_count = Universe::heap()->total_full_collections();
 
       result = perm_gen()->allocate_permanent(size);
+
+      if (result != NULL) {
+        return result;
+      }
+
+      if (GC_locker::is_active_and_needs_gc()) {
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // GC is allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          GC_locker::stall_until_clear();
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
     }
 
     if (result == NULL) {
@@ -622,6 +647,12 @@
       if (op.prologue_succeeded()) {
         assert(Universe::heap()->is_in_permanent_or_null(op.result()),
           "result not in heap");
+        // If GC was locked out during VM operation then retry allocation
+        // and/or stall as necessary.
+        if (op.gc_locked()) {
+          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+          continue;  // retry and/or stall as necessary
+        }
        // If a NULL result is being returned, an out-of-memory
         // will be thrown now.  Clear the gc_time_limit_exceeded
         // flag to avoid the following situation.
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri May 09 05:26:59 2008 -0700
@@ -169,8 +169,9 @@
   size_t large_typearray_limit() { return FastAllocateSizeLimit; }
 
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
-  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : NULL; }
-  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : NULL; }
+
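+  // With UseNUMA, inline contiguous allocation is unsupported (see above),
+  // so return a distinct non-NULL sentinel rather than real addresses.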
+  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
+  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
 
   void ensure_parsability(bool retire_tlabs);
   void accumulate_statistics_all_tlabs();
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri May 09 05:26:59 2008 -0700
@@ -976,7 +976,7 @@
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 
   // Increment the invocation count
-  heap->increment_total_collections();
+  heap->increment_total_collections(true);
 
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;
@@ -1941,7 +1941,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
-  if (GC_locker::is_active()) {
+  if (GC_locker::check_active_before_gc()) {
     return;
   }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri May 09 05:26:59 2008 -0700
@@ -69,6 +69,9 @@
 
   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_permanent_mem_allocate(_size);
+  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
   notify_gc_end();
 }
 
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Fri May 09 05:26:59 2008 -0700
@@ -46,9 +46,11 @@
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
     MutableSpace *s = ls->space();
-    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-    if (top < s->end()) {
-      ls->add_invalid_region(MemRegion(top, s->end()));
+    if (!os::numa_has_static_binding()) {
+      HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+      if (top < s->end()) {
+        ls->add_invalid_region(MemRegion(top, s->end()));
+      }
     }
     s->mangle_unused_area();
   }
@@ -70,32 +72,36 @@
                                     area_touched_words);
         }
 #endif
-        MemRegion invalid;
-        HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
-        HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
-                                                     os::vm_page_size());
-        if (crossing_start != crossing_end) {
-          // If object header crossed a small page boundary we mark the area
-          // as invalid rounding it to a page_size().
-          HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-          HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
-                               s->end());
-          invalid = MemRegion(start, end);
+        if (!os::numa_has_static_binding()) {
+          MemRegion invalid;
+          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
+          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
+                                                       os::vm_page_size());
+          if (crossing_start != crossing_end) {
+            // If object header crossed a small page boundary we mark the area
+            // as invalid rounding it to a page_size().
+            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
+                                 s->end());
+            invalid = MemRegion(start, end);
+          }
+
+          ls->add_invalid_region(invalid);
         }
-
-        ls->add_invalid_region(invalid);
         s->set_top(s->end());
       }
     } else {
+      if (!os::numa_has_static_binding()) {
 #ifdef ASSERT
-      MemRegion invalid(s->top(), s->end());
-      ls->add_invalid_region(invalid);
-#else
-      if (ZapUnusedHeapArea) {
         MemRegion invalid(s->top(), s->end());
         ls->add_invalid_region(invalid);
-      } else break;
+#else
+        if (ZapUnusedHeapArea) {
+          MemRegion invalid(s->top(), s->end());
+          ls->add_invalid_region(invalid);
+        } else break;
 #endif
+      }
     }
   }
 }
@@ -194,7 +200,7 @@
 }
 
 // Bias region towards the first-touching lgrp. Set the right page sizes.
-void MutableNUMASpace::bias_region(MemRegion mr) {
+void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
   if (end > start) {
@@ -202,9 +208,13 @@
     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
     assert(region().contains(aligned_region), "Sanity");
-    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
+    // First we tell the OS which page size we want in the given range. The underlying
+    // large page can be broken down if we require small pages.
     os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
-    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size());
+    // Then we uncommit the pages in the range.
+    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
+    // And make them local/first-touch biased.
+    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
   }
 }
 
@@ -233,10 +243,12 @@
     initialize(region(), true);
   } else {
     bool should_initialize = false;
-    for (int i = 0; i < lgrp_spaces()->length(); i++) {
-      if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
-        should_initialize = true;
-        break;
+    if (!os::numa_has_static_binding()) {
+      for (int i = 0; i < lgrp_spaces()->length(); i++) {
+        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
+          should_initialize = true;
+          break;
+        }
       }
     }
 
@@ -472,8 +484,8 @@
       intersection = MemRegion(new_region.start(), new_region.start());
     }
     select_tails(new_region, intersection, &bottom_region, &top_region);
-    bias_region(bottom_region);
-    bias_region(top_region);
+    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
+    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
   }
 
   // Check if the space layout has changed significantly?
@@ -545,22 +557,37 @@
       intersection = MemRegion(new_region.start(), new_region.start());
     }
 
-    MemRegion invalid_region = ls->invalid_region().intersection(new_region);
-    if (!invalid_region.is_empty()) {
-      merge_regions(new_region, &intersection, &invalid_region);
-      free_region(invalid_region);
+    if (!os::numa_has_static_binding()) {
+      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
+      // An invalid region is a range of memory that may have been
+      // allocated on another node. That is relevant only on Solaris,
+      // where there is no static memory binding.
+      if (!invalid_region.is_empty()) {
+        merge_regions(new_region, &intersection, &invalid_region);
+        free_region(invalid_region);
+        ls->set_invalid_region(MemRegion());
+      }
     }
+
     select_tails(new_region, intersection, &bottom_region, &top_region);
-    free_region(bottom_region);
-    free_region(top_region);
+
+    if (!os::numa_has_static_binding()) {
+      // If that's a system with the first-touch policy then it's enough
+      // to free the pages.
+      free_region(bottom_region);
+      free_region(top_region);
+    } else {
+      // In a system with static binding we have to change the bias whenever
+      // we reshape the heap.
+      bias_region(bottom_region, ls->lgrp_id());
+      bias_region(top_region, ls->lgrp_id());
+    }
 
     // If we clear the region, we would mangle it in debug. That would cause page
     // allocation in a different place. Hence setting the top directly.
     s->initialize(new_region, false);
     s->set_top(s->bottom());
 
-    ls->set_invalid_region(MemRegion());
-
     set_adaptation_cycles(samples_count());
   }
 }
@@ -575,7 +602,7 @@
     HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
 
     if (s->contains(value)) {
-      if (top < value && top < s->end()) {
+      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
         ls->add_invalid_region(MemRegion(top, value));
       }
       s->set_top(value);
@@ -584,10 +611,10 @@
         if (found_top) {
             s->set_top(s->bottom());
         } else {
-            if (top < s->end()) {
-              ls->add_invalid_region(MemRegion(top, s->end()));
-            }
-            s->set_top(s->end());
+          if (!os::numa_has_static_binding() && top < s->end()) {
+            ls->add_invalid_region(MemRegion(top, s->end()));
+          }
+          s->set_top(s->end());
         }
     }
   }
@@ -601,11 +628,23 @@
   }
 }
 
+/*
+   Linux supports static memory binding, so most of the logic dealing
+   with possibly invalid page allocation is effectively disabled.
+   There is also no notion of a home node in Linux: a thread may
+   migrate freely, although the scheduler is rather reluctant to move
+   threads between nodes. We therefore check the current node on every
+   allocation; with high probability a thread stays on the same node
+   for some time, allowing local access to recently allocated objects.
+ */
+
 HeapWord* MutableNUMASpace::allocate(size_t size) {
-  int lgrp_id = Thread::current()->lgrp_id();
-  if (lgrp_id == -1) {
+  Thread* thr = Thread::current();
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
     lgrp_id = os::numa_get_group_id();
-    Thread::current()->set_lgrp_id(lgrp_id);
+    thr->set_lgrp_id(lgrp_id);
   }
 
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
@@ -628,22 +667,22 @@
       MutableSpace::set_top(s->top());
     }
   }
-  // Make the page allocation happen here.
-  if (p != NULL) {
+  // Make the page allocation happen here if there is no static binding.
+  if (p != NULL && !os::numa_has_static_binding()) {
     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
       *(int*)i = 0;
     }
   }
-
   return p;
 }
 
 // This version is lock-free.
 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
-  int lgrp_id = Thread::current()->lgrp_id();
-  if (lgrp_id == -1) {
+  Thread* thr = Thread::current();
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
     lgrp_id = os::numa_get_group_id();
-    Thread::current()->set_lgrp_id(lgrp_id);
+    thr->set_lgrp_id(lgrp_id);
   }
 
   int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
@@ -670,8 +709,8 @@
     }
   }
 
-  // Make the page allocation happen here.
-  if (p != NULL) {
+  // Make the page allocation happen here if there is no static binding.
+  if (p != NULL && !os::numa_has_static_binding()) {
     for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
       *(int*)i = 0;
     }
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Fri May 09 05:26:59 2008 -0700
@@ -139,8 +139,8 @@
   // Check if the NUMA topology has changed. Add and remove spaces if needed.
   // The update can be forced by setting the force parameter equal to true.
   bool update_layout(bool force);
-  // Bias region towards the first-touching lgrp.
-  void bias_region(MemRegion mr);
+  // Bias region towards the lgrp.
+  void bias_region(MemRegion mr, int lgrp_id);
   // Free pages in a given region.
   void free_region(MemRegion mr);
   // Get current chunk size.
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri May 09 05:26:59 2008 -0700
@@ -144,3 +144,18 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
   notify_gc_end();
 }
+
+void VM_GenCollectForPermanentAllocation::doit() {
+  JvmtiGCForAllocationMarker jgcm;
+  notify_gc_begin(true);
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  GCCauseSetter gccs(gch, _gc_cause);
+  gch->do_full_collection(gch->must_clear_all_soft_refs(),
+                          gch->n_gens() - 1);
+  _res = gch->perm_gen()->allocate(_size, false);
+  assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
+  notify_gc_end();
+}
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri May 09 05:26:59 2008 -0700
@@ -43,6 +43,7 @@
 //     is specified; and also the attach "inspectheap" operation
 //
 //  VM_GenCollectForAllocation
+//  VM_GenCollectForPermanentAllocation
 //  VM_ParallelGCFailedAllocation
 //  VM_ParallelGCFailedPermanentAllocation
 //   - this operation is invoked when allocation is failed;
@@ -166,3 +167,23 @@
   virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
   virtual void doit();
 };
+
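+// A VM operation that runs a full collection and then retries the
+// permanent generation allocation on behalf of the requesting thread.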
+class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
+ private:
+  HeapWord*   _res;
+  size_t      _size;                       // size of object to be allocated
+ public:
+  VM_GenCollectForPermanentAllocation(size_t size,
+                                      unsigned int gc_count_before,
+                                      unsigned int full_gc_count_before,
+                                      GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before, true),
+      _size(size) {
+    _res = NULL;
+    _gc_cause = gc_cause;
+  }
+  ~VM_GenCollectForPermanentAllocation()  {}
+  virtual VMOp_Type type() const { return VMOp_GenCollectForPermanentAllocation; }
+  virtual void doit();
+  HeapWord* result() const       { return _res; }
+};
--- a/src/share/vm/includeDB_core	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/includeDB_core	Fri May 09 05:26:59 2008 -0700
@@ -718,6 +718,11 @@
 ciObjArray.cpp                          ciUtilities.hpp
 ciObjArray.cpp                          objArrayOop.hpp
 
+ciObjArray.cpp                          ciObjArray.hpp
+ciObjArray.cpp                          ciNullObject.hpp
+
 ciObjArrayKlass.cpp                     ciInstanceKlass.hpp
 ciObjArrayKlass.cpp                     ciObjArrayKlass.hpp
 ciObjArrayKlass.cpp                     ciObjArrayKlassKlass.hpp
@@ -1662,6 +1667,7 @@
 
 gcLocker.cpp                            gcLocker.inline.hpp
 gcLocker.cpp                            sharedHeap.hpp
+gcLocker.cpp                            resourceArea.hpp
 
 gcLocker.hpp                            collectedHeap.hpp
 gcLocker.hpp                            genCollectedHeap.hpp
@@ -3094,13 +3100,14 @@
 oopMap.cpp                              signature.hpp
 
 oopMap.hpp                              allocation.hpp
 oopMap.hpp                              compressedStream.hpp
 oopMap.hpp                              growableArray.hpp
 oopMap.hpp                              vmreg.hpp
 
 oopMapCache.cpp                         allocation.inline.hpp
+oopMapCache.cpp                         jvmtiRedefineClassesTrace.hpp
 oopMapCache.cpp                         handles.inline.hpp
-oopMapCache.cpp                         jvmtiRedefineClassesTrace.hpp
 oopMapCache.cpp                         oop.inline.hpp
 oopMapCache.cpp                         oopMapCache.hpp
 oopMapCache.cpp                         resourceArea.hpp
@@ -3207,6 +3214,7 @@
 os_<os_family>.cpp                      extendedPC.hpp
 os_<os_family>.cpp                      filemap.hpp
 os_<os_family>.cpp                      globals.hpp
+os_<os_family>.cpp                      growableArray.hpp
 os_<os_family>.cpp                      hpi.hpp
 os_<os_family>.cpp                      icBuffer.hpp
 os_<os_family>.cpp                      interfaceSupport.hpp
@@ -3348,6 +3356,10 @@
 permGen.cpp                             oop.inline.hpp
 permGen.cpp                             permGen.hpp
 permGen.cpp                             universe.hpp
+permGen.cpp                             gcLocker.hpp
+permGen.cpp                             gcLocker.inline.hpp
+permGen.cpp                             vmGCOperations.hpp
+permGen.cpp                             vmThread.hpp
 
 permGen.hpp                             gcCause.hpp
 permGen.hpp                             generation.hpp
--- a/src/share/vm/memory/gcLocker.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/memory/gcLocker.cpp	Fri May 09 05:26:59 2008 -0700
@@ -32,6 +32,12 @@
 
 void GC_locker::stall_until_clear() {
   assert(!JavaThread::current()->in_critical(), "Would deadlock");
+  if (PrintJNIGCStalls && PrintGCDetails) {
+    ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+    gclog_or_tty->print_cr(
+      "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
+      JavaThread::current()->name());
+  }
   MutexLocker   ml(JNICritical_lock);
   // Wait for _needs_gc  to be cleared
   while (GC_locker::needs_gc()) {
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri May 09 05:26:59 2008 -0700
@@ -35,6 +35,7 @@
   friend class CMSCollector;
   friend class GenMarkSweep;
   friend class VM_GenCollectForAllocation;
+  friend class VM_GenCollectForPermanentAllocation;
   friend class VM_GenCollectFull;
   friend class VM_GenCollectFullConcurrent;
   friend class VM_GC_HeapInspection;
--- a/src/share/vm/memory/permGen.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/memory/permGen.cpp	Fri May 09 05:26:59 2008 -0700
@@ -25,6 +25,70 @@
 #include "incls/_precompiled.incl"
 #include "incls/_permGen.cpp.incl"
 
+HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
+  MutexLocker ml(Heap_lock);
+  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
+  GCCause::Cause prev_cause = GCCause::_no_gc;
+
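+  // Policy: try a plain allocation; failing that, expand the generation
+  // (within the expansion limit, or unconditionally after a GC); then
+  // schedule a full collection and finally a last-ditch collection,
+  // retrying the allocation after each step.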
+  for (;;) {
+    HeapWord* obj = gen->allocate(size, false);
+    if (obj != NULL) {
+      return obj;
+    }
+    if (gen->capacity() < _capacity_expansion_limit ||
+        prev_cause != GCCause::_no_gc) {
+      obj = gen->expand_and_allocate(size, false);
+    }
+    if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+      if (GC_locker::is_active_and_needs_gc()) {
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // GC is allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          // Wait for JNI critical section to be exited
+          GC_locker::stall_until_clear();
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
+
+      // Read the GC count while holding the Heap_lock
+      unsigned int gc_count_before      = SharedHeap::heap()->total_collections();
+      unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
+      {
+        MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
+        VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
+                                               next_cause);
+        VMThread::execute(&op);
+        if (!op.prologue_succeeded() || op.gc_locked()) {
+          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+          continue;  // retry and/or stall as necessary
+        }
+        obj = op.result();
+        assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+               "result not in heap");
+        if (obj != NULL) {
+          return obj;
+        }
+      }
+      prev_cause = next_cause;
+      next_cause = GCCause::_last_ditch_collection;
+    } else {
+      return obj;
+    }
+  }
+}
+
 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
                                      ReservedSpace shared_rs,
                                      size_t initial_byte_size,
@@ -44,40 +108,7 @@
 }
 
 HeapWord* CompactingPermGen::mem_allocate(size_t size) {
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = _gen->allocate(size, false);
-  bool tried_collection = false;
-  bool tried_expansion = false;
-  while (obj == NULL) {
-    if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) {
-      // Expansion limit reached, try collection before expanding further
-      // For now we force a full collection, this could be changed
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      tried_collection = true;
-      tried_expansion =  false;    // ... following the collection:
-                                   // the collection may have shrunk the space.
-    }
-    if (obj == NULL && !tried_expansion) {
-      obj = _gen->expand_and_allocate(size, false);
-      tried_expansion = true;
-    }
-    if (obj == NULL && tried_collection && tried_expansion) {
-      // We have not been able to allocate despite a collection and
-      // an attempted space expansion. We now make a last-ditch collection
-      // attempt that will try to reclaim as much space as possible (for
-      // example by aggressively clearing all soft refs).
-      SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // An expansion attempt is necessary since the previous
-        // collection may have shrunk the space.
-        obj = _gen->expand_and_allocate(size, false);
-      }
-      break;
-    }
-  }
-  return obj;
+  return mem_allocate_in_gen(size, _gen);
 }
 
 void CompactingPermGen::compute_new_size() {
--- a/src/share/vm/memory/permGen.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/memory/permGen.hpp	Fri May 09 05:26:59 2008 -0700
@@ -38,6 +38,8 @@
   size_t _capacity_expansion_limit;  // maximum expansion allowed without a
                                     // full gc occurring
 
+  HeapWord* mem_allocate_in_gen(size_t size, Generation* gen);
+
  public:
   enum Name {
     MarkSweepCompact, MarkSweep, ConcurrentMarkSweep
--- a/src/share/vm/oops/oop.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/oops/oop.hpp	Fri May 09 05:26:59 2008 -0700
@@ -138,6 +138,10 @@
   // Need this as public for garbage collection.
   template <class T> T* obj_field_addr(int offset) const;
 
+  // Oop encoding heap max
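+  // (2^32 offsets scaled by the object alignment: 32 GB with the default
+  // 8-byte alignment.)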
+  static const uint64_t OopEncodingHeapMax =
+              (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
+
   static bool is_null(oop obj);
   static bool is_null(narrowOop obj);
 
--- a/src/share/vm/oops/oop.inline.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/oops/oop.inline.hpp	Fri May 09 05:26:59 2008 -0700
@@ -134,8 +134,10 @@
 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
   address heap_base = Universe::heap_base();
-  uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
-  assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
+  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1));
+  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+  uint64_t result = pd >> LogMinObjAlignmentInBytes;
+  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
   return (narrowOop)result;
 }
 
--- a/src/share/vm/runtime/arguments.cpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri May 09 05:26:59 2008 -0700
@@ -1125,6 +1125,11 @@
   }
 }
 
+inline uintx max_heap_for_compressed_oops() {
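+  // 64-bit: the compressed-oop encodable range less the perm gen (which
+  // shares it) and a page of slack; 32-bit: just DefaultMaxRAM.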
+  LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+  NOT_LP64(return DefaultMaxRAM);
+}
+
 bool Arguments::should_auto_select_low_pause_collector() {
   if (UseAutoGCSelectPolicy &&
       !FLAG_IS_DEFAULT(MaxGCPauseMillis) &&
@@ -1169,7 +1174,7 @@
   // field offset to determine free list chunk markers.
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
-  if (!UseConcMarkSweepGC && MaxHeapSize <= (32*G - os::vm_page_size())) {
+  if (!UseConcMarkSweepGC && MaxHeapSize <= max_heap_for_compressed_oops()) {
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
@@ -1205,7 +1210,10 @@
     if (FLAG_IS_DEFAULT(MaxHeapSize)) {
       const uint64_t reasonable_fraction =
         os::physical_memory() / DefaultMaxRAMFraction;
-      const uint64_t maximum_size = (uint64_t) DefaultMaxRAM;
+      const uint64_t maximum_size = (uint64_t)
+                 (FLAG_IS_DEFAULT(DefaultMaxRAM) && UseCompressedOops ?
+                     MIN2(max_heap_for_compressed_oops(), DefaultMaxRAM) :
+                     DefaultMaxRAM);
       size_t reasonable_max =
         (size_t) os::allocatable_physical_memory(reasonable_fraction);
       if (reasonable_max > maximum_size) {
--- a/src/share/vm/runtime/globals.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri May 09 05:26:59 2008 -0700
@@ -1928,6 +1928,10 @@
   develop(bool, IgnoreLibthreadGPFault, false,                              \
           "Suppress workaround for libthread GP fault")                     \
                                                                             \
+  product(bool, PrintJNIGCStalls, false,                                    \
+          "Print diagnostic message when GC is stalled"                     \
+          "by JNI critical section")                                        \
+                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
--- a/src/share/vm/runtime/os.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/runtime/os.hpp	Fri May 09 05:26:59 2008 -0700
@@ -33,6 +33,7 @@
 class Event;
 class DLL;
 class FileHandle;
+template<class E> class GrowableArray;
 
 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
 
@@ -206,7 +207,9 @@
   static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 
   // NUMA-specific interface
-  static void   numa_make_local(char *addr, size_t bytes);
+  static bool   numa_has_static_binding();
+  static bool   numa_has_group_homing();
+  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
   static void   numa_make_global(char *addr, size_t bytes);
   static size_t numa_get_groups_num();
   static size_t numa_get_leaf_groups(int *ids, size_t size);
--- a/src/share/vm/runtime/vm_operations.hpp	Wed May 07 08:06:46 2008 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Fri May 09 05:26:59 2008 -0700
@@ -49,6 +49,7 @@
   template(GenCollectFull)                        \
   template(GenCollectFullConcurrent)              \
   template(GenCollectForAllocation)               \
+  template(GenCollectForPermanentAllocation)      \
   template(ParallelGCFailedAllocation)            \
   template(ParallelGCFailedPermanentAllocation)   \
   template(ParallelGCSystemGC)                    \