comparison src/cpu/sparc/vm/sharedRuntime_sparc.cpp @ 4970:33df1aeaebbf

Merge with http://hg.openjdk.java.net/hsx/hsx24/hotspot/
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Mon, 27 Feb 2012 13:10:13 +0100
parents 931e5f39e365
children 8a48c2906f91
comparing 4703:2cfb7fb2dce7 (old, "-" lines) with 4970:33df1aeaebbf (new, "+" lines)
@@ -1 +1 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -317 +317 @@
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
 // the following value.
 static int reg2offset(VMReg r) {
   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
+}
+
+static VMRegPair reg64_to_VMRegPair(Register r) {
+  VMRegPair ret;
+  if (wordSize == 8) {
+    ret.set2(r->as_VMReg());
+  } else {
+    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
+  }
+  return ret;
 }

 // ---------------------------------------------------------------------------
 // Read the array of BasicTypes from a signature, and compute where the
 // arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
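
The hoisted reg64_to_VMRegPair helper packs a 64-bit register into a VMRegPair: on LP64 (wordSize == 8) a single register covers both halves via set2, while on 32-bit SPARC the value occupies a register pair. reg2offset above turns an abstract stack slot into a byte offset past the ABI-reserved area; a standalone sketch of that arithmetic, with stand-in constants (the real values come from the SPARC calling-convention code, not from here):

    #include <cstdio>

    // Stand-ins for illustration only; not the real SPARC ABI values.
    static const int kOutPreserveStackSlots = 16; // out_preserve_stack_slots()
    static const int kStackSlotSize         = 4;  // VMRegImpl::stack_slot_size

    static int reg2offset_sketch(int reg2stack_slot) {
      return (reg2stack_slot + kOutPreserveStackSlots) * kStackSlotSize;
    }

    int main() {
      printf("slot 3 -> byte offset %d\n", reg2offset_sketch(3)); // (3+16)*4 = 76
      return 0;
    }
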
@@ -1442 +1452 @@
     __ mov(src.first()->as_Register(), dst.first()->as_Register());
   }
 }


+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
+      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
+    } else {
+      // stack to reg
+      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
+  } else {
+    __ mov(src.first()->as_Register(), dst.first()->as_Register());
+  }
+}
+
+
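The new move_ptr covers the four pointer-move cases (stack to stack via the L5 scratch register, stack to reg, reg to stack, reg to reg). Every stack access adds STACK_BIAS because the SPARC V9 ABI keeps %sp and %fp biased by 2047; a standalone sketch of how the bias cancels out (illustrative, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    static const intptr_t kStackBias = 2047; // SPARC V9 ABI bias; 0 on 32-bit

    // %sp holds real_sp - 2047, so every memory operand adds the bias back.
    static intptr_t effective_address(intptr_t biased_sp, intptr_t offset) {
      return biased_sp + kStackBias + offset;
    }

    int main() {
      intptr_t real_sp   = 0x7fff0000;
      intptr_t biased_sp = real_sp - kStackBias;
      printf("ok=%d\n", effective_address(biased_sp, 0) == real_sp); // ok=1
      return 0;
    }
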
 // An oop arg. Must pass a handle not the oop itself
 static void object_move(MacroAssembler* masm,
                         OopMap* map,
                         int oop_handle_offset,
                         int framesize_in_slots,
@@ -1746 +1775 @@
     __ mov(G2_thread, L7_thread_cache);
     *already_created = true;
   }
 }

+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // if map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  if (map != NULL) {
+    // Fill in the map
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        if (in_regs[i].first()->is_stack()) {
+          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+        } else if (in_regs[i].first()->is_Register()) {
+          map->set_oop(in_regs[i].first());
+        } else {
+          ShouldNotReachHere();
+        }
+      }
+    }
+  }
+
+  // Save or restore double word values
+  int handle_index = 0;
+  for (int i = 0; i < total_in_args; i++) {
+    int slot = handle_index + arg_save_area;
+    int offset = slot * VMRegImpl::stack_slot_size;
+    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
+      const Register reg = in_regs[i].first()->as_Register();
+      if (reg->is_global()) {
+        handle_index += 2;
+        assert(handle_index <= stack_slots, "overflow");
+        if (map != NULL) {
+          __ stx(reg, SP, offset + STACK_BIAS);
+        } else {
+          __ ldx(SP, offset + STACK_BIAS, reg);
+        }
+      }
+    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+      } else {
+        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+      }
+    }
+  }
+  // Save floats
+  for (int i = 0; i < total_in_args; i++) {
+    int slot = handle_index + arg_save_area;
+    int offset = slot * VMRegImpl::stack_slot_size;
+    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
+      handle_index++;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+      } else {
+        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+      }
+    }
+  }
+
+}
+
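save_or_restore_arguments is called twice around a blocking runtime call: first with a real OopMap (store registers into the frame and record array oops in the map), then with map == NULL (reload them). It makes two passes so that two-slot values (longs in global registers, doubles) are laid out before the single-slot floats. A standalone sketch of the slot accounting, assuming one global-register long, one double and one float need saving:

    #include <cstdio>

    int main() {
      const int kStackSlotSize = 4; // VMRegImpl::stack_slot_size
      int arg_save_area = 0, handle_index = 0;
      // pass 1: double-word values, two slots each
      int long_off   = (handle_index + arg_save_area) * kStackSlotSize; handle_index += 2;
      int double_off = (handle_index + arg_save_area) * kStackSlotSize; handle_index += 2;
      // pass 2: floats, one slot each
      int float_off  = (handle_index + arg_save_area) * kStackSlotSize; handle_index += 1;
      printf("long@%d double@%d float@%d\n", long_off, double_off, float_off); // 0 8 16
      return 0;
    }
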
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               const int stack_slots,
+                                               const int total_in_args,
+                                               const int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  AddressLiteral sync_state(GC_locker::needs_gc_address());
+  __ load_bool_contents(sync_state, G3_scratch);
+  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
+  __ delayed()->nop();
+
+  // Save down any values that are live in registers and call into the
+  // runtime to halt for a GC
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  __ mov(G2_thread, L7_thread_cache);
+
+  __ set_last_Java_frame(SP, noreg);
+
+  __ block_comment("block_for_jni_critical");
+  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
+  __ delayed()->mov(L7_thread_cache, O0);
+  oop_maps->add_gc_map( __ offset(), map);
+
+  __ restore_thread(L7_thread_cache); // restore G2_thread
+  __ reset_last_Java_frame();
+
+  // Reload all the register arguments
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ bind(cont);
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        if (reg->is_global()) {
+          __ mov(G0, reg);
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
+
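The fast path is a single load-and-branch on GC_locker::needs_gc; only when a GC is pending does the stub spill its arguments, publish an OopMap and call SharedRuntime::block_for_jni_critical (the thread pointer is moved into O0 in the call's delay slot). A standalone analogue of that shape (the helper bodies are stubs; only the structure mirrors the generated code):

    #include <atomic>

    static std::atomic<bool> needs_gc(false); // stand-in for GC_locker::needs_gc
    static void block_for_jni_critical() {}   // stand-in for the VM runtime entry

    inline void check_needs_gc_analogue() {
      if (needs_gc.load()) {        // load_bool_contents + cmp_zero_and_br above
        // save argument registers, set last_Java_frame, then:
        block_for_jni_critical();   // may block until the pending GC has run
        // restore argument registers, reset last_Java_frame
      }
    }
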
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+  // Pass the length, ptr pair
+  Label is_null, done;
+  if (reg.first()->is_stack()) {
+    VMRegPair tmp = reg64_to_VMRegPair(L2);
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  }
+  __ cmp(reg.first()->as_Register(), G0);
+  __ brx(Assembler::equal, false, Assembler::pt, is_null);
+  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
+  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
+  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
+  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
+  __ ba_short(done);
+  __ bind(is_null);
+  // Pass zeros
+  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
+  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
+  __ bind(done);
+}
+
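This is the array convention for critical natives: the callee never sees a jobject, only a (length, body) pair, and a null array degenerates to (0, NULL). A hedged illustration of the C side for a hypothetical static native int sum(int[] a) in class pkg.Cls (the JavaCritical_ prefix matches HotSpot's critical-entry lookup; the exact name mangling here is illustrative):

    #include <jni.h>

    // No JNIEnv* and no jclass: a critical native cannot call back into the VM.
    JNIEXPORT jint JNICALL JavaCritical_pkg_Cls_sum(jint a_len, jint* a_body) {
      jint s = 0;
      for (jint i = 0; i < a_len; i++) s += a_body[i]; // a_body is NULL only when a_len == 0
      return s;
    }
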
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method. The method takes arguments
 // in the Java compiled code convention, marshals them to the native
 // convention (handlizes oops, etc), transitions to native, makes the call,
 // returns to java state (possibly blocking), unhandlizes any result and
@@ -1760 +1949 @@
                                                 int total_in_args,
                                                 int comp_args_on_stack, // in VMRegStackSlots
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");

   // Native nmethod wrappers never take possesion of the oop arguments.
   // So the caller will gc the arguments. The only thing we need an
   // oopMap for is if the call is static
   //
@@ -1839 +2035 @@
   // on entry to the wrapper. We need to convert these args to where
   // the jni function will expect them. To figure out where they go
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)

-  int total_c_args = total_in_args + 1;
-  if (method->is_static()) {
-    total_c_args++;
+  int total_c_args = total_in_args;
+  int total_save_slots = 6 * VMRegImpl::slots_per_word;
+  if (!is_critical_native) {
+    total_c_args += 1;
+    if (method->is_static()) {
+      total_c_args++;
+    }
+  } else {
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // These have to be saved and restored across the safepoint
+        total_c_args++;
+      }
+    }
   }

   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-  VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;

   int argc = 0;
-  out_sig_bt[argc++] = T_ADDRESS;
-  if (method->is_static()) {
-    out_sig_bt[argc++] = T_OBJECT;
-  }
-
-  for (int i = 0; i < total_in_args ; i++ ) {
-    out_sig_bt[argc++] = in_sig_bt[i];
+  if (!is_critical_native) {
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+    SignatureStream ss(method->signature());
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[i] = T_BYTE; break;
+            case 'C': in_elem_bt[i] = T_CHAR; break;
+            case 'D': in_elem_bt[i] = T_DOUBLE; break;
+            case 'F': in_elem_bt[i] = T_FLOAT; break;
+            case 'I': in_elem_bt[i] = T_INT; break;
+            case 'J': in_elem_bt[i] = T_LONG; break;
+            case 'S': in_elem_bt[i] = T_SHORT; break;
+            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+        in_elem_bt[i] = T_VOID;
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
   }
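
So the C signature loses the JNIEnv*/jclass prefix in the critical case but gains one extra slot per array argument. A standalone check of the arithmetic for a hypothetical static native method taking (int[], long); note the long contributes a trailing T_VOID half-slot to total_in_args:

    #include <cstdio>

    int main() {
      // in_sig_bt = { T_ARRAY, T_LONG, T_VOID } => total_in_args = 3
      int total_in_args = 3, arrays = 1;
      int critical = total_in_args + arrays;              // (jint len, jint* body, jlong v)
      int regular  = total_in_args + 1 /*env*/ + 1 /*static: class*/;
      printf("critical=%d regular=%d\n", critical, regular); // critical=4 regular=5
      return 0;
    }
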

   // Now figure out where the args must be stored and how much stack space
   // they require (neglecting out_preserve_stack_slots but space for storing
   // the 1st six register arguments). It's weird see int_stk_helper.
   //
   int out_arg_slots;
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for ( int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_ARRAY:
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT: assert(reg->is_in(), "don't need to save these"); break;
+          case T_LONG: if (reg->is_global()) double_slots++; break;
+          default: ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT: single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default: ShouldNotReachHere();
+        }
+      }
+    }
+    total_save_slots = double_slots * 2 + single_slots;
+  }

   // Compute framesize for the wrapper. We need to handlize all oops in
   // registers. We must create space for them here that is disjoint from
   // the windowed save area because we have no control over when we might
   // flush the window again and overwrite values that gc has since modified.
@@ -1883 +2156 @@
   // First count the abi requirement plus all of the outgoing args
   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

   // Now the space for the inbound oop handle area

-  int oop_handle_offset = stack_slots;
-  stack_slots += 6*VMRegImpl::slots_per_word;
+  int oop_handle_offset = round_to(stack_slots, 2);
+  stack_slots += total_save_slots;

   // Now any space we need for handlizing a klass if static method

-  int oop_temp_slot_offset = 0;
   int klass_slot_offset = 0;
   int klass_offset = -1;
   int lock_slot_offset = 0;
   bool is_static = false;

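round_to(stack_slots, 2) starts the save area on an even slot so that the 8-byte stx/stf-D saves above stay 8-byte aligned (slots are 4 bytes each). A one-line sketch of that rounding:

    #include <cstdio>

    static int round_to_sketch(int x, int s) { return (x + s - 1) & ~(s - 1); } // s is a power of two

    int main() {
      printf("%d %d\n", round_to_sketch(7, 2), round_to_sketch(8, 2)); // 8 8
      return 0;
    }
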
@@ -1952 +2224 @@

   int frame_complete = ((intptr_t)__ pc()) - start;

   __ verify_thread();

+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
+                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+  }

   //
   // We immediately shuffle the arguments so that any vm call we have to
   // make from here on out (sync slow path, jvmti, etc.) we will have
   // captured the oops from our caller and have a valid oopMap for
@@ -1980 +2256 @@
   // more args than the caller doubling is enough to make
   // sure we can capture all the incoming oop args from the
   // caller.
   //
   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
-  int c_arg = total_c_args - 1;
   // Record sp-based slot for receiver on stack for non-static methods
   int receiver_offset = -1;

   // We move the arguments backward because the floating point registers
   // destination will always be to a register with a greater or equal register
@@ -2000 +2275 @@
     freg_destroyed[f] = false;
   }

 #endif /* ASSERT */

-  for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {

 #ifdef ASSERT
     if (in_regs[i].first()->is_Register()) {
       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
     } else if (in_regs[i].first()->is_FloatRegister()) {
@@ -2017 +2292 @@
     }
 #endif /* ASSERT */

     switch (in_sig_bt[i]) {
       case T_ARRAY:
+        if (is_critical_native) {
+          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
+          c_arg--;
+          break;
+        }
       case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
         break;
       case T_VOID:
         break;

       case T_FLOAT:
         float_move(masm, in_regs[i], out_regs[c_arg]);
         break;

       case T_DOUBLE:
         assert( i + 1 < total_in_args &&
                 in_sig_bt[i + 1] == T_VOID &&
                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
@@ -2049 +2330 @@
     }
   }

   // Pre-load a static method's oop into O1. Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static()) {
+  if (method->is_static() && !is_critical_native) {
     __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);

     // Now handlize the static class mirror in O1. It's known not-null.
     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
@@ -2062 +2343 @@


   const Register L6_handle = L6;

   if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
     __ mov(O1, L6_handle);
   }

   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
   // except O6/O7. So if we must call out we must push a new frame. We immediately
   // push a new frame and flush the windows.
-
 #ifdef _LP64
   intptr_t thepc = (intptr_t) __ pc();
   {
     address here = __ pc();
     // Call the next instruction
@@ -2200 +2481 @@
     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
   }

   // get JNIEnv* which is first argument to native
-
-  __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+  if (!is_critical_native) {
+    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+  }

   // Use that pc we placed in O7 a while back as the current frame anchor
-
   __ set_last_Java_frame(SP, O7);
+
+  // We flushed the windows ages ago now mark them as flushed before transitioning.
+  __ set(JavaFrameAnchor::flushed, G3_scratch);
+  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

   // Transition from _thread_in_Java to _thread_in_native.
   __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-
-  // We flushed the windows ages ago now mark them as flushed
-
-  // mark windows as flushed
-  __ set(JavaFrameAnchor::flushed, G3_scratch);
-
-  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

 #ifdef _LP64
-  AddressLiteral dest(method->native_function());
+  AddressLiteral dest(native_func);
   __ relocate(relocInfo::runtime_call_type);
   __ jumpl_to(dest, O7, O7);
 #else
-  __ call(method->native_function(), relocInfo::runtime_call_type);
+  __ call(native_func, relocInfo::runtime_call_type);
 #endif
-  __ delayed()->st(G3_scratch, flags);
+  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());

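Note the reworked transition: the frame-anchor flushed flag is now stored before the call sequence, and the _thread_in_native store rides in the call's delay slot, so the thread is marked in-native at the instant control transfers to the native entry. Roughly the emitted shape (a sketch; %g2 is G2_thread, %g3 is G3_scratch):

    // Sketch of the emitted SPARC call sequence:
    //
    //   set   _thread_in_native, %g3
    //   call  native_func                          ! relocInfo::runtime_call_type
    //    st   %g3, [%g2 + thread_state_offset()]   ! executed in the delay slot
    //
    // The state store sits in the delay slot, so the transition to
    // _thread_in_native takes effect together with the call itself.
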
   __ restore_thread(L7_thread_cache); // restore G2_thread

   // Unpack native results. For int-types, we do any needed sign-extension
   // and move things into I0. The return value there will survive any VM
@@ -2257 +2534 @@
       break; // Cannot de-handlize until after reclaiming jvm_lock
     default:
       ShouldNotReachHere();
   }

+  Label after_transition;
   // must we block?

   // Block, if necessary, before resuming in _thread_in_Java state.
   // In order for GC to work, don't clear the last_Java_sp until after blocking.
   { Label no_block;
@@ -2301 +2579 @@
     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
     // lets us share the oopMap we used when we went native rather the create
     // a distinct one for this pc
     //
     save_native_result(masm, ret_type, stack_slots);
-    __ call_VM_leaf(L7_thread_cache,
-                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-                    G2_thread);
+    if (!is_critical_native) {
+      __ call_VM_leaf(L7_thread_cache,
+                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+                      G2_thread);
+    } else {
+      __ call_VM_leaf(L7_thread_cache,
+                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
+                      G2_thread);
+    }

     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
+
+    if (is_critical_native) {
+      // The call above performed the transition to thread_in_Java so
+      // skip the transition logic below.
+      __ ba(after_transition);
+      __ delayed()->nop();
+    }
+
     __ bind(no_block);
   }

   // thread state is thread_in_native_trans. Any safepoint blocking has already
   // happened so we can now change state to _thread_in_Java.
-
-
   __ set(_thread_in_Java, G3_scratch);
   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
+  __ bind(after_transition);
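
For critical natives the slow path calls check_special_condition_for_native_trans_and_transition, which flips the thread to _thread_in_Java itself, so the stub branches over the unconditional state store to after_transition. A standalone control-flow analogue (all helpers are stubs; only the branch structure mirrors the generated code):

    static bool must_block() { return true; }
    static void save_native_result() {}
    static void restore_native_result() {}
    static void trans_only() {}           // check_special_condition_for_native_trans
    static void trans_and_transition() {} // ...: also moves the thread to in_Java
    static void set_thread_in_Java() {}

    void native_return_path(bool is_critical_native) {
      bool transitioned = false;
      if (must_block()) {
        save_native_result();
        if (!is_critical_native) trans_only();
        else { trans_and_transition(); transitioned = true; } // ba(after_transition)
        restore_native_result();
      }
      if (!transitioned) set_thread_in_Java();
      // after_transition:
    }
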

   Label no_reguard;
   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
   __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);

@@ -2414 +2704 @@
     __ mov(G0, I0);
     __ bind(L);
     __ verify_oop(I0);
   }

-  // reset handle block
-  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
-  __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
-
-  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
-  check_forward_pending_exception(masm, G3_scratch);
+  if (!is_critical_native) {
+    // reset handle block
+    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
+    __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
+
+    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
+    check_forward_pending_exception(masm, G3_scratch);
+  }


   // Return

 #ifndef _LP64
@@ -2448 +2740 @@
                                             frame_complete,
                                             stack_slots / VMRegImpl::slots_per_word,
                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                             in_ByteSize(lock_offset),
                                             oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
   return nm;

 }

 #ifdef HAVE_DTRACE_H
@@ -2470 +2766 @@
 // can use for converting the strings. (256 chars per string in the signature).
 // So any java string larger then this is truncated.

 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
 static bool offsets_initialized = false;
-
-static VMRegPair reg64_to_VMRegPair(Register r) {
-  VMRegPair ret;
-  if (wordSize == 8) {
-    ret.set2(r->as_VMReg());
-  } else {
-    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
-  }
-  return ret;
-}
-

 nmethod *SharedRuntime::generate_dtrace_nmethod(
     MacroAssembler *masm, methodHandle method) {


@@ -3144 +3429 @@
 void SharedRuntime::generate_deopt_blob() {
   // allocate space for the code
   ResourceMark rm;
   // setup code generation tools
   int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
+  if (UseStackBanging) {
+    pad += StackShadowPages*16 + 32;
+  }
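
With stack banging enabled, the deopt blob now budgets 16 bytes of code per shadow page plus 32 bytes of slop for the probe instructions; the same fix is applied to the uncommon-trap blob below. Worked numbers (StackShadowPages is platform-dependent; 20 here is only an example value):

    #include <cstdio>

    int main() {
      int StackShadowPages = 20;                       // example value only
      printf("pad += %d\n", StackShadowPages*16 + 32); // 352 extra buffer bytes
      return 0;
    }
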
 #ifdef _LP64
   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
 #else
   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
@@ -3363 +3651 @@
 void SharedRuntime::generate_uncommon_trap_blob() {
   // allocate space for the code
   ResourceMark rm;
   // setup code generation tools
   int pad = VerifyThread ? 512 : 0;
+  if (UseStackBanging) {
+    pad += StackShadowPages*16 + 32;
+  }
 #ifdef _LP64
   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
 #else
   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)