src/cpu/x86/vm/c1_Runtime1_x86.cpp @ 7126:ce248dc0a656

removed all Graal modifications to ci and c1
author Doug Simon <doug.simon@oracle.com>
date Mon, 03 Dec 2012 17:54:05 +0100
parents 1baf7f1e3f23
children 5fc51c1ecdeb 96a337d307bd
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	7125:1baf7f1e3f23
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	7126:ce248dc0a656
@@ -36,11 +36,10 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_x86.inline.hpp"
 
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true);
 
 // Implementation of StubAssembler
 
 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
   // setup registers
@@ -111,27 +110,18 @@
       movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
     }
     if (metadata_result->is_valid()) {
       movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     }
-#ifdef GRAAL
-    // (thomaswue) Deoptimize in case of an exception.
-    restore_live_registers(this, false);
-    movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
-    leave();
-    movl(rscratch1, Deoptimization::make_trap_request(Deoptimization::Reason_constraint, Deoptimization::Action_reinterpret));
-    jump(RuntimeAddress(SharedRuntime::deopt_blob()->uncommon_trap()));
-#else
     if (frame_size() == no_frame_size) {
       leave();
       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
     } else if (_stub_id == Runtime1::forward_exception_id) {
       should_not_reach_here();
     } else {
       jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
     }
-#endif
     bind(L);
   }
   // get oop results if there are any and reset the values in the thread
   if (oop_result1->is_valid()) {
     get_vm_result(oop_result1, thread);
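
The GRAAL branch removed above did not forward a pending exception to C1's forward_exception stub; it deoptimized instead, packing a deoptimization reason and action into a single word in rscratch1 and jumping to the uncommon-trap blob. A minimal standalone C++ sketch of that request packing follows; the field widths and enum values are illustrative assumptions, not HotSpot's actual layout in deoptimization.hpp.

    #include <cstdio>

    enum DeoptReason { Reason_constraint = 5 };   // value assumed for illustration
    enum DeoptAction { Action_reinterpret = 2 };  // value assumed for illustration

    // Hypothetical stand-in for Deoptimization::make_trap_request: reason and
    // action share one int so a single scratch register (rscratch1 above) can
    // carry the whole request to the uncommon-trap blob.
    static int make_trap_request(DeoptReason reason, DeoptAction action) {
      return (reason << 3) | action;              // assumed: 3 bits for the action
    }

    int main() {
      printf("trap request: 0x%x\n",
             make_trap_request(Reason_constraint, Action_reinterpret));
      return 0;
    }
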
@@ -559,11 +549,11 @@
 
   __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
 }
 
 
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers/* = true*/) {
+static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
   __ block_comment("restore_live_registers");
 
   restore_fpu(sasm, restore_fpu_registers);
   __ popa();
 }
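
The signature change in this hunk is forced by a C++ rule rather than by Graal itself: a default argument may be specified on only one declaration of a function in a given scope. While the (now removed) forward declaration carried `= true`, the definition could only echo it in a comment; with the declaration gone, the default moves onto the definition. A minimal sketch, not HotSpot code:

    #include <cstdio>

    // A default argument may be given only once per scope, so it lives on
    // the forward declaration when one exists and on the definition otherwise.
    static void restore(bool fpu = true);        // default on the declaration...
    static void restore(bool fpu /* = true */) { // ...only echoed in a comment here
      printf("restore_fpu=%d\n", fpu);
    }

    int main() { restore(); return 0; }          // prints restore_fpu=1
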
@@ -613,29 +603,10 @@
 
 // target: the entry point of the method that creates and posts the exception oop
 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
 
 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
-  OopMapSet* oop_maps = new OopMapSet();
-#ifdef GRAAL
-  OopMap* oop_map = save_live_registers(sasm, 1);
-
-  // now all registers are saved and can be used freely
-  // verify that no old value is used accidentally
-  __ invalidate_registers(true, true, true, true, true, true);
-
-  // registers used by this stub
-  const Register temp_reg = rbx;
-
-  // load argument for exception that is passed as an argument into the stub
-  if (has_argument) {
-    __ movptr(c_rarg1, r10);
-  }
-  int call_offset = __ call_RT(noreg, noreg, target, has_argument ? 1 : 0);
-
-  oop_maps->add_gc_map(call_offset, oop_map);
-#else
   // preserve all registers
   int num_rt_args = has_argument ? 2 : 1;
   OopMap* oop_map = save_live_registers(sasm, num_rt_args);
 
   // now all registers are saved and can be used freely
@@ -654,12 +625,12 @@
     __ push(temp_reg);
 #endif // _LP64
   }
   int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
 
+  OopMapSet* oop_maps = new OopMapSet();
   oop_maps->add_gc_map(call_offset, oop_map);
-#endif
 
   __ stop("should not reach here");
 
   return oop_maps;
 }
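
Both the deleted GRAAL path and the surviving C1 path follow the same save/call/record pattern: save_live_registers returns an OopMap describing where registers were spilled, call_RT returns the code offset of the runtime call, and add_gc_map ties the two together so a GC occurring during the call can locate oops in the save area. A toy model of that association, with invented types standing in for HotSpot's OopMap machinery:

    #include <map>
    #include <vector>

    // Invented types, not HotSpot's: the GC looks up the PC offset of the
    // runtime call and learns which slots of the register-save area hold oops.
    struct OopMap { std::vector<int> oop_slots; };

    struct OopMapSet {
      std::map<int, OopMap> maps;               // keyed by code offset
      void add_gc_map(int call_offset, const OopMap& m) { maps[call_offset] = m; }
      const OopMap* find(int pc_offset) const {
        std::map<int, OopMap>::const_iterator it = maps.find(pc_offset);
        return it == maps.end() ? 0 : &it->second;
      }
    };

    int main() {
      OopMapSet set;
      OopMap m; m.oop_slots.push_back(0); m.oop_slots.push_back(3);
      set.add_gc_map(0x2a, m);                  // 0x2a plays the role of call_offset
      return set.find(0x2a) ? 0 : 1;
    }
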
@@ -1000,10 +971,11 @@
   __ ret(0);
 
   return oop_maps;
 }
 
+
 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
   // for better readability
   const bool must_gc_arguments = true;
   const bool dont_gc_arguments = false;
@@ -1046,14 +1018,10 @@
         Register t2 = rsi;
         assert_different_registers(klass, obj, obj_size, t1, t2);
 
         __ push(rdi);
         __ push(rbx);
-#ifdef GRAAL
-        __ push(rcx);
-        __ push(rsi);
-#endif
 
         if (id == fast_new_instance_init_check_id) {
           // make sure the klass is initialized
           __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
           __ jcc(Assembler::notEqual, slow_path);
@@ -1088,14 +1056,10 @@
 
           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
 
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
           __ ret(0);
 
           __ bind(try_eden);
@@ -1105,23 +1069,15 @@
           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
           __ incr_allocated_bytes(thread, obj_size, 0);
 
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
           __ ret(0);
 
           __ bind(slow_path);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
         }
 
         __ enter();
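
The fast_new_instance fast paths above are inline bump-pointer allocations; the GRAAL variant only saved and restored two extra scratch registers (rcx, rsi) around them. The TLAB case reduces to the shape below (illustrative names; the eden path under try_eden additionally needs an atomic update, since eden is shared between threads, plus the incr_allocated_bytes accounting):

    #include <cstddef>

    // Sketch of the bump-pointer allocation tlab_allocate emits inline; no
    // atomics are needed because a TLAB is private to one thread.
    struct Tlab { char* top; char* end; };

    static void* tlab_allocate(Tlab* tlab, std::size_t size_in_bytes) {
      char* obj     = tlab->top;
      char* new_top = obj + size_in_bytes;
      if (new_top > tlab->end) return 0;      // take the slow_path branch
      tlab->top = new_top;                    // bump the pointer
      return obj;                             // caller runs initialize_object next
    }

    int main() {
      char buf[64];
      Tlab t = { buf, buf + sizeof buf };
      return tlab_allocate(&t, 16) ? 0 : 1;
    }
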
@@ -1308,17 +1264,12 @@
 
         // This is called via call_runtime so the arguments
         // will be place in C abi locations
 
 #ifdef _LP64
-#ifdef GRAAL
-        __ verify_oop(j_rarg0);
-        __ mov(rax, j_rarg0);
-#else
         __ verify_oop(c_rarg0);
         __ mov(rax, c_rarg0);
-#endif
 #else
         // The object is passed on the stack and we haven't pushed a
         // frame yet so it's one work away from top of stack.
         __ movptr(rax, Address(rsp, 1 * BytesPerWord));
         __ verify_oop(rax);
@@ -1444,21 +1395,13 @@
         // This is called by pushing args and not with C abi
         __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
         __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
 
         Label miss;
-#ifdef GRAAL
-        Label success;
-        __ check_klass_subtype_fast_path(rsi, rax, rcx, &success, &miss, NULL);
-#endif
-
         __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
 
         // fallthrough on success:
-#ifdef GRAAL
-        __ bind(success);
-#endif
         __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
         __ pop(rax);
         __ pop(rcx);
         __ pop(rsi);
         __ pop(rdi);
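
The deleted lines put check_klass_subtype_fast_path in front of the slow path, which is HotSpot's two-phase subtype test: a constant-time check against the superclass display and a one-entry cache, falling back to a linear scan of the secondary supers. A simplified model with invented field names (not HotSpot's exact Klass layout):

    // Simplified model; field names and the fixed display depth are
    // illustrative, not HotSpot's exact Klass layout.
    struct Klass {
      static const int kDepthLimit = 8;
      const Klass*  primary_supers[kDepthLimit]; // "display" indexed by super depth
      int           super_depth;
      const Klass*  secondary_super_cache;       // one-entry cache for deep supers
      const Klass** secondary_supers;            // null-terminated here for brevity
    };

    static bool is_subtype_of(Klass* sub, const Klass* super) {
      // fast path: one indexed load for shallow supertypes...
      if (super->super_depth < Klass::kDepthLimit)
        return sub->primary_supers[super->super_depth] == super;
      // ...plus a one-entry cache for everything else
      if (sub->secondary_super_cache == super) return true;
      // slow path: linear scan of secondary supers, updating the cache on a hit
      for (const Klass** p = sub->secondary_supers; *p != 0; ++p)
        if (*p == super) { sub->secondary_super_cache = super; return true; }
      return false;
    }

    int main() { return 0; }  // the lookup above is the point; no harness needed
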
@@ -1860,246 +1803,10 @@
         __ pop(rax);
 
       }
       break;
 #endif // !SERIALGC
-#ifdef GRAAL
-    case graal_unwind_exception_call_id: {
-      // remove the frame from the stack
-      __ movptr(rsp, rbp);
-      __ pop(rbp);
-      // exception_oop is passed using ordinary java calling conventions
-      __ movptr(rax, j_rarg0);
-
-      Label nonNullExceptionOop;
-      __ testptr(rax, rax);
-      __ jcc(Assembler::notZero, nonNullExceptionOop);
-      {
-        __ enter();
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = save_live_registers(sasm, 0);
-        int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
-        oop_maps->add_gc_map(call_offset, oop_map);
-        __ leave();
-      }
-      __ bind(nonNullExceptionOop);
-
-      __ set_info("unwind_exception", dont_gc_arguments);
-      // note: no stubframe since we are about to leave the current
-      // activation and we are calling a leaf VM function only.
-      generate_unwind_exception(sasm);
-      __ should_not_reach_here();
-      break;
-    }
-
-    case graal_OSR_migration_end_id: {
-      __ enter();
-      save_live_registers(sasm, 0);
-      __ movptr(c_rarg0, j_rarg0);
-      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end)));
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_set_deopt_info_id: {
-      __ movptr(Address(r15_thread, JavaThread::graal_deopt_info_offset()), rscratch1);
-      __ ret(0);
-      break;
-    }
-
-    case graal_create_null_pointer_exception_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_create_out_of_bounds_exception_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(rax, noreg, (address)graal_create_out_of_bounds_exception, j_rarg0);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_vm_error_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_vm_error, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_printf_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_printf, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_primitive_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_primitive, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_object_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_verify_oop_id: {
-      // We use enter & leave so that a better stack trace is produced in the hs_err file
-      __ enter();
-      __ verify_oop(r13, "Graal verify oop");
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_arithmetic_frem_id: {
-      __ subptr(rsp, 8);
-      __ movflt(Address(rsp, 0), xmm1);
-      __ fld_s(Address(rsp, 0));
-      __ movflt(Address(rsp, 0), xmm0);
-      __ fld_s(Address(rsp, 0));
-      Label L;
-      __ bind(L);
-      __ fprem();
-      __ fwait();
-      __ fnstsw_ax();
-      __ testl(rax, 0x400);
-      __ jcc(Assembler::notZero, L);
-      __ fxch(1);
-      __ fpop();
-      __ fstp_s(Address(rsp, 0));
-      __ movflt(xmm0, Address(rsp, 0));
-      __ addptr(rsp, 8);
-      __ ret(0);
-      break;
-    }
-    case graal_arithmetic_drem_id: {
-      __ subptr(rsp, 8);
-      __ movdbl(Address(rsp, 0), xmm1);
-      __ fld_d(Address(rsp, 0));
-      __ movdbl(Address(rsp, 0), xmm0);
-      __ fld_d(Address(rsp, 0));
-      Label L;
-      __ bind(L);
-      __ fprem();
-      __ fwait();
-      __ fnstsw_ax();
-      __ testl(rax, 0x400);
-      __ jcc(Assembler::notZero, L);
-      __ fxch(1);
-      __ fpop();
-      __ fstp_d(Address(rsp, 0));
-      __ movdbl(xmm0, Address(rsp, 0));
-      __ addptr(rsp, 8);
-      __ ret(0);
-      break;
-    }
-    case graal_monitorenter_id: {
-      Label slow_case;
-
-      Register obj = j_rarg0;
-      Register lock = j_rarg1;
-
-      Register scratch1 = rax;
-      Register scratch2 = rbx;
-      assert_different_registers(obj, lock, scratch1, scratch2);
-
-      // copied from LIR_Assembler::emit_lock
-      if (UseFastLocking) {
-        assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-        __ lock_object(scratch1, obj, lock, scratch2, slow_case, false);
-        __ ret(0);
-      }
-
-      __ bind(slow_case);
-      {
-        StubFrame f(sasm, "graal_monitorenter", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
-
-        // Called with store_parameter and not C abi
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorenter), obj, lock);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm, save_fpu_registers);
-      }
-      __ ret(0);
-      break;
-    }
-    case graal_monitorexit_id: {
-      Label slow_case;
-
-      Register obj = j_rarg0;
-      Register lock = j_rarg1;
-
-      // needed in rax later on...
-      Register lock2 = rax;
-      __ mov(lock2, lock);
-      Register scratch1 = rbx;
-      assert_different_registers(obj, lock, scratch1, lock2);
-
-      // copied from LIR_Assembler::emit_lock
-      if (UseFastLocking) {
-        assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-        __ unlock_object(scratch1, obj, lock2, slow_case, false);
-        __ ret(0);
-      }
-
-      __ bind(slow_case);
-      {
-        StubFrame f(sasm, "graal_monitorexit", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
-
-        // note: really a leaf routine but must setup last java sp
-        // => use call_RT for now (speed can be improved by
-        // doing last java sp setup manually)
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorexit), obj, lock);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm, save_fpu_registers);
-      }
-      __ ret(0);
-      break;
-    }
-#endif
 
     default:
       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
         __ movptr(rax, (int)id);
         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
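
Among the stubs deleted above, graal_arithmetic_frem/drem spill the XMM arguments to the stack, run the x87 fprem instruction in a loop until bit 0x400 (the C2 flag, read via fnstsw_ax) reports the partial remainder is complete, then move the result back into xmm0. fprem computes a truncated-division remainder, i.e. the same value as C's fmod, which is what Java's % requires. A behavioral model, not the stub itself:

    #include <cmath>
    #include <cstdio>

    // Behavioral model of the removed frem/drem stubs: the x87 fprem loop
    // converges on the truncated-division remainder, which is what fmod
    // computes (fprem1 would give the IEEE round-to-nearest remainder instead).
    static double drem_model(double dividend, double divisor) {
      return std::fmod(dividend, divisor);
    }

    int main() {
      printf("%g\n", drem_model(10.5, 3.0));   // 1.5, returned in xmm0 by the stub
      printf("%g\n", drem_model(-10.5, 3.0));  // -1.5: sign follows the dividend
      return 0;
    }
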
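The deleted graal_monitorenter/monitorexit stubs reuse C1's lock_object/unlock_object fast paths, which depend on the displaced-header scheme (hence the assert that BasicLock's displaced header sits at offset 0). A hedged sketch of that idea with invented types and an assumed mark-word encoding; HotSpot's real bit layout differs:

    #include <atomic>
    #include <cstdint>

    // Sketch of stack locking: the header is "displaced" into an on-stack
    // BasicLock, then the object header is CAS'ed to point at that stack
    // slot; a failed CAS falls back to the slow path.
    struct BasicLock { uintptr_t displaced_header; };  // at offset 0, per the assert
    struct ObjHeader { std::atomic<uintptr_t> mark; };

    static bool fast_lock(ObjHeader* obj, BasicLock* lock) {
      // assumed encoding: low bit set == unlocked mark word
      uintptr_t unlocked = obj->mark.load(std::memory_order_relaxed) | 1u;
      lock->displaced_header = unlocked;               // save the real header on the stack
      uintptr_t expected = unlocked;
      return obj->mark.compare_exchange_strong(expected,
                                               reinterpret_cast<uintptr_t>(lock));
    }

    static bool fast_unlock(ObjHeader* obj, BasicLock* lock) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
      return obj->mark.compare_exchange_strong(expected, lock->displaced_header);
    }

    int main() {
      ObjHeader o{{0x7u}};                             // some unlocked mark value
      BasicLock bl{};
      return fast_lock(&o, &bl) && fast_unlock(&o, &bl) ? 0 : 1;
    }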