comparison src/cpu/sparc/vm/templateInterpreter_sparc.cpp @ 3839:3d42f82cd811

7063628: Use cbcond on T4
Summary: Add new short branch instruction to Hotspot sparc assembler.
Reviewed-by: never, twisti, jrose
author kvn
date Thu, 21 Jul 2011 11:25:07 -0700
parents d83ac25d0304
children dca455dea3a7 069ab3f976d3
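The mechanical pattern repeated throughout this change: each three-instruction compare-and-branch (a cmp or tst, the branch itself, and a nop filling the delay slot) becomes a single MacroAssembler call that can emit one cbcond instruction on T4. A minimal before/after sketch using names taken from the hunks below; the fallback behavior for targets out of cbcond's short range is an assumption drawn from the far branches this change leaves intact, not something stated on this page:

  // Before: compare, branch, and a wasted nop in the delay slot (3 instructions).
  __ cmp(G1_scratch, Bytecodes::_getfield);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
  __ delayed()->nop();

  // After: one call; on T4 this can shrink to a single cbcond, presumably
  // reverting to the old sequence when slow_path is out of short-branch range.
  __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);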
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
@@ -188,13 +188,11 @@
   Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
   const Register size = G1_scratch;
   if (EnableInvokeDynamic) {
     __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
-    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
-    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
   }
   __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
   __ bind(L_got_cache);
   __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
             ConstantPoolCacheEntry::flags_offset(), size);
@@ -205,12 +203,11 @@
 
   // out of the main line of code...
   if (EnableInvokeDynamic) {
     __ bind(L_giant_index);
     __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
-    __ ba(false, L_got_cache);
-    __ delayed()->nop();
+    __ ba_short(L_got_cache);
   }
 
   return entry;
 }
 
@@ -219,13 +216,11 @@
   address entry = __ pc();
   __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
   { Label L;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
-    __ tst(Gtemp);
-    __ brx(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(Gtemp, Assembler::pt, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
     __ should_not_reach_here();
     __ bind(L);
   }
   __ dispatch_next(state, step);
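The null-check variant of the same idea: a tst (or cmp against G0) followed by a branch on equal/notEqual collapses into br_null_short or br_notnull_short, which compare the register against zero directly. A minimal sketch of both senses, using the registers and labels from the surrounding hunks:

  // Before: test the register, branch on the condition codes, pad the delay slot.
  __ tst(Gtemp);
  __ brx(Assembler::equal, false, Assembler::pt, L);
  __ delayed()->nop();

  // After: branch if Gtemp is NULL ...
  __ br_null_short(Gtemp, Assembler::pt, L);
  // ... or, for the opposite sense (as in the stack-overflow checks below):
  // __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);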
@@ -302,21 +297,19 @@
     const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo, done;
     if (ProfileInterpreter) {
       // If no method data exists, go to profile_continue.
       __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
-      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
-      __ delayed()->nop();
+      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
       // Increment counter
       Address mdo_invocation_counter(G4_scratch,
                                      in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                      in_bytes(InvocationCounter::counter_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                  G3_scratch, Lscratch,
                                  Assembler::zero, overflow);
-      __ ba(false, done);
-      __ delayed()->nop();
+      __ ba_short(done);
     }
 
     // Increment counter in methodOop
     __ bind(no_mdo);
     Address invocation_counter(Lmethod,
@@ -338,22 +331,20 @@
 
     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
       AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
       __ load_contents(profile_limit, G3_scratch);
-      __ cmp(O0, G3_scratch);
-      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
-      __ delayed()->nop();
+      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
 
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(*profile_method);
     }
 
     AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
     __ load_contents(invocation_limit, G3_scratch);
     __ cmp(O0, G3_scratch);
-    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
     __ delayed()->nop();
   }
 
 }
 
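One branch in the hunk above deliberately stays in long form: the jump to *overflow keeps br plus a delay-slot nop and only gains a "// Far distance" comment. cbcond encodes a short displacement, so a branch whose target may be an out-of-line stub beyond that range cannot use the short macros; the exact range limit is not shown on this page. The retained shape:

  // Target may be too far away for cbcond, so the classic branch stays.
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
  __ delayed()->nop();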
@@ -408,35 +399,28 @@
   Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
   Label after_frame_check;
 
   assert_different_registers(Rframe_size, Rscratch, Rscratch2);
 
-  __ set( page_size, Rscratch );
-  __ cmp( Rframe_size, Rscratch );
-
-  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
-  __ delayed()->nop();
+  __ set(page_size, Rscratch);
+  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
 
   // get the stack base, and in debug, verify it is non-zero
   __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
 #ifdef ASSERT
   Label base_not_zero;
-  __ cmp( Rscratch, G0 );
-  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
-  __ delayed()->nop();
+  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
   __ stop("stack base is zero in generate_stack_overflow_check");
   __ bind(base_not_zero);
 #endif
 
   // get the stack size, and in debug, verify it is non-zero
   assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
   __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
 #ifdef ASSERT
   Label size_not_zero;
-  __ cmp( Rscratch2, G0 );
-  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
-  __ delayed()->nop();
+  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
   __ stop("stack size is zero in generate_stack_overflow_check");
   __ bind(size_not_zero);
 #endif
 
   // compute the beginning of the protected zone minus the requested frame size
@@ -448,13 +432,11 @@
   // SP, which would take another register
   __ add( Rscratch, Rframe_size, Rscratch );
 
   // the frame is greater than one page in size, so check against
   // the bottom of the stack
-  __ cmp( SP, Rscratch );
-  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
-  __ delayed()->nop();
+  __ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);
 
   // Save the return address as the exception pc
   __ st_ptr(O7, saved_exception_pc);
 
   // the stack will overflow, throw an exception
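Note the two flavors used so far: cmp_and_br_short and cmp_and_brx_short. Mirroring the existing br/brx macros they replace, the brx form appears where the old code branched on a pointer-width comparison (SP against Rscratch here, and I5_savedSP against FP in a later hunk), while the br form covers the 32-bit cases; this reading is inferred from the code each call replaces rather than stated anywhere on this page. For example:

  // Pointer-width compare-and-branch, replacing cmp(SP, Rscratch) + brx + nop:
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);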
@@ -622,13 +604,11 @@
   // do nothing for empty methods (do not even increment invocation counter)
   if ( UseFastEmptyMethods) {
     // If we need a safepoint check, generate full interpreter entry.
     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
     __ set(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Code: _return
     __ retl();
     __ delayed()->mov(O5_savedSP, SP);
 
@@ -662,18 +642,16 @@
     // Check if we need to reach a safepoint and generate full interpreter
     // frame if so.
     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
     __ load_contents(sync_state, G3_scratch);
     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Check if local 0 != NULL
     __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    // check if local 0 == NULL and go the slow path
+    __ br_null_short(Otos_i, Assembler::pn, slow_path);
 
 
     // read first instruction word and extract bytecode @ 1 and index @ 2
     // get first 4 bytes of the bytecodes (big endian!)
     __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
@@ -695,13 +673,11 @@
     // If not, need the slow path.
     ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
     __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
     __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
     __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp(G1_scratch, Bytecodes::_getfield);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
 
     // Get the type and return field offset from the constant pool cache
     __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
     __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
 
@@ -785,13 +761,12 @@
     // continue and the thread will safepoint at the next bytecode dispatch.
 
     // Check if local 0 != NULL
     // If the receiver is null then it is OK to jump to the slow path.
     __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
+    // check if local 0 == NULL and go the slow path
+    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
 
 
     // Load the value of the referent field.
     if (Assembler::is_simm13(referent_offset)) {
       __ load_heap_oop(Otos_i, referent_offset, Otos_i);
@@ -950,13 +925,11 @@
 
   // get signature handler
   { Label L;
     Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
     __ ld_ptr(signature_handler, G3_scratch);
-    __ tst(G3_scratch);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(G3_scratch, Assembler::pt, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
     __ ld_ptr(signature_handler, G3_scratch);
     __ bind(L);
   }
 
@@ -1017,13 +990,11 @@
     __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
     __ ld_ptr(O1, mirror_offset, O1);
 #ifdef ASSERT
     if (!PrintSignatureHandlers)  // do not dirty the output with this
     { Label L;
-      __ tst(O1);
-      __ brx(Assembler::notZero, false, Assembler::pt, L);
-      __ delayed()->nop();
+      __ br_notnull_short(O1, Assembler::pt, L);
       __ stop("mirror is missing");
       __ bind(L);
     }
 #endif // ASSERT
     __ st_ptr(O1, Lscratch2, 0);
@@ -1036,13 +1007,11 @@
   // Oops are boxed in-place on the stack, with handles copied to arguments.
   // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.
 
 #ifdef ASSERT
   { Label L;
-    __ tst(O0);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_notnull_short(O0, Assembler::pt, L);
     __ stop("native entry point is missing");
     __ bind(L);
   }
 #endif // ASSERT
 
@@ -1077,12 +1046,10 @@
 
   Address thread_state(G2_thread, JavaThread::thread_state_offset());
 #ifdef ASSERT
   { Label L;
     __ ld(thread_state, G3_scratch);
-    __ cmp(G3_scratch, _thread_in_Java);
-    __ br(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
     __ stop("Wrong thread state in native stub");
     __ bind(L);
   }
 #endif // ASSERT
   __ set(_thread_in_native, G3_scratch);
@@ -1132,13 +1099,11 @@
     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
 
     Label L;
     __ br(Assembler::notEqual, false, Assembler::pn, L);
     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
-    __ cmp(G3_scratch, 0);
-    __ br(Assembler::equal, false, Assembler::pt, no_block);
-    __ delayed()->nop();
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
     __ bind(L);
 
     // Block.  Save any potential method result value before the operation and
     // use a leaf call to leave the last_Java_frame setup undisturbed.
     save_native_result();
@@ -1183,13 +1148,11 @@
 
   {
     Label no_oop, store_result;
 
     __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
-    __ cmp(G3_scratch, Lscratch);
-    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
     __ addcc(G0, O0, O0);
     __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
     __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
     __ mov(G0, O0);
 
@@ -1204,13 +1167,11 @@
 
   // handle exceptions (exception handling will handle unlocking!)
   { Label L;
     Address exception_addr(G2_thread, Thread::pending_exception_offset());
     __ ld_ptr(exception_addr, Gtemp);
-    __ tst(Gtemp);
-    __ brx(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
+    __ br_null_short(Gtemp, Assembler::pt, L);
     // Note: This could be handled more efficiently since we know that the native
     //       method doesn't have an exception handler. We could directly return
     //       to the exception handler for the caller.
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
     __ should_not_reach_here();
@@ -1243,13 +1204,11 @@
 
   // dispose of return address and remove activation
 #ifdef ASSERT
   {
     Label ok;
-    __ cmp(I5_savedSP, FP);
-    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
-    __ delayed()->nop();
+    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
     __ stop("bad I5_savedSP value");
     __ should_not_reach_here();
     __ bind(ok);
   }
 #endif
@@ -1427,12 +1386,11 @@
     // We have decided to profile this method in the interpreter
     __ bind(profile_method);
 
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
     __ set_method_data_pointer_for_bcp();
-    __ ba(false, profile_method_continue);
-    __ delayed()->nop();
+    __ ba_short(profile_method_continue);
   }
 
   // handle invocation counter overflow
   __ bind(invocation_counter_overflow);
   generate_counter_overflow(Lcontinue);
@@ -1854,13 +1812,11 @@
   // Note that we don't compare the return PC against the
   // deoptimization blob's unpack entry because of the presence of
   // adapter frames in C2.
   Label caller_not_deoptimized;
   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
-  __ tst(O0);
-  __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
-  __ delayed()->nop();
+  __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
 
   const Register Gtmp1 = G3_scratch;
   const Register Gtmp2 = G1_scratch;
 
   // Compute size of arguments for saving when returning to deoptimized caller
@@ -1990,14 +1946,14 @@
 // Helper for vtos entry point generation
 
 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
   Label L;
-  aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
-  fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
-  dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
-  lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
+  aep = __ pc(); __ push_ptr(); __ ba_short(L);
+  fep = __ pc(); __ push_f();   __ ba_short(L);
+  dep = __ pc(); __ push_d();   __ ba_short(L);
+  lep = __ pc(); __ push_l();   __ ba_short(L);
   iep = __ pc(); __ push_i();
   bep = cep = sep = iep;  // there aren't any
   vep = __ pc(); __ bind(L);  // fall through
   generate_and_dispatch(t);
 }