comparison src/cpu/sparc/vm/c1_Runtime1_sparc.cpp @ 3839:3d42f82cd811

7063628: Use cbcond on T4. Summary: Add new short branch instruction to Hotspot sparc assembler. Reviewed-by: never, twisti, jrose
author kvn
date Thu, 21 Jul 2011 11:25:07 -0700
parents 1b4e6a5d98e0
children 4fe626cbf0bf c124e2e7463e
comparison
equal deleted inserted replaced
3838:6a991dcb52bb 3839:3d42f82cd811
69 69
70 // check for pending exceptions 70 // check for pending exceptions
71 { Label L; 71 { Label L;
72 Address exception_addr(G2_thread, Thread::pending_exception_offset()); 72 Address exception_addr(G2_thread, Thread::pending_exception_offset());
73 ld_ptr(exception_addr, Gtemp); 73 ld_ptr(exception_addr, Gtemp);
74 br_null(Gtemp, false, pt, L); 74 br_null_short(Gtemp, pt, L);
75 delayed()->nop();
76 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); 75 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
77 st_ptr(G0, vm_result_addr); 76 st_ptr(G0, vm_result_addr);
78 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset()); 77 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
79 st_ptr(G0, vm_result_addr_2); 78 st_ptr(G0, vm_result_addr_2);
80 79
331 // deoptimization handler entry that will cause re-execution of the current bytecode 330 // deoptimization handler entry that will cause re-execution of the current bytecode
332 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); 331 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
333 assert(deopt_blob != NULL, "deoptimization blob must have been created"); 332 assert(deopt_blob != NULL, "deoptimization blob must have been created");
334 333
335 Label no_deopt; 334 Label no_deopt;
336 __ tst(O0); 335 __ br_null_short(O0, Assembler::pt, no_deopt);
337 __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
338 __ delayed()->nop();
339 336
340 // return to the deoptimization handler entry for unpacking and rexecute 337 // return to the deoptimization handler entry for unpacking and rexecute
341 // if we simply returned the we'd deopt as if any call we patched had just 338 // if we simply returned the we'd deopt as if any call we patched had just
342 // returned. 339 // returned.
343 340
400 __ save_frame(0); 397 __ save_frame(0);
401 398
402 if (id == fast_new_instance_init_check_id) { 399 if (id == fast_new_instance_init_check_id) {
403 // make sure the klass is initialized 400 // make sure the klass is initialized
404 __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1); 401 __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
405 __ cmp(G3_t1, instanceKlass::fully_initialized); 402 __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
406 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
407 __ delayed()->nop();
408 } 403 }
409 #ifdef ASSERT 404 #ifdef ASSERT
410 // assert object can be fast path allocated 405 // assert object can be fast path allocated
411 { 406 {
412 Label ok, not_ok; 407 Label ok, not_ok;
413 __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size); 408 __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
414 __ cmp(G1_obj_size, 0); // make sure it's an instance (LH > 0) 409 // make sure it's an instance (LH > 0)
415 __ br(Assembler::lessEqual, false, Assembler::pn, not_ok); 410 __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
416 __ delayed()->nop();
417 __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size); 411 __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
418 __ br(Assembler::zero, false, Assembler::pn, ok); 412 __ br(Assembler::zero, false, Assembler::pn, ok);
419 __ delayed()->nop(); 413 __ delayed()->nop();
420 __ bind(not_ok); 414 __ bind(not_ok);
421 __ stop("assert(can be fast path allocated)"); 415 __ stop("assert(can be fast path allocated)");
499 __ ld(klass_lh, G3_t1); 493 __ ld(klass_lh, G3_t1);
500 __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1); 494 __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
501 int tag = ((id == new_type_array_id) 495 int tag = ((id == new_type_array_id)
502 ? Klass::_lh_array_tag_type_value 496 ? Klass::_lh_array_tag_type_value
503 : Klass::_lh_array_tag_obj_value); 497 : Klass::_lh_array_tag_obj_value);
504 __ cmp(G3_t1, tag); 498 __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
505 __ brx(Assembler::equal, false, Assembler::pt, ok);
506 __ delayed()->nop();
507 __ stop("assert(is an array klass)"); 499 __ stop("assert(is an array klass)");
508 __ should_not_reach_here(); 500 __ should_not_reach_here();
509 __ bind(ok); 501 __ bind(ok);
510 } 502 }
511 #endif // ASSERT 503 #endif // ASSERT
517 Register O1_t2 = O1; 509 Register O1_t2 = O1;
518 assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2); 510 assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
519 511
520 // check that array length is small enough for fast path 512 // check that array length is small enough for fast path
521 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1); 513 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
522 __ cmp(G4_length, G3_t1); 514 __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
523 __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
524 __ delayed()->nop();
525 515
526 // if we got here then the TLAB allocation failed, so try 516 // if we got here then the TLAB allocation failed, so try
527 // refilling the TLAB or allocating directly from eden. 517 // refilling the TLAB or allocating directly from eden.
528 Label retry_tlab, try_eden; 518 Label retry_tlab, try_eden;
529 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass 519 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass