comparison src/cpu/sparc/vm/stubGenerator_sparc.cpp @ 11173:6b0fd0964b87

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 31 Jul 2013 11:00:54 +0200
parents 40b8c383bc31 980532a806a5
children cefad50507d8
408 __ delayed()->st_ptr(G0, exception_addr); 408 __ delayed()->st_ptr(G0, exception_addr);
409 409
410 return start; 410 return start;
411 } 411 }
412 412
413 // Safefetch stubs.
414 void generate_safefetch(const char* name, int size, address* entry,
415 address* fault_pc, address* continuation_pc) {
416 // safefetch signatures:
417 // int SafeFetch32(int* adr, int errValue);
418 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
419 //
420 // arguments:
421 // o0 = adr
422 // o1 = errValue
423 //
424 // result:
425 // o0 = *adr or errValue
426
427 StubCodeMark mark(this, "StubRoutines", name);
428
429 // Entry point, pc or function descriptor.
430 __ align(CodeEntryAlignment);
431 *entry = __ pc();
432
433 __ mov(O0, G1); // g1 = o0
434 __ mov(O1, O0); // o0 = o1
435 // Load *adr into O0, may fault.
436 *fault_pc = __ pc();
437 switch (size) {
438 case 4:
439 // int32_t
440 __ ldsw(G1, 0, O0); // o0 = [g1]
441 break;
442 case 8:
443 // int64_t
444 __ ldx(G1, 0, O0); // o0 = [g1]
445 break;
446 default:
447 ShouldNotReachHere();
448 }
449
450 // return errValue or *adr
451 *continuation_pc = __ pc();
452 // By convention with the trap handler we ensure there is a non-CTI
453 // instruction in the trap shadow.
454 __ nop();
455 __ retl();
456 __ delayed()->nop();
457 }
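
How the pieces fit together: O0 is preloaded with errValue before the possibly faulting load at *fault_pc overwrites it; if the load traps, the platform signal handler resumes execution at *continuation_pc, so the caller sees errValue in O0 either way. A minimal caller-side sketch in C++, using the SafeFetch32 signature quoted in the comment above (read_or_default is illustrative):

    // Probe a possibly-unmapped address without risking a crash: the result
    // is *adr if the load succeeded, or the sentinel if the access trapped.
    static int read_or_default(int* adr) {
      const int sentinel = -1;          // errValue handed to the stub
      return SafeFetch32(adr, sentinel);
    }
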
413 458
414 //------------------------------------------------------------------------------------------------------------------------ 459 //------------------------------------------------------------------------------------------------------------------------
415 // Continuation point for throwing of implicit exceptions that are not handled in 460 // Continuation point for throwing of implicit exceptions that are not handled in
416 // the current activation. Fabricates an exception oop and initiates normal 461 // the current activation. Fabricates an exception oop and initiates normal
417 // exception dispatching in this frame. Only callee-saved registers are preserved 462 // exception dispatching in this frame. Only callee-saved registers are preserved
564 609
565 address generate_flush_callers_register_windows() { 610 address generate_flush_callers_register_windows() {
566 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows"); 611 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
567 address start = __ pc(); 612 address start = __ pc();
568 613
569 __ flush_windows(); 614 __ flushw();
570 __ retl(false); 615 __ retl(false);
571 __ delayed()->add( FP, STACK_BIAS, O0 ); 616 __ delayed()->add( FP, STACK_BIAS, O0 );
572 // The returned value must be a stack pointer whose register save area 617 // The returned value must be a stack pointer whose register save area
573 // is flushed, and will stay flushed while the caller executes. 618 // is flushed, and will stay flushed while the caller executes.
574 619
575 return start; 620 return start;
576 } 621 }
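
For context, flushw is the SPARC V9 instruction that spills all parent register windows to their stack save areas; the old flush_windows() macro existed to emulate that on V8, which this merge drops. A hedged sketch of how VM code consumes the stub (the accessor name is an assumption, modeled on the usual StubRoutines function-pointer pattern):

    // After this call, every caller frame's %i/%l registers are in memory,
    // so the stack can be walked with plain loads; the return value is the
    // caller's stack pointer with STACK_BIAS already applied.
    intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
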
577 622
578 // Helper functions for v8 atomic operations.
579 //
580 void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
581 if (mark_oop_reg == noreg) {
582 address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
583 __ set((intptr_t)lock_ptr, lock_ptr_reg);
584 } else {
585 assert(scratch_reg != noreg, "just checking");
586 address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
587 __ set((intptr_t)lock_ptr, lock_ptr_reg);
588 __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
589 __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
590 }
591 }
592
593 void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
594
595 get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
596 __ set(StubRoutines::Sparc::locked, lock_reg);
597 // Initialize yield counter
598 __ mov(G0,yield_reg);
599
600 __ BIND(retry);
601 __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
602
603 // This code can only be called from inside the VM, this
604 // stub is only invoked from Atomic::add(). We do not
605 // want to use call_VM, because _last_java_sp and such
606 // must already be set.
607 //
608 // Save the regs and make space for a C call
609 __ save(SP, -96, SP);
610 __ save_all_globals_into_locals();
611 BLOCK_COMMENT("call os::naked_sleep");
612 __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
613 __ delayed()->nop();
614 __ restore_globals_from_locals();
615 __ restore();
616 // reset the counter
617 __ mov(G0,yield_reg);
618
619 __ BIND(dontyield);
620
621 // try to get lock
622 __ swap(lock_ptr_reg, 0, lock_reg);
623
624 // did we get the lock?
625 __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
626 __ br(Assembler::notEqual, true, Assembler::pn, retry);
627 __ delayed()->add(yield_reg,1,yield_reg);
628
629 // yes, got lock. do the operation here.
630 }
631
632 void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
633 __ st(lock_reg, lock_ptr_reg, 0); // unlock
634 }
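
The deleted helpers serialized atomic operations on V8, which has no compare-and-swap: a software spinlock is acquired with the atomic swap instruction, spinning a bounded number of times before sleeping. A hypothetical C++ rendering of that protocol (atomic_swap, LOCKED and UNLOCKED stand in for the emitted swap instruction and StubRoutines::Sparc::locked/unlocked):

    // Spin with bounded retries; after V8AtomicOperationUnderLockSpinCount
    // failed attempts, sleep briefly and start counting again.
    static void v8_lock(volatile intptr_t* lock_ptr) {
      int spins = 0;
      for (;;) {
        if (spins >= V8AtomicOperationUnderLockSpinCount) {
          os::naked_sleep();                     // the C call in the prologue
          spins = 0;
        }
        if (atomic_swap(LOCKED, lock_ptr) == UNLOCKED)
          return;                                // lock acquired
        spins++;
      }
    }
    // The epilogue unlocks by storing the observed 'unlocked' value back.
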
635
636 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). 623 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
637 // 624 //
638 // Arguments : 625 // Arguments:
639 // 626 //
640 // exchange_value: O0 627 // exchange_value: O0
641 // dest: O1 628 // dest: O1
642 // 629 //
643 // Results: 630 // Results:
654 Label retry; 641 Label retry;
655 __ BIND(retry); 642 __ BIND(retry);
656 __ mov(O0, O3); // scratch copy of exchange value 643 __ mov(O0, O3); // scratch copy of exchange value
657 __ ld(O1, 0, O2); // observe the previous value 644 __ ld(O1, 0, O2); // observe the previous value
658 // try to replace O2 with O3 645 // try to replace O2 with O3
659 __ cas_under_lock(O1, O2, O3, 646 __ cas(O1, O2, O3);
660 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
661 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 647 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
662 648
663 __ retl(false); 649 __ retl(false);
664 __ delayed()->mov(O2, O0); // report previous value to caller 650 __ delayed()->mov(O2, O0); // report previous value to caller
665
666 } else { 651 } else {
667 if (VM_Version::v9_instructions_work()) { 652 __ retl(false);
668 __ retl(false); 653 __ delayed()->swap(O1, 0, O0);
669 __ delayed()->swap(O1, 0, O0);
670 } else {
671 const Register& lock_reg = O2;
672 const Register& lock_ptr_reg = O3;
673 const Register& yield_reg = O4;
674
675 Label retry;
676 Label dontyield;
677
678 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
679 // got the lock, do the swap
680 __ swap(O1, 0, O0);
681
682 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
683 __ retl(false);
684 __ delayed()->nop();
685 }
686 } 654 }
687 655
688 return start; 656 return start;
689 } 657 }
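
The ld/cas loop on the new right-hand side is the classic lock-free exchange: observe the old value, attempt to replace it, and retry if another thread raced in between. As a hypothetical C++ sketch (cas() stands in for the hardware compare-and-swap: compare *dest with the expected value, swap on match, and return the old contents):

    static jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
      for (;;) {
        jint observed = *dest;                            // ld(O1, 0, O2)
        if (cas(dest, observed, exchange_value) == observed)
          return observed;                                // report previous value
        // *dest changed between the load and the cas; retry
      }
    }
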
690 658
691 659
692 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) 660 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
693 // 661 //
694 // Arguments : 662 // Arguments:
695 // 663 //
696 // exchange_value: O0 664 // exchange_value: O0
697 // dest: O1 665 // dest: O1
698 // compare_value: O2 666 // compare_value: O2
699 // 667 //
700 // Results: 668 // Results:
701 // 669 //
702 // O0: the value previously stored in dest 670 // O0: the value previously stored in dest
703 // 671 //
704 // Overwrites (v8): O3,O4,O5
705 //
706 address generate_atomic_cmpxchg() { 672 address generate_atomic_cmpxchg() {
707 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 673 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
708 address start = __ pc(); 674 address start = __ pc();
709 675
710 // cmpxchg(dest, compare_value, exchange_value) 676 // cmpxchg(dest, compare_value, exchange_value)
711 __ cas_under_lock(O1, O2, O0, 677 __ cas(O1, O2, O0);
712 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
713 __ retl(false); 678 __ retl(false);
714 __ delayed()->nop(); 679 __ delayed()->nop();
715 680
716 return start; 681 return start;
717 } 682 }
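
Here a single cas suffices, since compare-and-swap is exactly the operation being exported. The stub returns the previous contents of dest, so callers detect success by comparing that against the value they expected; a caller-side sketch using the Atomic::cmpxchg signature quoted above (try_update is illustrative):

    static bool try_update(volatile jint* dest, jint expected, jint new_val) {
      // The update took effect iff the previous value equals 'expected'.
      return Atomic::cmpxchg(new_val, dest, expected) == expected;
    }
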
718 683
719 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) 684 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
720 // 685 //
721 // Arguments : 686 // Arguments:
722 // 687 //
723 // exchange_value: O1:O0 688 // exchange_value: O1:O0
724 // dest: O2 689 // dest: O2
725 // compare_value: O4:O3 690 // compare_value: O4:O3
726 // 691 //
727 // Results: 692 // Results:
728 // 693 //
729 // O1:O0: the value previously stored in dest 694 // O1:O0: the value previously stored in dest
730 // 695 //
731 // This only works on V9, on V8 we don't generate any
732 // code and just return NULL.
733 //
734 // Overwrites: G1,G2,G3 696 // Overwrites: G1,G2,G3
735 // 697 //
736 address generate_atomic_cmpxchg_long() { 698 address generate_atomic_cmpxchg_long() {
737 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 699 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
738 address start = __ pc(); 700 address start = __ pc();
739 701
740 if (!VM_Version::supports_cx8())
741 return NULL;
742 __ sllx(O0, 32, O0); 702 __ sllx(O0, 32, O0);
743 __ srl(O1, 0, O1); 703 __ srl(O1, 0, O1);
744 __ or3(O0,O1,O0); // O0 holds 64-bit value from exchange_value 704 __ or3(O0,O1,O0); // O0 holds 64-bit value from exchange_value
745 __ sllx(O3, 32, O3); 705 __ sllx(O3, 32, O3);
746 __ srl(O4, 0, O4); 706 __ srl(O4, 0, O4);
754 } 714 }
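
In the 32-bit VM a jlong argument arrives as a register pair, so the stub first assembles 64-bit operands: sllx moves the high half into bits 63..32, srl with a zero shift count clears the junk upper half of the low register, and or3 merges the two. An equivalent C++ sketch (pack is illustrative):

    static jlong pack(juint hi, juint lo) {
      // juint is unsigned, so the low half zero-extends, matching srl #0.
      return ((jlong)hi << 32) | (jlong)lo;
    }
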
755 715
756 716
757 // Support for jint Atomic::add(jint add_value, volatile jint* dest). 717 // Support for jint Atomic::add(jint add_value, volatile jint* dest).
758 // 718 //
759 // Arguments : 719 // Arguments:
760 // 720 //
761 // add_value: O0 (e.g., +1 or -1) 721 // add_value: O0 (e.g., +1 or -1)
762 // dest: O1 722 // dest: O1
763 // 723 //
764 // Results: 724 // Results:
765 // 725 //
766 // O0: the new value stored in dest 726 // O0: the new value stored in dest
767 // 727 //
768 // Overwrites (v9): O3 728 // Overwrites: O3
769 // Overwrites (v8): O3,O4,O5
770 // 729 //
771 address generate_atomic_add() { 730 address generate_atomic_add() {
772 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 731 StubCodeMark mark(this, "StubRoutines", "atomic_add");
773 address start = __ pc(); 732 address start = __ pc();
774 __ BIND(_atomic_add_stub); 733 __ BIND(_atomic_add_stub);
775 734
776 if (VM_Version::v9_instructions_work()) { 735 Label(retry);
777 Label(retry); 736 __ BIND(retry);
778 __ BIND(retry); 737
779 738 __ lduw(O1, 0, O2);
780 __ lduw(O1, 0, O2); 739 __ add(O0, O2, O3);
781 __ add(O0, O2, O3); 740 __ cas(O1, O2, O3);
782 __ cas(O1, O2, O3); 741 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
783 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 742 __ retl(false);
784 __ retl(false); 743 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
785 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
786 } else {
787 const Register& lock_reg = O2;
788 const Register& lock_ptr_reg = O3;
789 const Register& value_reg = O4;
790 const Register& yield_reg = O5;
791
792 Label(retry);
793 Label(dontyield);
794
795 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
796 // got lock, do the increment
797 __ ld(O1, 0, value_reg);
798 __ add(O0, value_reg, value_reg);
799 __ st(value_reg, O1, 0);
800
801 // %%% only for RMO and PSO
802 __ membar(Assembler::StoreStore);
803
804 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
805
806 __ retl(false);
807 __ delayed()->mov(value_reg, O0);
808 }
809 744
810 return start; 745 return start;
811 } 746 }
812 Label _atomic_add_stub; // called from other stubs 747 Label _atomic_add_stub; // called from other stubs
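
The retained V9 path is the same lduw/add/cas retry loop, with a delay-slot trick on exit: after a successful cas, O2 == O3, so the delayed add recomputes the new value directly into O0. As a hypothetical C++ sketch (cas() as in the earlier sketches):

    static jint atomic_add_sketch(jint add_value, volatile jint* dest) {
      for (;;) {
        jint observed = *dest;                       // lduw(O1, 0, O2)
        jint updated  = observed + add_value;        // add(O0, O2, O3)
        if (cas(dest, observed, updated) == observed)
          return updated;                            // delayed()->add(O0, O2, O0)
      }
    }
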
813 748
839 __ mov(G1, L1); 774 __ mov(G1, L1);
840 __ mov(G2, L2); 775 __ mov(G2, L2);
841 __ mov(G3, L3); 776 __ mov(G3, L3);
842 __ mov(G4, L4); 777 __ mov(G4, L4);
843 __ mov(G5, L5); 778 __ mov(G5, L5);
844 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { 779 for (i = 0; i < 64; i += 2) {
845 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize); 780 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
846 } 781 }
847 782
848 address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access); 783 address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
849 BLOCK_COMMENT("call handle_unsafe_access"); 784 BLOCK_COMMENT("call handle_unsafe_access");
853 __ mov(L1, G1); 788 __ mov(L1, G1);
854 __ mov(L2, G2); 789 __ mov(L2, G2);
855 __ mov(L3, G3); 790 __ mov(L3, G3);
856 __ mov(L4, G4); 791 __ mov(L4, G4);
857 __ mov(L5, G5); 792 __ mov(L5, G5);
858 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { 793 for (i = 0; i < 64; i += 2) {
859 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize); 794 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
860 } 795 }
861 796
862 __ verify_thread(); 797 __ verify_thread();
863 798
3424 // arraycopy stubs used by compilers 3359 // arraycopy stubs used by compilers
3425 generate_arraycopy_stubs(); 3360 generate_arraycopy_stubs();
3426 3361
3427 // Don't initialize the platform math functions since sparc 3362 // Don't initialize the platform math functions since sparc
3428 // doesn't have intrinsics for these operations. 3363 // doesn't have intrinsics for these operations.
3364
3365 // Safefetch stubs.
3366 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
3367 &StubRoutines::_safefetch32_fault_pc,
3368 &StubRoutines::_safefetch32_continuation_pc);
3369 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3370 &StubRoutines::_safefetchN_fault_pc,
3371 &StubRoutines::_safefetchN_continuation_pc);
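
The fault and continuation pcs registered here are what the platform signal handlers consult. Schematically, in the per-OS SIGSEGV/SIGBUS handler (set_pc and uc stand in for the platform-specific rewrite of the saved program counter in the signal context; the two StubRoutines helpers are assumed to be the ones introduced alongside these stubs):

    if (StubRoutines::is_safefetch_fault(pc)) {
      // Resume at the continuation point; O0 still holds errValue because
      // the faulting load never completed.
      set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
      return true;  // signal handled
    }
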
3429 } 3372 }
3430 3373
3431 3374
3432 public: 3375 public:
3433 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 3376 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {