comparison src/cpu/sparc/vm/assembler_sparc.cpp @ 1503:c640000b7cc1

6829193: JSR 292 needs to support SPARC
Summary: There are unimplemented portions of the hotspot code for method handles and invokedynamic specific to SPARC.
Reviewed-by: kvn, never, jrose
author twisti
date Thu, 29 Apr 2010 06:30:25 -0700
parents dcf03e02b020
children 2338d41fbd81
comparison
equal deleted inserted replaced
1399:90acda19b80f 1503:c640000b7cc1
1 /* 1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
2331 movcc(greater, false, xcc, 1, Rresult); 2331 movcc(greater, false, xcc, 1, Rresult);
2332 } 2332 }
2333 #endif 2333 #endif
2334 2334
2335 2335
2336 void MacroAssembler::load_sized_value(Address src, Register dst,
2337 size_t size_in_bytes, bool is_signed) {
2338 switch (size_in_bytes) {
2339 case 8: ldx(src, dst); break;
2340 case 4: ld( src, dst); break;
2341 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
2342 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
2343 default: ShouldNotReachHere();
2344 }
2345 }
2346
2347
2336 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 2348 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
2337 FloatRegister Fa, FloatRegister Fb, 2349 FloatRegister Fa, FloatRegister Fb,
2338 Register Rresult) { 2350 Register Rresult) {
2339 2351
2340 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); 2352 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
2623 2635
2624 return RegisterOrConstant(tmp); 2636 return RegisterOrConstant(tmp);
2625 } 2637 }
2626 2638
2627 2639
2628 void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) { 2640 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2629 assert(dest.register_or_noreg() != G0, "lost side effect"); 2641 assert(d.register_or_noreg() != G0, "lost side effect");
2630 if ((src.is_constant() && src.as_constant() == 0) || 2642 if ((s2.is_constant() && s2.as_constant() == 0) ||
2631 (src.is_register() && src.as_register() == G0)) { 2643 (s2.is_register() && s2.as_register() == G0)) {
2632 // do nothing 2644 // Do nothing, just move value.
2633 } else if (dest.is_register()) { 2645 if (s1.is_register()) {
2634 add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register()); 2646 if (d.is_constant()) d = temp;
2635 } else if (src.is_constant()) { 2647 mov(s1.as_register(), d.as_register());
2636 intptr_t res = dest.as_constant() + src.as_constant(); 2648 return d;
2637 dest = RegisterOrConstant(res); // side effect seen by caller 2649 } else {
2650 return s1;
2651 }
2652 }
2653
2654 if (s1.is_register()) {
2655 assert_different_registers(s1.as_register(), temp);
2656 if (d.is_constant()) d = temp;
2657 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2658 return d;
2638 } else { 2659 } else {
2639 assert(temp != noreg, "cannot handle constant += register"); 2660 if (s2.is_register()) {
2640 add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp); 2661 assert_different_registers(s2.as_register(), temp);
2641 dest = RegisterOrConstant(temp); // side effect seen by caller 2662 if (d.is_constant()) d = temp;
2642 } 2663 set(s1.as_constant(), temp);
2643 } 2664 andn(temp, s2.as_register(), d.as_register());
2644 2665 return d;
2645 void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) { 2666 } else {
2646 assert(dest.register_or_noreg() != G0, "lost side effect"); 2667 intptr_t res = s1.as_constant() & ~s2.as_constant();
2647 if (!is_simm13(src.constant_or_zero())) 2668 return res;
2648 src = (src.as_constant() & 0xFF); 2669 }
2649 if ((src.is_constant() && src.as_constant() == 0) || 2670 }
2650 (src.is_register() && src.as_register() == G0)) { 2671 }
2651 // do nothing 2672
2652 } else if (dest.is_register()) { 2673 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2653 sll_ptr(dest.as_register(), src, dest.as_register()); 2674 assert(d.register_or_noreg() != G0, "lost side effect");
2654 } else if (src.is_constant()) { 2675 if ((s2.is_constant() && s2.as_constant() == 0) ||
2655 intptr_t res = dest.as_constant() << src.as_constant(); 2676 (s2.is_register() && s2.as_register() == G0)) {
2656 dest = RegisterOrConstant(res); // side effect seen by caller 2677 // Do nothing, just move value.
2678 if (s1.is_register()) {
2679 if (d.is_constant()) d = temp;
2680 mov(s1.as_register(), d.as_register());
2681 return d;
2682 } else {
2683 return s1;
2684 }
2685 }
2686
2687 if (s1.is_register()) {
2688 assert_different_registers(s1.as_register(), temp);
2689 if (d.is_constant()) d = temp;
2690 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2691 return d;
2657 } else { 2692 } else {
2658 assert(temp != noreg, "cannot handle constant <<= register"); 2693 if (s2.is_register()) {
2659 set(dest.as_constant(), temp); 2694 assert_different_registers(s2.as_register(), temp);
2660 sll_ptr(temp, src, temp); 2695 if (d.is_constant()) d = temp;
2661 dest = RegisterOrConstant(temp); // side effect seen by caller 2696 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
2697 return d;
2698 } else {
2699 intptr_t res = s1.as_constant() + s2.as_constant();
2700 return res;
2701 }
2702 }
2703 }
2704
2705 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2706 assert(d.register_or_noreg() != G0, "lost side effect");
2707 if (!is_simm13(s2.constant_or_zero()))
2708 s2 = (s2.as_constant() & 0xFF);
2709 if ((s2.is_constant() && s2.as_constant() == 0) ||
2710 (s2.is_register() && s2.as_register() == G0)) {
2711 // Do nothing, just move value.
2712 if (s1.is_register()) {
2713 if (d.is_constant()) d = temp;
2714 mov(s1.as_register(), d.as_register());
2715 return d;
2716 } else {
2717 return s1;
2718 }
2719 }
2720
2721 if (s1.is_register()) {
2722 assert_different_registers(s1.as_register(), temp);
2723 if (d.is_constant()) d = temp;
2724 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2725 return d;
2726 } else {
2727 if (s2.is_register()) {
2728 assert_different_registers(s2.as_register(), temp);
2729 if (d.is_constant()) d = temp;
2730 set(s1.as_constant(), temp);
2731 sll_ptr(temp, s2.as_register(), d.as_register());
2732 return d;
2733 } else {
2734 intptr_t res = s1.as_constant() << s2.as_constant();
2735 return res;
2736 }
2662 } 2737 }
2663 } 2738 }
2664 2739
2665 2740
2666 // Look up the method for a megamorphic invokeinterface call. 2741 // Look up the method for a megamorphic invokeinterface call.
2706 } 2781 }
2707 add(recv_klass, scan_temp, scan_temp); 2782 add(recv_klass, scan_temp, scan_temp);
2708 2783
2709 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2784 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
2710 RegisterOrConstant itable_offset = itable_index; 2785 RegisterOrConstant itable_offset = itable_index;
2711 regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize)); 2786 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
2712 regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes()); 2787 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
2713 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2788 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
2714 2789
2715 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2790 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
2716 // if (scan->interface() == intf) { 2791 // if (scan->interface() == intf) {
2717 // result = (klass + scan->offset() + itable_index); 2792 // result = (klass + scan->offset() + itable_index);
2803 bool need_slow_path = (must_load_sco || 2878 bool need_slow_path = (must_load_sco ||
2804 super_check_offset.constant_or_zero() == sco_offset); 2879 super_check_offset.constant_or_zero() == sco_offset);
2805 2880
2806 assert_different_registers(sub_klass, super_klass, temp_reg); 2881 assert_different_registers(sub_klass, super_klass, temp_reg);
2807 if (super_check_offset.is_register()) { 2882 if (super_check_offset.is_register()) {
2808 assert_different_registers(sub_klass, super_klass, 2883 assert_different_registers(sub_klass, super_klass, temp_reg,
2809 super_check_offset.as_register()); 2884 super_check_offset.as_register());
2810 } else if (must_load_sco) { 2885 } else if (must_load_sco) {
2811 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2886 assert(temp2_reg != noreg, "supply either a temp or a register offset");
2812 } 2887 }
2813 2888
2853 // Check the supertype display: 2928 // Check the supertype display:
2854 if (must_load_sco) { 2929 if (must_load_sco) {
2855 // The super check offset is always positive... 2930 // The super check offset is always positive...
2856 lduw(super_klass, sco_offset, temp2_reg); 2931 lduw(super_klass, sco_offset, temp2_reg);
2857 super_check_offset = RegisterOrConstant(temp2_reg); 2932 super_check_offset = RegisterOrConstant(temp2_reg);
2933 // super_check_offset is register.
2934 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
2858 } 2935 }
2859 ld_ptr(sub_klass, super_check_offset, temp_reg); 2936 ld_ptr(sub_klass, super_check_offset, temp_reg);
2860 cmp(super_klass, temp_reg); 2937 cmp(super_klass, temp_reg);
2861 2938
2862 // This check has worked decisively for primary supers. 2939 // This check has worked decisively for primary supers.
3012 3089
3013 bind(L_fallthrough); 3090 bind(L_fallthrough);
3014 } 3091 }
3015 3092
3016 3093
3017
3018
3019 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, 3094 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
3020 Register temp_reg, 3095 Register temp_reg,
3021 Label& wrong_method_type) { 3096 Label& wrong_method_type) {
3097 if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
3022 assert_different_registers(mtype_reg, mh_reg, temp_reg); 3098 assert_different_registers(mtype_reg, mh_reg, temp_reg);
3023 // compare method type against that of the receiver 3099 // compare method type against that of the receiver
3024 RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg); 3100 RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
3025 ld_ptr(mh_reg, mhtype_offset, temp_reg); 3101 ld_ptr(mh_reg, mhtype_offset, temp_reg);
3026 cmp(temp_reg, mtype_reg); 3102 cmp(temp_reg, mtype_reg);
3027 br(Assembler::notEqual, false, Assembler::pn, wrong_method_type); 3103 br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
3028 delayed()->nop(); 3104 delayed()->nop();
3029 } 3105 }
3030 3106
3031 3107
3032 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) { 3108 // A method handle has a "vmslots" field which gives the size of its
3109 // argument list in JVM stack slots. This field is either located directly
3110 // in every method handle, or else is indirectly accessed through the
3111 // method handle's MethodType. This macro hides the distinction.
3112 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
3113 Register temp_reg) {
3114 assert_different_registers(vmslots_reg, mh_reg, temp_reg);
3115 if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
3116 // load mh.type.form.vmslots
3117 if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
3118 // hoist vmslots into every mh to avoid dependent load chain
3119 ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3120 } else {
3121 Register temp2_reg = vmslots_reg;
3122 ld_ptr(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
3123 ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
3124 ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3125 }
3126 }
3127
3128
3129 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
3033 assert(mh_reg == G3_method_handle, "caller must put MH object in G3"); 3130 assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
3034 assert_different_registers(mh_reg, temp_reg); 3131 assert_different_registers(mh_reg, temp_reg);
3132
3133 if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
3035 3134
3036 // pick out the interpreted side of the handler 3135 // pick out the interpreted side of the handler
3037 ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); 3136 ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
3038 3137
3039 // off we go... 3138 // off we go...
3041 jmp(temp_reg, 0); 3140 jmp(temp_reg, 0);
3042 3141
3043 // for the various stubs which take control at this point, 3142 // for the various stubs which take control at this point,
3044 // see MethodHandles::generate_method_handle_stub 3143 // see MethodHandles::generate_method_handle_stub
3045 3144
3046 // (Can any caller use this delay slot? If so, add an option for supression.) 3145 // Some callers can fill the delay slot.
3047 delayed()->nop(); 3146 if (emit_delayed_nop) {
3048 } 3147 delayed()->nop();
3148 }
3149 }
3150
3049 3151
3050 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 3152 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
3051 int extra_slot_offset) { 3153 int extra_slot_offset) {
3052 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 3154 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
3053 int stackElementSize = Interpreter::stackElementWords() * wordSize; 3155 int stackElementSize = Interpreter::stackElementSize();
3054 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 3156 int offset = extra_slot_offset * stackElementSize;
3055 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
3056 assert(offset1 - offset == stackElementSize, "correct arithmetic");
3057 if (arg_slot.is_constant()) { 3157 if (arg_slot.is_constant()) {
3058 offset += arg_slot.as_constant() * stackElementSize; 3158 offset += arg_slot.as_constant() * stackElementSize;
3059 return offset; 3159 return offset;
3060 } else { 3160 } else {
3061 Register temp = arg_slot.as_register(); 3161 Register temp = arg_slot.as_register();
3064 add(temp, offset, temp); 3164 add(temp, offset, temp);
3065 return temp; 3165 return temp;
3066 } 3166 }
3067 } 3167 }
3068 3168
3169
3170 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
3171 int extra_slot_offset) {
3172 return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
3173 }
3069 3174
3070 3175
3071 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 3176 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
3072 Register temp_reg, 3177 Register temp_reg,
3073 Label& done, Label* slow_case, 3178 Label& done, Label* slow_case,