graal-jvmci-8: comparison of src/cpu/sparc/vm/assembler_sparc.inline.hpp @ 7204:f0c2369fda5a
8003250: SPARC: move MacroAssembler into separate file
Reviewed-by: jrose, kvn
author:   twisti
date:     Thu, 06 Dec 2012 09:57:41 -0800
parents:  7eca5de9e0b6
children: ffa87474d7a4
old revision: 7201:c5d414e98fd4 | new revision: 7204:f0c2369fda5a
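For orientation before the line-by-line comparison: the right-hand (new) column keeps only the raw Assembler inlines, while every MacroAssembler inline, the Address/AddressLiteral helpers, and the platform-dependent ld/st wrappers show an empty right-hand cell because they were removed from this header. Below is a minimal, hypothetical sketch of where that code ends up, assuming the relocated inlines land in macroAssembler_sparc.inline.hpp; the destination file is not part of this comparison, so its name and include list are assumptions, not something the diff confirms.

    // assembler_sparc.inline.hpp after this change: only raw Assembler inlines.
    #include "asm/assembler.hpp"          // replaces asm/assembler.inline.hpp

    // Assumed destination (macroAssembler_sparc.inline.hpp): it would pick up
    // the headers dropped from this file and the MacroAssembler inlines that
    // appear as deleted below, e.g. pd_patch_instruction() moved verbatim.
    #include "asm/codeBuffer.hpp"
    #include "code/codeCache.hpp"
    #include "runtime/handles.inline.hpp"

    inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
      jint& stub_inst = *(jint*) branch;
      stub_inst = patched_branch(target - branch, stub_inst, 0);
    }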
23 */ | 23 */ |
24 | 24 |
25 #ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP | 25 #ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP |
26 #define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP | 26 #define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP |
27 | 27 |
28 #include "asm/assembler.inline.hpp" | 28 #include "asm/assembler.hpp" |
29 #include "asm/codeBuffer.hpp" | |
30 #include "code/codeCache.hpp" | |
31 #include "runtime/handles.inline.hpp" | |
32 | 29 |
33 inline void MacroAssembler::pd_patch_instruction(address branch, address target) { | |
34 jint& stub_inst = *(jint*) branch; | |
35 stub_inst = patched_branch(target - branch, stub_inst, 0); | |
36 } | |
37 | |
38 #ifndef PRODUCT | |
39 inline void MacroAssembler::pd_print_patched_instruction(address branch) { | |
40 jint stub_inst = *(jint*) branch; | |
41 print_instruction(stub_inst); | |
42 ::tty->print("%s", " (unresolved)"); | |
43 } | |
44 #endif // PRODUCT | |
45 | |
46 inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); } | |
47 | |
48 | |
49 inline int AddressLiteral::low10() const { | |
50 return Assembler::low10(value()); | |
51 } | |
52 | |
53 | |
54 // inlines for SPARC assembler -- dmu 5/97 | |
55 | 30 |
56 inline void Assembler::check_delay() { | 31 inline void Assembler::check_delay() { |
57 # ifdef CHECK_DELAY | 32 # ifdef CHECK_DELAY |
58 guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot"); | 33 guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot"); |
59 delay_state = no_delay; | 34 delay_state = no_delay; |
74 relocate(rspec); | 49 relocate(rspec); |
75 emit_long(x); | 50 emit_long(x); |
76 } | 51 } |
77 | 52 |
78 | 53 |
79 inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); } | 54 inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); } |
80 inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); } | 55 inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } |
81 inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); } | |
82 | 56 |
83 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); } | 57 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); } |
84 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); } | 58 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); } |
85 | 59 |
86 inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); } | 60 inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); } |
109 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 83 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
110 | 84 |
111 inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } | 85 inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } |
112 inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); } | 86 inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); } |
113 | 87 |
114 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) { | |
115 if (s2.is_register()) ldf(w, s1, s2.as_register(), d); | |
116 else ldf(w, s1, s2.as_constant(), d); | |
117 } | |
118 | |
119 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); } | 88 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); } |
120 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); } | 89 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); } |
121 | |
122 inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); } | |
123 | 90 |
124 inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } | 91 inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } |
125 inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 92 inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
126 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } | 93 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } |
127 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 94 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
150 inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); } | 117 inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); } |
151 inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 118 inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
152 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); } | 119 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); } |
153 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 120 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
154 | 121 |
155 #ifdef _LP64 | |
156 // Make all 32 bit loads signed so 64 bit registers maintain proper sign | |
157 inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); } | |
158 inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); } | |
159 #else | |
160 inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); } | |
161 inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); } | |
162 #endif | |
163 | |
164 #ifdef ASSERT | |
165 // ByteSize is only a class when ASSERT is defined, otherwise it's an int. | |
166 # ifdef _LP64 | |
167 inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); } | |
168 # else | |
169 inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); } | |
170 # endif | |
171 #endif | |
172 | |
173 inline void Assembler::ld( const Address& a, Register d, int offset) { | |
174 if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); } | |
175 else { ld( a.base(), a.disp() + offset, d); } | |
176 } | |
177 inline void Assembler::ldsb(const Address& a, Register d, int offset) { | |
178 if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); } | |
179 else { ldsb(a.base(), a.disp() + offset, d); } | |
180 } | |
181 inline void Assembler::ldsh(const Address& a, Register d, int offset) { | |
182 if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); } | |
183 else { ldsh(a.base(), a.disp() + offset, d); } | |
184 } | |
185 inline void Assembler::ldsw(const Address& a, Register d, int offset) { | |
186 if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); } | |
187 else { ldsw(a.base(), a.disp() + offset, d); } | |
188 } | |
189 inline void Assembler::ldub(const Address& a, Register d, int offset) { | |
190 if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); } | |
191 else { ldub(a.base(), a.disp() + offset, d); } | |
192 } | |
193 inline void Assembler::lduh(const Address& a, Register d, int offset) { | |
194 if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); } | |
195 else { lduh(a.base(), a.disp() + offset, d); } | |
196 } | |
197 inline void Assembler::lduw(const Address& a, Register d, int offset) { | |
198 if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); } | |
199 else { lduw(a.base(), a.disp() + offset, d); } | |
200 } | |
201 inline void Assembler::ldd( const Address& a, Register d, int offset) { | |
202 if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); } | |
203 else { ldd( a.base(), a.disp() + offset, d); } | |
204 } | |
205 inline void Assembler::ldx( const Address& a, Register d, int offset) { | |
206 if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); } | |
207 else { ldx( a.base(), a.disp() + offset, d); } | |
208 } | |
209 | |
210 inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); } | |
211 inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); } | |
212 inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); } | |
213 inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); } | |
214 inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); } | |
215 inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); } | |
216 inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); } | |
217 inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); } | |
218 inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); } | |
219 | |
220 // form effective addresses this way: | |
221 inline void Assembler::add(const Address& a, Register d, int offset) { | |
222 if (a.has_index()) add(a.base(), a.index(), d); | |
223 else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; } | |
224 if (offset != 0) add(d, offset, d); | |
225 } | |
226 inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) { | |
227 if (s2.is_register()) add(s1, s2.as_register(), d); | |
228 else { add(s1, s2.as_constant() + offset, d); offset = 0; } | |
229 if (offset != 0) add(d, offset, d); | |
230 } | |
231 | |
232 inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) { | |
233 if (s2.is_register()) andn(s1, s2.as_register(), d); | |
234 else andn(s1, s2.as_constant(), d); | |
235 } | |
236 | |
237 inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); } | 122 inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); } |
238 inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 123 inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
239 | |
240 | |
241 inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); } | |
242 inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | |
243 | |
244 inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); } | |
245 | |
246 | 124 |
247 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } | 125 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } |
248 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); } | 126 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); } |
249 | 127 |
250 inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); } | 128 inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); } |
251 | 129 |
252 // pp 222 | 130 // pp 222 |
253 | 131 |
254 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) { | |
255 if (s2.is_register()) stf(w, d, s1, s2.as_register()); | |
256 else stf(w, d, s1, s2.as_constant()); | |
257 } | |
258 | |
259 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); } | 132 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); } |
260 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 133 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
261 | |
262 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { | |
263 relocate(a.rspec(offset)); | |
264 if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); } | |
265 else { stf(w, d, a.base(), a.disp() + offset); } | |
266 } | |
267 | 134 |
268 inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } | 135 inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } |
269 inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 136 inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
270 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } | 137 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } |
271 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 138 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
283 inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); } | 150 inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); } |
284 inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 151 inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
285 inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); } | 152 inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); } |
286 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 153 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
287 | 154 |
288 inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); } | |
289 inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); } | |
290 | |
291 #ifdef ASSERT | |
292 // ByteSize is only a class when ASSERT is defined, otherwise it's an int. | |
293 inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); } | |
294 #endif | |
295 | |
296 inline void Assembler::stb(Register d, const Address& a, int offset) { | |
297 if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); } | |
298 else { stb(d, a.base(), a.disp() + offset); } | |
299 } | |
300 inline void Assembler::sth(Register d, const Address& a, int offset) { | |
301 if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); } | |
302 else { sth(d, a.base(), a.disp() + offset); } | |
303 } | |
304 inline void Assembler::stw(Register d, const Address& a, int offset) { | |
305 if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); } | |
306 else { stw(d, a.base(), a.disp() + offset); } | |
307 } | |
308 inline void Assembler::st( Register d, const Address& a, int offset) { | |
309 if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); } | |
310 else { st( d, a.base(), a.disp() + offset); } | |
311 } | |
312 inline void Assembler::std(Register d, const Address& a, int offset) { | |
313 if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); } | |
314 else { std(d, a.base(), a.disp() + offset); } | |
315 } | |
316 inline void Assembler::stx(Register d, const Address& a, int offset) { | |
317 if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); } | |
318 else { stx(d, a.base(), a.disp() + offset); } | |
319 } | |
320 | |
321 inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); } | |
322 inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); } | |
323 inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); } | |
324 inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); } | |
325 inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); } | |
326 inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); } | |
327 | |
328 // v8 p 99 | 155 // v8 p 99 |
329 | 156 |
330 inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); } | 157 inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); } |
331 inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 158 inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
332 inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); } | 159 inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); } |
334 inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); } | 161 inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); } |
335 inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 162 inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
336 inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); } | 163 inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); } |
337 inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 164 inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
338 | 165 |
339 inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) { | |
340 if (s2.is_register()) sub(s1, s2.as_register(), d); | |
341 else { sub(s1, s2.as_constant() + offset, d); offset = 0; } | |
342 if (offset != 0) sub(d, offset, d); | |
343 } | |
344 | |
345 // pp 231 | 166 // pp 231 |
346 | 167 |
347 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); } | 168 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); } |
348 inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } | 169 inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } |
349 | 170 |
350 inline void Assembler::swap( Address& a, Register d, int offset ) { | |
351 relocate(a.rspec(offset)); | |
352 if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d ); } | |
353 else { swap( a.base(), a.disp() + offset, d ); } | |
354 } | |
355 | |
356 | |
357 // Use the right loads/stores for the platform | |
358 inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) { | |
359 #ifdef _LP64 | |
360 Assembler::ldx(s1, s2, d); | |
361 #else | |
362 Assembler::ld( s1, s2, d); | |
363 #endif | |
364 } | |
365 | |
366 inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) { | |
367 #ifdef _LP64 | |
368 Assembler::ldx(s1, simm13a, d); | |
369 #else | |
370 Assembler::ld( s1, simm13a, d); | |
371 #endif | |
372 } | |
373 | |
374 #ifdef ASSERT | |
375 // ByteSize is only a class when ASSERT is defined, otherwise it's an int. | |
376 inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) { | |
377 ld_ptr(s1, in_bytes(simm13a), d); | |
378 } | |
379 #endif | |
380 | |
381 inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) { | |
382 #ifdef _LP64 | |
383 Assembler::ldx(s1, s2, d); | |
384 #else | |
385 Assembler::ld( s1, s2, d); | |
386 #endif | |
387 } | |
388 | |
389 inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) { | |
390 #ifdef _LP64 | |
391 Assembler::ldx(a, d, offset); | |
392 #else | |
393 Assembler::ld( a, d, offset); | |
394 #endif | |
395 } | |
396 | |
397 inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) { | |
398 #ifdef _LP64 | |
399 Assembler::stx(d, s1, s2); | |
400 #else | |
401 Assembler::st( d, s1, s2); | |
402 #endif | |
403 } | |
404 | |
405 inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) { | |
406 #ifdef _LP64 | |
407 Assembler::stx(d, s1, simm13a); | |
408 #else | |
409 Assembler::st( d, s1, simm13a); | |
410 #endif | |
411 } | |
412 | |
413 #ifdef ASSERT | |
414 // ByteSize is only a class when ASSERT is defined, otherwise it's an int. | |
415 inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) { | |
416 st_ptr(d, s1, in_bytes(simm13a)); | |
417 } | |
418 #endif | |
419 | |
420 inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) { | |
421 #ifdef _LP64 | |
422 Assembler::stx(d, s1, s2); | |
423 #else | |
424 Assembler::st( d, s1, s2); | |
425 #endif | |
426 } | |
427 | |
428 inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) { | |
429 #ifdef _LP64 | |
430 Assembler::stx(d, a, offset); | |
431 #else | |
432 Assembler::st( d, a, offset); | |
433 #endif | |
434 } | |
435 | |
436 // Use the right loads/stores for the platform | |
437 inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) { | |
438 #ifdef _LP64 | |
439 Assembler::ldx(s1, s2, d); | |
440 #else | |
441 Assembler::ldd(s1, s2, d); | |
442 #endif | |
443 } | |
444 | |
445 inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) { | |
446 #ifdef _LP64 | |
447 Assembler::ldx(s1, simm13a, d); | |
448 #else | |
449 Assembler::ldd(s1, simm13a, d); | |
450 #endif | |
451 } | |
452 | |
453 inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) { | |
454 #ifdef _LP64 | |
455 Assembler::ldx(s1, s2, d); | |
456 #else | |
457 Assembler::ldd(s1, s2, d); | |
458 #endif | |
459 } | |
460 | |
461 inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) { | |
462 #ifdef _LP64 | |
463 Assembler::ldx(a, d, offset); | |
464 #else | |
465 Assembler::ldd(a, d, offset); | |
466 #endif | |
467 } | |
468 | |
469 inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) { | |
470 #ifdef _LP64 | |
471 Assembler::stx(d, s1, s2); | |
472 #else | |
473 Assembler::std(d, s1, s2); | |
474 #endif | |
475 } | |
476 | |
477 inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) { | |
478 #ifdef _LP64 | |
479 Assembler::stx(d, s1, simm13a); | |
480 #else | |
481 Assembler::std(d, s1, simm13a); | |
482 #endif | |
483 } | |
484 | |
485 inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) { | |
486 #ifdef _LP64 | |
487 Assembler::stx(d, s1, s2); | |
488 #else | |
489 Assembler::std(d, s1, s2); | |
490 #endif | |
491 } | |
492 | |
493 inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) { | |
494 #ifdef _LP64 | |
495 Assembler::stx(d, a, offset); | |
496 #else | |
497 Assembler::std(d, a, offset); | |
498 #endif | |
499 } | |
500 | |
501 // Functions for isolating 64 bit shifts for LP64 | |
502 | |
503 inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) { | |
504 #ifdef _LP64 | |
505 Assembler::sllx(s1, s2, d); | |
506 #else | |
507 Assembler::sll( s1, s2, d); | |
508 #endif | |
509 } | |
510 | |
511 inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) { | |
512 #ifdef _LP64 | |
513 Assembler::sllx(s1, imm6a, d); | |
514 #else | |
515 Assembler::sll( s1, imm6a, d); | |
516 #endif | |
517 } | |
518 | |
519 inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) { | |
520 #ifdef _LP64 | |
521 Assembler::srlx(s1, s2, d); | |
522 #else | |
523 Assembler::srl( s1, s2, d); | |
524 #endif | |
525 } | |
526 | |
527 inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) { | |
528 #ifdef _LP64 | |
529 Assembler::srlx(s1, imm6a, d); | |
530 #else | |
531 Assembler::srl( s1, imm6a, d); | |
532 #endif | |
533 } | |
534 | |
535 inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) { | |
536 if (s2.is_register()) sll_ptr(s1, s2.as_register(), d); | |
537 else sll_ptr(s1, s2.as_constant(), d); | |
538 } | |
539 | |
540 // Use the right branch for the platform | |
541 | |
542 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { | |
543 if (VM_Version::v9_instructions_work()) | |
544 Assembler::bp(c, a, icc, p, d, rt); | |
545 else | |
546 Assembler::br(c, a, d, rt); | |
547 } | |
548 | |
549 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) { | |
550 br(c, a, p, target(L)); | |
551 } | |
552 | |
553 | |
554 // Branch that tests either xcc or icc depending on the | |
555 // architecture compiled (LP64 or not) | |
556 inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { | |
557 #ifdef _LP64 | |
558 Assembler::bp(c, a, xcc, p, d, rt); | |
559 #else | |
560 MacroAssembler::br(c, a, p, d, rt); | |
561 #endif | |
562 } | |
563 | |
564 inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) { | |
565 brx(c, a, p, target(L)); | |
566 } | |
567 | |
568 inline void MacroAssembler::ba( Label& L ) { | |
569 br(always, false, pt, L); | |
570 } | |
571 | |
572 // Warning: V9 only functions | |
573 inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { | |
574 Assembler::bp(c, a, cc, p, d, rt); | |
575 } | |
576 | |
577 inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { | |
578 Assembler::bp(c, a, cc, p, L); | |
579 } | |
580 | |
581 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { | |
582 if (VM_Version::v9_instructions_work()) | |
583 fbp(c, a, fcc0, p, d, rt); | |
584 else | |
585 Assembler::fb(c, a, d, rt); | |
586 } | |
587 | |
588 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) { | |
589 fb(c, a, p, target(L)); | |
590 } | |
591 | |
592 inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { | |
593 Assembler::fbp(c, a, cc, p, d, rt); | |
594 } | |
595 | |
596 inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { | |
597 Assembler::fbp(c, a, cc, p, L); | |
598 } | |
599 | |
600 inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); } | |
601 inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); } | |
602 | |
603 inline bool MacroAssembler::is_far_target(address d) { | |
604 if (ForceUnreachable) { | |
605 // References outside the code cache should be treated as far | |
606 return d < CodeCache::low_bound() || d > CodeCache::high_bound(); | |
607 } | |
608 return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound()); | |
609 } | |
610 | |
611 // Call with a check to see if we need to deal with the added | |
612 // expense of relocation and if we overflow the displacement | |
613 // of the quick call instruction. | |
614 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) { | |
615 #ifdef _LP64 | |
616 intptr_t disp; | |
617 // NULL is ok because it will be relocated later. | |
618 // Must change NULL to a reachable address in order to | |
619 // pass asserts here and in wdisp. | |
620 if ( d == NULL ) | |
621 d = pc(); | |
622 | |
623 // Is this address within range of the call instruction? | |
624 // If not, use the expensive instruction sequence | |
625 if (is_far_target(d)) { | |
626 relocate(rt); | |
627 AddressLiteral dest(d); | |
628 jumpl_to(dest, O7, O7); | |
629 } else { | |
630 Assembler::call(d, rt); | |
631 } | |
632 #else | |
633 Assembler::call( d, rt ); | |
634 #endif | |
635 } | |
636 | |
637 inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) { | |
638 MacroAssembler::call( target(L), rt); | |
639 } | |
640 | |
641 | |
642 | |
643 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); } | |
644 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); } | |
645 | |
646 // prefetch instruction | |
647 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) { | |
648 if (VM_Version::v9_instructions_work()) | |
649 Assembler::bp( never, true, xcc, pt, d, rt ); | |
650 } | |
651 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); } | |
652 | |
653 | |
654 // clobbers o7 on V8!! | |
655 // returns delta from gotten pc to addr after | |
656 inline int MacroAssembler::get_pc( Register d ) { | |
657 int x = offset(); | |
658 if (VM_Version::v9_instructions_work()) | |
659 rdpc(d); | |
660 else { | |
661 Label lbl; | |
662 Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8 | |
663 if (d == O7) delayed()->nop(); | |
664 else delayed()->mov(O7, d); | |
665 bind(lbl); | |
666 } | |
667 return offset() - x; | |
668 } | |
669 | |
670 | |
671 // Note: All MacroAssembler::set_foo functions are defined out-of-line. | |
672 | |
673 | |
674 // Loads the current PC of the following instruction as an immediate value in | |
675 // 2 instructions. All PCs in the CodeCache are within 2 Gig of each other. | |
676 inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) { | |
677 intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip; | |
678 #ifdef _LP64 | |
679 Unimplemented(); | |
680 #else | |
681 Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc)); | |
682 Assembler::add(reg,thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc)); | |
683 #endif | |
684 return thepc; | |
685 } | |
686 | |
687 | |
688 inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) { | |
689 assert_not_delayed(); | |
690 if (ForceUnreachable) { | |
691 patchable_sethi(addrlit, d); | |
692 } else { | |
693 sethi(addrlit, d); | |
694 } | |
695 ld(d, addrlit.low10() + offset, d); | |
696 } | |
697 | |
698 | |
699 inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) { | |
700 assert_not_delayed(); | |
701 if (ForceUnreachable) { | |
702 patchable_sethi(addrlit, d); | |
703 } else { | |
704 sethi(addrlit, d); | |
705 } | |
706 ldub(d, addrlit.low10() + offset, d); | |
707 } | |
708 | |
709 | |
710 inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) { | |
711 assert_not_delayed(); | |
712 if (ForceUnreachable) { | |
713 patchable_sethi(addrlit, d); | |
714 } else { | |
715 sethi(addrlit, d); | |
716 } | |
717 ld_ptr(d, addrlit.low10() + offset, d); | |
718 } | |
719 | |
720 | |
721 inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) { | |
722 assert_not_delayed(); | |
723 if (ForceUnreachable) { | |
724 patchable_sethi(addrlit, temp); | |
725 } else { | |
726 sethi(addrlit, temp); | |
727 } | |
728 st(s, temp, addrlit.low10() + offset); | |
729 } | |
730 | |
731 | |
732 inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) { | |
733 assert_not_delayed(); | |
734 if (ForceUnreachable) { | |
735 patchable_sethi(addrlit, temp); | |
736 } else { | |
737 sethi(addrlit, temp); | |
738 } | |
739 st_ptr(s, temp, addrlit.low10() + offset); | |
740 } | |
741 | |
742 | |
743 // This code sequence is relocatable to any address, even on LP64. | |
744 inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) { | |
745 assert_not_delayed(); | |
746 // Force fixed length sethi because NativeJump and NativeFarCall don't handle | |
747 // variable length instruction streams. | |
748 patchable_sethi(addrlit, temp); | |
749 jmpl(temp, addrlit.low10() + offset, d); | |
750 } | |
751 | |
752 | |
753 inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) { | |
754 jumpl_to(addrlit, temp, G0, offset); | |
755 } | |
756 | |
757 | |
758 inline void MacroAssembler::jump_indirect_to(Address& a, Register temp, | |
759 int ld_offset, int jmp_offset) { | |
760 assert_not_delayed(); | |
761 //sethi(al); // sethi is caller responsibility for this one | |
762 ld_ptr(a, temp, ld_offset); | |
763 jmp(temp, jmp_offset); | |
764 } | |
765 | |
766 | |
767 inline void MacroAssembler::set_metadata(Metadata* obj, Register d) { | |
768 set_metadata(allocate_metadata_address(obj), d); | |
769 } | |
770 | |
771 inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) { | |
772 set_metadata(constant_metadata_address(obj), d); | |
773 } | |
774 | |
775 inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) { | |
776 assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc"); | |
777 set(obj_addr, d); | |
778 } | |
779 | |
780 inline void MacroAssembler::set_oop(jobject obj, Register d) { | |
781 set_oop(allocate_oop_address(obj), d); | |
782 } | |
783 | |
784 | |
785 inline void MacroAssembler::set_oop_constant(jobject obj, Register d) { | |
786 set_oop(constant_oop_address(obj), d); | |
787 } | |
788 | |
789 | |
790 inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) { | |
791 assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); | |
792 set(obj_addr, d); | |
793 } | |
794 | |
795 | |
796 inline void MacroAssembler::load_argument( Argument& a, Register d ) { | |
797 if (a.is_register()) | |
798 mov(a.as_register(), d); | |
799 else | |
800 ld (a.as_address(), d); | |
801 } | |
802 | |
803 inline void MacroAssembler::store_argument( Register s, Argument& a ) { | |
804 if (a.is_register()) | |
805 mov(s, a.as_register()); | |
806 else | |
807 st_ptr (s, a.as_address()); // ABI says everything is right justified. | |
808 } | |
809 | |
810 inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) { | |
811 if (a.is_register()) | |
812 mov(s, a.as_register()); | |
813 else | |
814 st_ptr (s, a.as_address()); | |
815 } | |
816 | |
817 | |
818 #ifdef _LP64 | |
819 inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) { | |
820 if (a.is_float_register()) | |
821 // V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2 | |
822 fmov(FloatRegisterImpl::S, s, a.as_float_register() ); | |
823 else | |
824 // Floats are stored in the high half of the stack entry | |
825 // The low half is undefined per the ABI. | |
826 stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat)); | |
827 } | |
828 | |
829 inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) { | |
830 if (a.is_float_register()) | |
831 // V9 ABI has D0, D2, D4 are used to pass instead of O0, O1, O2 | |
832 fmov(FloatRegisterImpl::D, s, a.as_double_register() ); | |
833 else | |
834 stf(FloatRegisterImpl::D, s, a.as_address()); | |
835 } | |
836 | |
837 inline void MacroAssembler::store_long_argument( Register s, Argument& a ) { | |
838 if (a.is_register()) | |
839 mov(s, a.as_register()); | |
840 else | |
841 stx(s, a.as_address()); | |
842 } | |
843 #endif | |
844 | |
845 inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); } | |
846 inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); } | |
847 inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); } | |
848 inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); } | |
849 | |
850 inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); } | |
851 inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); } | |
852 inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); } | |
853 inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); } | |
854 | |
855 // returns if membar generates anything, obviously this code should mirror | |
856 // membar below. | |
857 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) { | |
858 if( !os::is_MP() ) return false; // Not needed on single CPU | |
859 if( VM_Version::v9_instructions_work() ) { | |
860 const Membar_mask_bits effective_mask = | |
861 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); | |
862 return (effective_mask != 0); | |
863 } else { | |
864 return true; | |
865 } | |
866 } | |
867 | |
868 inline void MacroAssembler::membar( Membar_mask_bits const7a ) { | |
869 // Uniprocessors do not need memory barriers | |
870 if (!os::is_MP()) return; | |
871 // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3, | |
872 // 8.4.4.3, a.31 and a.50. | |
873 if( VM_Version::v9_instructions_work() ) { | |
874 // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value | |
875 // of the mmask subfield of const7a that does anything that isn't done | |
876 // implicitly is StoreLoad. | |
877 const Membar_mask_bits effective_mask = | |
878 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); | |
879 if ( effective_mask != 0 ) { | |
880 Assembler::membar( effective_mask ); | |
881 } | |
882 } else { | |
883 // stbar is the closest there is on v8. Equivalent to membar(StoreStore). We | |
884 // do not issue the stbar because to my knowledge all v8 machines implement TSO, | |
885 // which guarantees that all stores behave as if an stbar were issued just after | |
886 // each one of them. On these machines, stbar ought to be a nop. There doesn't | |
887 // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it, | |
888 // it can't be specified by stbar, nor have I come up with a way to simulate it. | |
889 // | |
890 // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent | |
891 // space. Put one here to be on the safe side. | |
892 Assembler::ldstub(SP, 0, G0); | |
893 } | |
894 } | |
895 | |
896 #endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP | 171 #endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP |