/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

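// A simm13 is a 13-bit signed immediate (range -4096..4095), the widest
// immediate a SPARC load/store or arithmetic instruction can encode directly.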
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


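// low10() yields the low 10 bits of the literal, for use as the immediate in
// the standard SPARC sethi pair: sethi materializes the upper 22 bits, and a
// following or/load/store instruction supplies the remaining low 10 bits.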
inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}


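// SPARC branches encode their target as a displacement inside the 32-bit
// instruction word, so patching a branch target is a single word rewrite.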
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

// Use the right loads/stores for the platform
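// ld_ptr/st_ptr move one machine word: 64-bit ldx/stx under _LP64, 32-bit
// ld/st otherwise, letting callers stay word-size agnostic.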
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
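// ld_long/st_long move a 64-bit value: a single ldx/stx under _LP64, and the
// paired-register ldd/std on 32-bit SPARC (where d names the even register of
// an even/odd pair).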
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}

// Functions for isolating 64-bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
  else                  sll_ptr(s1, s2.as_constant(), d);
}

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

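// Typical use of brx (an illustrative sketch only; the register names and
// label are placeholders):
//   cmp(Rvalue, Rlimit);
//   brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
//   delayed()->nop();   // the branch delay slot must be filled explicitly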
inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9-only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

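// A plain call instruction carries a 30-bit word displacement (wdisp30), so
// it can reach +/-2GB from the call site; targets farther away need the
// sethi/jmpl sequence emitted by jumpl_to below.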
inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if (d == NULL)
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}



inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }


// Clobbers O7 on V8!!
// Returns the delta from the captured pc to the address just after the
// emitted sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7)  delayed()->nop();         // the call already deposited the pc in O7
    else          delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}


// Note: All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}


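// The load_*/store_*_contents helpers below share one pattern: materialize
// the high 22 bits of the literal address with sethi (a fixed-length
// patchable_sethi under ForceUnreachable), then fold the low 10 bits into the
// memory instruction's immediate.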
inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}


// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  // Emitting any needed sethi for 'a' is the caller's responsibility here.
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


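// set_oop/set_metadata embed the pointer value in the instruction stream and
// tag it with an oop/metadata relocation, so the GC and metadata bookkeeping
// can find and update the embedded value later.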
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}


inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}


#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes float arguments in F1, F3, F5 instead of O0, O1, O2.
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes double arguments in D0, D2, D4 instead of O0, O1, O2.
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

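// The two add variants below attach a relocation to the emitted add so the
// immediate can be located and patched later (see load_pc_address above for a
// use with internal_word_Relocation).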
inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())  add(a.base(), a.index(), d);
  else              { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)    add(d, offset, d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(), d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d, offset, d);
}

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register()) andn(s1, s2.as_register(), d);
  else                  andn(s1, s2.as_constant(), d);
}

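// The clr* forms store the hardwired-zero register G0, clearing one, two,
// four, or eight bytes of memory respectively.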
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

#ifdef _LP64
// Make all 32-bit loads signed so 64-bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               {                          ld( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  ldf(w, a.base(), a.disp() + offset, d);
}

// Returns whether membar would emit anything; this must mirror membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())  return false;  // Not needed on single CPU
  if (VM_Version::v9_instructions_work()) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())  return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if (VM_Version::v9_instructions_work()) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if (effective_mask != 0) {
      Assembler::membar(effective_mask);
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2)      { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a)      { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()); }
  else               {                          stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(), d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d, offset, d);
}

inline void MacroAssembler::swap(Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d); }
  else               {                          swap(a.base(), a.disp() + offset, d); }
}

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP