Mercurial > hg > truffle
comparison src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp @ 10997:46c544b8fbfc
8008407: remove SPARC V8 support
Summary: Removed most of the SPARC V8 instructions
Reviewed-by: kvn, twisti
author | morris |
---|---|
date | Fri, 07 Jun 2013 16:46:37 -0700 |
parents | 18d56ca3e901 |
children | 87a6f2df28e2 |
comparison
equal
deleted
inserted
replaced
10996:ea60d1de6735 | 10997:46c544b8fbfc |
---|---|
227 } | 227 } |
228 | 228 |
229 // Use the right branch for the platform | 229 // Use the right branch for the platform |
230 | 230 |
231 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { | 231 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { |
232 if (VM_Version::v9_instructions_work()) | 232 Assembler::bp(c, a, icc, p, d, rt); |
233 Assembler::bp(c, a, icc, p, d, rt); | |
234 else | |
235 Assembler::br(c, a, d, rt); | |
236 } | 233 } |
237 | 234 |
238 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) { | 235 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) { |
239 br(c, a, p, target(L)); | 236 br(c, a, p, target(L)); |
240 } | 237 } |
266 inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { | 263 inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { |
267 Assembler::bp(c, a, cc, p, L); | 264 Assembler::bp(c, a, cc, p, L); |
268 } | 265 } |
269 | 266 |
270 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { | 267 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { |
271 if (VM_Version::v9_instructions_work()) | 268 fbp(c, a, fcc0, p, d, rt); |
272 fbp(c, a, fcc0, p, d, rt); | |
273 else | |
274 Assembler::fb(c, a, d, rt); | |
275 } | 269 } |
276 | 270 |
277 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) { | 271 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) { |
278 fb(c, a, p, target(L)); | 272 fb(c, a, p, target(L)); |
279 } | 273 } |
332 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); } | 326 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); } |
333 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); } | 327 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); } |
334 | 328 |
335 // prefetch instruction | 329 // prefetch instruction |
336 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) { | 330 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) { |
337 if (VM_Version::v9_instructions_work()) | 331 Assembler::bp( never, true, xcc, pt, d, rt ); |
338 Assembler::bp( never, true, xcc, pt, d, rt ); | 332 Assembler::bp( never, true, xcc, pt, d, rt ); |
339 } | 333 } |
340 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); } | 334 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); } |
341 | 335 |
342 | 336 |
343 // clobbers o7 on V8!! | 337 // clobbers o7 on V8!! |
344 // returns delta from gotten pc to addr after | 338 // returns delta from gotten pc to addr after |
345 inline int MacroAssembler::get_pc( Register d ) { | 339 inline int MacroAssembler::get_pc( Register d ) { |
346 int x = offset(); | 340 int x = offset(); |
347 if (VM_Version::v9_instructions_work()) | 341 rdpc(d); |
348 rdpc(d); | |
349 else { | |
350 Label lbl; | |
351 Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8 | |
352 if (d == O7) delayed()->nop(); | |
353 else delayed()->mov(O7, d); | |
354 bind(lbl); | |
355 } | |
356 return offset() - x; | 342 return offset() - x; |
357 } | 343 } |
358 | 344 |
359 | 345 |
360 // Note: All MacroAssembler::set_foo functions are defined out-of-line. | 346 // Note: All MacroAssembler::set_foo functions are defined out-of-line. |
644 } | 630 } |
645 | 631 |
646 // returns if membar generates anything, obviously this code should mirror | 632 // returns if membar generates anything, obviously this code should mirror |
647 // membar below. | 633 // membar below. |
648 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) { | 634 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) { |
649 if( !os::is_MP() ) return false; // Not needed on single CPU | 635 if (!os::is_MP()) |
650 if( VM_Version::v9_instructions_work() ) { | 636 return false; // Not needed on single CPU |
651 const Membar_mask_bits effective_mask = | 637 const Membar_mask_bits effective_mask = |
652 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); | 638 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); |
653 return (effective_mask != 0); | 639 return (effective_mask != 0); |
654 } else { | |
655 return true; | |
656 } | |
657 } | 640 } |
658 | 641 |
659 inline void MacroAssembler::membar( Membar_mask_bits const7a ) { | 642 inline void MacroAssembler::membar( Membar_mask_bits const7a ) { |
660 // Uniprocessors do not need memory barriers | 643 // Uniprocessors do not need memory barriers |
661 if (!os::is_MP()) return; | 644 if (!os::is_MP()) |
| 645 return; |
662 // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3, | 646 // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3, |
663 // 8.4.4.3, a.31 and a.50. | 647 // 8.4.4.3, a.31 and a.50. |
664 if( VM_Version::v9_instructions_work() ) { | 648 // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value |
665 // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value | 649 // of the mmask subfield of const7a that does anything that isn't done |
666 // of the mmask subfield of const7a that does anything that isn't done | 650 // implicitly is StoreLoad. |
667 // implicitly is StoreLoad. | 651 const Membar_mask_bits effective_mask = |
668 const Membar_mask_bits effective_mask = | 652 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); |
669 Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); | 653 if (effective_mask != 0) { |
670 if ( effective_mask != 0 ) { | 654 Assembler::membar(effective_mask); |
671 Assembler::membar( effective_mask ); | |
672 } | |
673 } else { | |
674 // stbar is the closest there is on v8. Equivalent to membar(StoreStore). We | |
675 // do not issue the stbar because to my knowledge all v8 machines implement TSO, | |
676 // which guarantees that all stores behave as if an stbar were issued just after | |
677 // each one of them. On these machines, stbar ought to be a nop. There doesn't | |
678 // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it, | |
679 // it can't be specified by stbar, nor have I come up with a way to simulate it. | |
680 // | |
681 // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent | |
682 // space. Put one here to be on the safe side. | |
683 Assembler::ldstub(SP, 0, G0); | |
684 } | 655 } |
685 } | 656 } |
686 | 657 |
687 inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) { | 658 inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) { |
688 relocate(a.rspec(offset)); | 659 relocate(a.rspec(offset)); |