comparison src/cpu/sparc/vm/assembler_sparc.cpp @ 7212:291ffc492eb6

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Fri, 14 Dec 2012 14:35:13 +0100
parents e522a00b91aa f0c2369fda5a
children
7163:2ed8d74e5984 7212:291ffc492eb6
22 * 22 *
23 */ 23 */
24 24
25 #include "precompiled.hpp" 25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp" 26 #include "asm/assembler.hpp"
27 #include "assembler_sparc.inline.hpp" 27 #include "asm/assembler.inline.hpp"
28 #include "gc_interface/collectedHeap.inline.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "memory/cardTableModRefBS.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "prims/methodHandles.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/interfaceSupport.hpp"
35 #include "runtime/objectMonitor.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #ifndef SERIALGC
40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
41 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
42 #include "gc_implementation/g1/heapRegion.hpp"
43 #endif
44
45 #ifdef PRODUCT
46 #define BLOCK_COMMENT(str) /* nothing */
47 #define STOP(error) stop(error)
48 #else
49 #define BLOCK_COMMENT(str) block_comment(str)
50 #define STOP(error) block_comment(error); stop(error)
51 #endif
52
53 // Convert the raw encoding form into the form expected by the
54 // constructor for Address.
55 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
56 assert(scale == 0, "not supported");
57 RelocationHolder rspec;
58 if (disp_reloc != relocInfo::none) {
59 rspec = Relocation::spec_simple(disp_reloc);
60 }
61
62 Register rindex = as_Register(index);
63 if (rindex != G0) {
64 Address madr(as_Register(base), rindex);
65 madr._rspec = rspec;
66 return madr;
67 } else {
68 Address madr(as_Register(base), disp);
69 madr._rspec = rspec;
70 return madr;
71 }
72 }
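// A minimal usage sketch of make_raw(), with hypothetical operands (illustration only):
// a non-G0 index selects the register+register form, otherwise register+displacement.
//   Address reg_reg  = Address::make_raw(L0->encoding(), L1->encoding(), 0, 0,  relocInfo::none); // [L0 + L1]
//   Address reg_disp = Address::make_raw(L0->encoding(), G0->encoding(), 0, 16, relocInfo::none); // [L0 + 16]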
73
74 Address Argument::address_in_frame() const {
75 // Warning: In LP64 mode disp will occupy more than 10 bits, but
76 // op codes such as ld or ldx only access disp() to get
77 // their simm13 argument.
78 int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
79 if (is_in())
80 return Address(FP, disp); // In argument.
81 else
82 return Address(SP, disp); // Out argument.
83 }
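// A worked example of the arithmetic above, with illustrative constants (assumed, not
// read from this file): n_register_parameters == 6, memory_parameter_word_sp_offset == 23,
// BytesPerWord == 8, STACK_BIAS == 0x7ff. Argument number 7 then gets
//   disp = ((7 - 6 + 23) * 8) + 0x7ff = 192 + 2047 = 2239
// i.e. Address(FP, 2239) as an in argument, Address(SP, 2239) as an out argument.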
84
85 static const char* argumentNames[][2] = {
86 {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
87 {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
88 {"A(n>9)","P(n>9)"}
89 };
90
91 const char* Argument::name() const {
92 int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
93 int num = number();
94 if (num >= nofArgs) num = nofArgs - 1;
95 return argumentNames[num][is_in() ? 1 : 0];
96 }
97
98 void Assembler::print_instruction(int inst) {
99 const char* s;
100 switch (inv_op(inst)) {
101 default: s = "????"; break;
102 case call_op: s = "call"; break;
103 case branch_op:
104 switch (inv_op2(inst)) {
105 case fb_op2: s = "fb"; break;
106 case fbp_op2: s = "fbp"; break;
107 case br_op2: s = "br"; break;
108 case bp_op2: s = "bp"; break;
109 case cb_op2: s = "cb"; break;
110 case bpr_op2: {
111 if (is_cbcond(inst)) {
112 s = is_cxb(inst) ? "cxb" : "cwb";
113 } else {
114 s = "bpr";
115 }
116 break;
117 }
118 default: s = "????"; break;
119 }
120 }
121 ::tty->print("%s", s);
122 }
123
124
125 // Patch instruction inst at offset inst_pos to refer to dest_pos
126 // and return the resulting instruction.
127 // We should have pcs, not offsets, but since all is relative, it will work out
128 // OK.
129 int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
130
131 int m; // mask for displacement field
132 int v; // new value for displacement field
133 const int word_aligned_ones = -4;
134 switch (inv_op(inst)) {
135 default: ShouldNotReachHere();
136 case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
137 case branch_op:
138 switch (inv_op2(inst)) {
139 case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
140 case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
141 case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
142 case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
143 case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
144 case bpr_op2: {
145 if (is_cbcond(inst)) {
146 m = wdisp10(word_aligned_ones, 0);
147 v = wdisp10(dest_pos, inst_pos);
148 } else {
149 m = wdisp16(word_aligned_ones, 0);
150 v = wdisp16(dest_pos, inst_pos);
151 }
152 break;
153 }
154 default: ShouldNotReachHere();
155 }
156 }
157 return (inst & ~m) | v;
158 }
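// Roughly what the mask/or patching amounts to for a 19-bit field (a sketch assuming
// wdisp() packs the word-scaled delta into the low bits of the displacement field):
//   int mask     = (1 << 19) - 1;                       // displacement bits
//   int new_disp = ((dest_pos - inst_pos) >> 2) & mask; // branch delta in words
//   int patched  = (inst & ~mask) | new_disp;           // keep opcode/cond bits, swap disp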
159
160 // Return the offset of the branch destination of instruction inst
161 // at offset pos.
162 // Should have pcs, but since all is relative, it works out.
163 int Assembler::branch_destination(int inst, int pos) {
164 int r;
165 switch (inv_op(inst)) {
166 default: ShouldNotReachHere();
167 case call_op: r = inv_wdisp(inst, pos, 30); break;
168 case branch_op:
169 switch (inv_op2(inst)) {
170 case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
171 case bp_op2: r = inv_wdisp( inst, pos, 19); break;
172 case fb_op2: r = inv_wdisp( inst, pos, 22); break;
173 case br_op2: r = inv_wdisp( inst, pos, 22); break;
174 case cb_op2: r = inv_wdisp( inst, pos, 22); break;
175 case bpr_op2: {
176 if (is_cbcond(inst)) {
177 r = inv_wdisp10(inst, pos);
178 } else {
179 r = inv_wdisp16(inst, pos);
180 }
181 break;
182 }
183 default: ShouldNotReachHere();
184 }
185 }
186 return r;
187 }
188 28
189 int AbstractAssembler::code_fill_byte() { 29 int AbstractAssembler::code_fill_byte() {
190 return 0x00; // illegal instruction 0x00000000 30 return 0x00; // illegal instruction 0x00000000
191 } 31 }
192
193 Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
194 switch (in) {
195 case rc_z: return equal;
196 case rc_lez: return lessEqual;
197 case rc_lz: return less;
198 case rc_nz: return notEqual;
199 case rc_gz: return greater;
200 case rc_gez: return greaterEqual;
201 default:
202 ShouldNotReachHere();
203 }
204 return equal;
205 }
206
207 // Generate a bunch 'o stuff (including v9's)
208 #ifndef PRODUCT
209 void Assembler::test_v9() {
210 add( G0, G1, G2 );
211 add( G3, 0, G4 );
212
213 addcc( G5, G6, G7 );
214 addcc( I0, 1, I1 );
215 addc( I2, I3, I4 );
216 addc( I5, -1, I6 );
217 addccc( I7, L0, L1 );
218 addccc( L2, (1 << 12) - 2, L3 );
219
220 Label lbl1, lbl2, lbl3;
221
222 bind(lbl1);
223
224 bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
225 delayed()->nop();
226 bpr( rc_lez, false, pt, L5, lbl1);
227 delayed()->nop();
228
229 fb( f_never, true, pc() + 4, relocInfo::none);
230 delayed()->nop();
231 fb( f_notEqual, false, lbl2 );
232 delayed()->nop();
233
234 fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
235 delayed()->nop();
236 fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
237 delayed()->nop();
238
239 br( equal, true, pc() + 1024, relocInfo::none);
240 delayed()->nop();
241 br( lessEqual, false, lbl1 );
242 delayed()->nop();
243 br( never, false, lbl1 );
244 delayed()->nop();
245
246 bp( less, true, icc, pn, pc(), relocInfo::none);
247 delayed()->nop();
248 bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
249 delayed()->nop();
250
251 call( pc(), relocInfo::none);
252 delayed()->nop();
253 call( lbl3 );
254 delayed()->nop();
255
256
257 casa( L6, L7, O0 );
258 casxa( O1, O2, O3, 0 );
259
260 udiv( O4, O5, O7 );
261 udiv( G0, (1 << 12) - 1, G1 );
262 sdiv( G1, G2, G3 );
263 sdiv( G4, -((1 << 12) - 1), G5 );
264 udivcc( G6, G7, I0 );
265 udivcc( I1, -((1 << 12) - 2), I2 );
266 sdivcc( I3, I4, I5 );
267 sdivcc( I6, -((1 << 12) - 0), I7 );
268
269 done();
270 retry();
271
272 fadd( FloatRegisterImpl::S, F0, F1, F2 );
273 fsub( FloatRegisterImpl::D, F34, F0, F62 );
274
275 fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
276 fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);
277
278 ftox( FloatRegisterImpl::D, F2, F4 );
279 ftoi( FloatRegisterImpl::Q, F4, F8 );
280
281 ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );
282
283 fxtof( FloatRegisterImpl::S, F4, F5 );
284 fitof( FloatRegisterImpl::D, F6, F8 );
285
286 fmov( FloatRegisterImpl::Q, F16, F20 );
287 fneg( FloatRegisterImpl::S, F6, F7 );
288 fabs( FloatRegisterImpl::D, F10, F12 );
289
290 fmul( FloatRegisterImpl::Q, F24, F28, F32 );
291 fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
292 fdiv( FloatRegisterImpl::S, F10, F11, F12 );
293
294 fsqrt( FloatRegisterImpl::S, F13, F14 );
295
296 flush( L0, L1 );
297 flush( L2, -1 );
298
299 flushw();
300
301 illtrap( (1 << 22) - 2);
302
303 impdep1( 17, (1 << 19) - 1 );
304 impdep2( 3, 0 );
305
306 jmpl( L3, L4, L5 );
307 delayed()->nop();
308 jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
309 delayed()->nop();
310
311
312 ldf( FloatRegisterImpl::S, O0, O1, F15 );
313 ldf( FloatRegisterImpl::D, O2, -1, F14 );
314
315
316 ldfsr( O3, O4 );
317 ldfsr( O5, -1 );
318 ldxfsr( O6, O7 );
319 ldxfsr( I0, -1 );
320
321 ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
322 ldfa( FloatRegisterImpl::Q, I3, -1, F36 );
323
324 ldsb( I4, I5, I6 );
325 ldsb( I7, -1, G0 );
326 ldsh( G1, G3, G4 );
327 ldsh( G5, -1, G6 );
328 ldsw( G7, L0, L1 );
329 ldsw( L2, -1, L3 );
330 ldub( L4, L5, L6 );
331 ldub( L7, -1, O0 );
332 lduh( O1, O2, O3 );
333 lduh( O4, -1, O5 );
334 lduw( O6, O7, G0 );
335 lduw( G1, -1, G2 );
336 ldx( G3, G4, G5 );
337 ldx( G6, -1, G7 );
338 ldd( I0, I1, I2 );
339 ldd( I3, -1, I4 );
340
341 ldsba( I5, I6, 2, I7 );
342 ldsba( L0, -1, L1 );
343 ldsha( L2, L3, 3, L4 );
344 ldsha( L5, -1, L6 );
345 ldswa( L7, O0, (1 << 8) - 1, O1 );
346 ldswa( O2, -1, O3 );
347 lduba( O4, O5, 0, O6 );
348 lduba( O7, -1, I0 );
349 lduha( I1, I2, 1, I3 );
350 lduha( I4, -1, I5 );
351 lduwa( I6, I7, 2, L0 );
352 lduwa( L1, -1, L2 );
353 ldxa( L3, L4, 3, L5 );
354 ldxa( L6, -1, L7 );
355 ldda( G0, G1, 4, G2 );
356 ldda( G3, -1, G4 );
357
358 ldstub( G5, G6, G7 );
359 ldstub( O0, -1, O1 );
360
361 ldstuba( O2, O3, 5, O4 );
362 ldstuba( O5, -1, O6 );
363
364 and3( I0, L0, O0 );
365 and3( G7, -1, O7 );
366 andcc( L2, I2, G2 );
367 andcc( L4, -1, G4 );
368 andn( I5, I6, I7 );
369 andn( I6, -1, I7 );
370 andncc( I5, I6, I7 );
371 andncc( I7, -1, I6 );
372 or3( I5, I6, I7 );
373 or3( I7, -1, I6 );
374 orcc( I5, I6, I7 );
375 orcc( I7, -1, I6 );
376 orn( I5, I6, I7 );
377 orn( I7, -1, I6 );
378 orncc( I5, I6, I7 );
379 orncc( I7, -1, I6 );
380 xor3( I5, I6, I7 );
381 xor3( I7, -1, I6 );
382 xorcc( I5, I6, I7 );
383 xorcc( I7, -1, I6 );
384 xnor( I5, I6, I7 );
385 xnor( I7, -1, I6 );
386 xnorcc( I5, I6, I7 );
387 xnorcc( I7, -1, I6 );
388
389 membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
390 membar( StoreStore );
391 membar( LoadStore );
392 membar( StoreLoad );
393 membar( LoadLoad );
394 membar( Sync );
395 membar( MemIssue );
396 membar( Lookaside );
397
398 fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
399 fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );
400
401 movcc( overflowClear, false, icc, I6, L4 );
402 movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );
403
404 movr( rc_nz, I5, I6, I7 );
405 movr( rc_gz, L1, -1, L2 );
406
407 mulx( I5, I6, I7 );
408 mulx( I7, -1, I6 );
409 sdivx( I5, I6, I7 );
410 sdivx( I7, -1, I6 );
411 udivx( I5, I6, I7 );
412 udivx( I7, -1, I6 );
413
414 umul( I5, I6, I7 );
415 umul( I7, -1, I6 );
416 smul( I5, I6, I7 );
417 smul( I7, -1, I6 );
418 umulcc( I5, I6, I7 );
419 umulcc( I7, -1, I6 );
420 smulcc( I5, I6, I7 );
421 smulcc( I7, -1, I6 );
422
423 mulscc( I5, I6, I7 );
424 mulscc( I7, -1, I6 );
425
426 nop();
427
428
429 popc( G0, G1);
430 popc( -1, G2);
431
432 prefetch( L1, L2, severalReads );
433 prefetch( L3, -1, oneRead );
434 prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
435 prefetcha( G2, -1, oneWrite );
436
437 rett( I7, I7);
438 delayed()->nop();
439 rett( G0, -1, relocInfo::none);
440 delayed()->nop();
441
442 save( I5, I6, I7 );
443 save( I7, -1, I6 );
444 restore( I5, I6, I7 );
445 restore( I7, -1, I6 );
446
447 saved();
448 restored();
449
450 sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));
451
452 sll( I5, I6, I7 );
453 sll( I7, 31, I6 );
454 srl( I5, I6, I7 );
455 srl( I7, 0, I6 );
456 sra( I5, I6, I7 );
457 sra( I7, 30, I6 );
458 sllx( I5, I6, I7 );
459 sllx( I7, 63, I6 );
460 srlx( I5, I6, I7 );
461 srlx( I7, 0, I6 );
462 srax( I5, I6, I7 );
463 srax( I7, 62, I6 );
464
465 sir( -1 );
466
467 stbar();
468
469 stf( FloatRegisterImpl::Q, F40, G0, I7 );
470 stf( FloatRegisterImpl::S, F18, I3, -1 );
471
472 stfsr( L1, L2 );
473 stfsr( I7, -1 );
474 stxfsr( I6, I5 );
475 stxfsr( L4, -1 );
476
477 stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
478 stfa( FloatRegisterImpl::Q, F44, G0, -1 );
479
480 stb( L5, O2, I7 );
481 stb( I7, I6, -1 );
482 sth( L5, O2, I7 );
483 sth( I7, I6, -1 );
484 stw( L5, O2, I7 );
485 stw( I7, I6, -1 );
486 stx( L5, O2, I7 );
487 stx( I7, I6, -1 );
488 std( L5, O2, I7 );
489 std( I7, I6, -1 );
490
491 stba( L5, O2, I7, 8 );
492 stba( I7, I6, -1 );
493 stha( L5, O2, I7, 9 );
494 stha( I7, I6, -1 );
495 stwa( L5, O2, I7, 0 );
496 stwa( I7, I6, -1 );
497 stxa( L5, O2, I7, 11 );
498 stxa( I7, I6, -1 );
499 stda( L5, O2, I7, 12 );
500 stda( I7, I6, -1 );
501
502 sub( I5, I6, I7 );
503 sub( I7, -1, I6 );
504 subcc( I5, I6, I7 );
505 subcc( I7, -1, I6 );
506 subc( I5, I6, I7 );
507 subc( I7, -1, I6 );
508 subccc( I5, I6, I7 );
509 subccc( I7, -1, I6 );
510
511 swap( I5, I6, I7 );
512 swap( I7, -1, I6 );
513
514 swapa( G0, G1, 13, G2 );
515 swapa( I7, -1, I6 );
516
517 taddcc( I5, I6, I7 );
518 taddcc( I7, -1, I6 );
519 taddcctv( I5, I6, I7 );
520 taddcctv( I7, -1, I6 );
521
522 tsubcc( I5, I6, I7 );
523 tsubcc( I7, -1, I6 );
524 tsubcctv( I5, I6, I7 );
525 tsubcctv( I7, -1, I6 );
526
527 trap( overflowClear, xcc, G0, G1 );
528 trap( lessEqual, icc, I7, 17 );
529
530 bind(lbl2);
531 bind(lbl3);
532
533 code()->decode();
534 }
535
536 // Generate a bunch 'o stuff unique to V8
537 void Assembler::test_v8_onlys() {
538 Label lbl1;
539
540 cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
541 delayed()->nop();
542 cb( cp_never, true, lbl1);
543 delayed()->nop();
544
545 cpop1(1, 2, 3, 4);
546 cpop2(5, 6, 7, 8);
547
548 ldc( I0, I1, 31);
549 ldc( I2, -1, 0);
550
551 lddc( I4, I4, 30);
552 lddc( I6, 0, 1 );
553
554 ldcsr( L0, L1, 0);
555 ldcsr( L1, (1 << 12) - 1, 17 );
556
557 stc( 31, L4, L5);
558 stc( 30, L6, -(1 << 12) );
559
560 stdc( 0, L7, G0);
561 stdc( 1, G1, 0 );
562
563 stcsr( 16, G2, G3);
564 stcsr( 17, G4, 1 );
565
566 stdcq( 4, G5, G6);
567 stdcq( 5, G7, -1 );
568
569 bind(lbl1);
570
571 code()->decode();
572 }
573 #endif
574
575 // Implementation of MacroAssembler
576
577 void MacroAssembler::null_check(Register reg, int offset) {
578 if (needs_explicit_null_check((intptr_t)offset)) {
579 // provoke OS NULL exception if reg == NULL by
580 // accessing M[reg] w/o changing any registers
581 ld_ptr(reg, 0, G0);
582 }
583 else {
584 // nothing to do, (later) access of M[reg + offset]
585 // will provoke OS NULL exception if reg == NULL
586 }
587 }
588
589 // Ring buffer jumps
590
591 #ifndef PRODUCT
592 void MacroAssembler::ret( bool trace ) { if (trace) {
593 mov(I7, O7); // traceable register
594 JMP(O7, 2 * BytesPerInstWord);
595 } else {
596 jmpl( I7, 2 * BytesPerInstWord, G0 );
597 }
598 }
599
600 void MacroAssembler::retl( bool trace ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
601 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
602 #endif /* PRODUCT */
603
604
605 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
606 assert_not_delayed();
607 // This can only be traceable if r1 & r2 are visible after a window save
608 if (TraceJumps) {
609 #ifndef PRODUCT
610 save_frame(0);
611 verify_thread();
612 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
613 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
614 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
615 add(O2, O1, O1);
616
617 add(r1->after_save(), r2->after_save(), O2);
618 set((intptr_t)file, O3);
619 set(line, O4);
620 Label L;
621 // get nearby pc, store jmp target
622 call(L, relocInfo::none); // No relocation for call to pc+0x8
623 delayed()->st(O2, O1, 0);
624 bind(L);
625
626 // store nearby pc
627 st(O7, O1, sizeof(intptr_t));
628 // store file
629 st(O3, O1, 2*sizeof(intptr_t));
630 // store line
631 st(O4, O1, 3*sizeof(intptr_t));
632 add(O0, 1, O0);
633 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
634 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
635 restore();
636 #endif /* PRODUCT */
637 }
638 jmpl(r1, r2, G0);
639 }
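// The TraceJumps stores above imply a four-slot ring entry; the struct below is only a
// hypothetical rendering of that layout (name and fields inferred from the stores):
//   struct JmpRingEntry {     // 4 * sizeof(intptr_t) per entry
//     intptr_t target;        // jump destination (r1 + r2, or r1 + offset)
//     intptr_t near_pc;       // pc close to the jump, captured via the call to the next pc
//     intptr_t file;          // const char* of the emitting source file
//     intptr_t line;          // line number within that file
//   };
// with the index advancing as index = (index + 1) & (JavaThread::jump_ring_buffer_size - 1).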
640 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
641 assert_not_delayed();
642 // This can only be traceable if r1 is visible after a window save
643 if (TraceJumps) {
644 #ifndef PRODUCT
645 save_frame(0);
646 verify_thread();
647 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
648 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
649 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
650 add(O2, O1, O1);
651
652 add(r1->after_save(), offset, O2);
653 set((intptr_t)file, O3);
654 set(line, O4);
655 Label L;
656 // get nearby pc, store jmp target
657 call(L, relocInfo::none); // No relocation for call to pc+0x8
658 delayed()->st(O2, O1, 0);
659 bind(L);
660
661 // store nearby pc
662 st(O7, O1, sizeof(intptr_t));
663 // store file
664 st(O3, O1, 2*sizeof(intptr_t));
665 // store line
666 st(O4, O1, 3*sizeof(intptr_t));
667 add(O0, 1, O0);
668 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
669 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
670 restore();
671 #endif /* PRODUCT */
672 }
673 jmp(r1, offset);
674 }
675
676 // This code sequence is relocatable to any address, even on LP64.
677 void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
678 assert_not_delayed();
679 // Force fixed length sethi because NativeJump and NativeFarCall don't handle
680 // variable length instruction streams.
681 patchable_sethi(addrlit, temp);
682 Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
683 if (TraceJumps) {
684 #ifndef PRODUCT
685 // Must do the add here so relocation can find the remainder of the
686 // value to be relocated.
687 add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
688 save_frame(0);
689 verify_thread();
690 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
691 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
692 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
693 add(O2, O1, O1);
694
695 set((intptr_t)file, O3);
696 set(line, O4);
697 Label L;
698
699 // get nearby pc, store jmp target
700 call(L, relocInfo::none); // No relocation for call to pc+0x8
701 delayed()->st(a.base()->after_save(), O1, 0);
702 bind(L);
703
704 // store nearby pc
705 st(O7, O1, sizeof(intptr_t));
706 // store file
707 st(O3, O1, 2*sizeof(intptr_t));
708 // store line
709 st(O4, O1, 3*sizeof(intptr_t));
710 add(O0, 1, O0);
711 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
712 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
713 restore();
714 jmpl(a.base(), G0, d);
715 #else
716 jmpl(a.base(), a.disp(), d);
717 #endif /* PRODUCT */
718 } else {
719 jmpl(a.base(), a.disp(), d);
720 }
721 }
722
723 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
724 jumpl(addrlit, temp, G0, offset, file, line);
725 }
726
727
728 // Conditional breakpoint (for assertion checks in assembly code)
729 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
730 trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
731 }
732
733 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
734 void MacroAssembler::breakpoint_trap() {
735 trap(ST_RESERVED_FOR_USER_0);
736 }
737
738 // flush windows (except current) using the flushw instruction if available.
739 void MacroAssembler::flush_windows() {
740 if (VM_Version::v9_instructions_work()) flushw();
741 else flush_windows_trap();
742 }
743
744 // Write serialization page so VM thread can do a pseudo remote membar
745 // We use the current thread pointer to calculate a thread specific
746 // offset to write to within the page. This minimizes bus traffic
747 // due to cache line collision.
748 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
749 srl(thread, os::get_serialize_page_shift_count(), tmp2);
750 if (Assembler::is_simm13(os::vm_page_size())) {
751 and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
752 }
753 else {
754 set((os::vm_page_size() - sizeof(int)), tmp1);
755 and3(tmp2, tmp1, tmp2);
756 }
757 set(os::get_memory_serialize_page(), tmp1);
758 st(G0, tmp1, tmp2);
759 }
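// In C terms the store above hits a thread-specific, int-aligned slot of the serialization
// page; a sketch (local names are assumptions, the os:: accessors are the ones used above):
//   uintptr_t off = ((uintptr_t)thread >> os::get_serialize_page_shift_count())
//                   & (uintptr_t)(os::vm_page_size() - sizeof(int));     // in-page, int-aligned
//   *(volatile int*)((char*)os::get_memory_serialize_page() + off) = 0;  // the st(G0, ...) above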
760
761
762
763 void MacroAssembler::enter() {
764 Unimplemented();
765 }
766
767 void MacroAssembler::leave() {
768 Unimplemented();
769 }
770
771 void MacroAssembler::mult(Register s1, Register s2, Register d) {
772 if(VM_Version::v9_instructions_work()) {
773 mulx (s1, s2, d);
774 } else {
775 smul (s1, s2, d);
776 }
777 }
778
779 void MacroAssembler::mult(Register s1, int simm13a, Register d) {
780 if(VM_Version::v9_instructions_work()) {
781 mulx (s1, simm13a, d);
782 } else {
783 smul (s1, simm13a, d);
784 }
785 }
786
787
788 #ifdef ASSERT
789 void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
790 const Register s1 = G3_scratch;
791 const Register s2 = G4_scratch;
792 Label get_psr_test;
793 // Get the condition codes the V8 way.
794 read_ccr_trap(s1);
795 mov(ccr_save, s2);
796 // This is a test of V8 which has icc but not xcc
797 // so mask off the xcc bits
798 and3(s2, 0xf, s2);
799 // Compare condition codes from the V8 and V9 ways.
800 subcc(s2, s1, G0);
801 br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
802 delayed()->breakpoint_trap();
803 bind(get_psr_test);
804 }
805
806 void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
807 const Register s1 = G3_scratch;
808 const Register s2 = G4_scratch;
809 Label set_psr_test;
810 // Write out the saved condition codes the V8 way
811 write_ccr_trap(ccr_save, s1, s2);
812 // Read back the condition codes using the V9 instruction
813 rdccr(s1);
814 mov(ccr_save, s2);
815 // This is a test of V8 which has icc but not xcc
816 // so mask off the xcc bits
817 and3(s2, 0xf, s2);
818 and3(s1, 0xf, s1);
819 // Compare the V8 way with the V9 way.
820 subcc(s2, s1, G0);
821 br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
822 delayed()->breakpoint_trap();
823 bind(set_psr_test);
824 }
825 #else
826 #define read_ccr_v8_assert(x)
827 #define write_ccr_v8_assert(x)
828 #endif // ASSERT
829
830 void MacroAssembler::read_ccr(Register ccr_save) {
831 if (VM_Version::v9_instructions_work()) {
832 rdccr(ccr_save);
833 // Test code sequence used on V8. Do not move above rdccr.
834 read_ccr_v8_assert(ccr_save);
835 } else {
836 read_ccr_trap(ccr_save);
837 }
838 }
839
840 void MacroAssembler::write_ccr(Register ccr_save) {
841 if (VM_Version::v9_instructions_work()) {
842 // Test code sequence used on V8. Do not move below wrccr.
843 write_ccr_v8_assert(ccr_save);
844 wrccr(ccr_save);
845 } else {
846 const Register temp_reg1 = G3_scratch;
847 const Register temp_reg2 = G4_scratch;
848 write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
849 }
850 }
851
852
853 // Calls to C land
854
855 #ifdef ASSERT
856 // a hook for debugging
857 static Thread* reinitialize_thread() {
858 return ThreadLocalStorage::thread();
859 }
860 #else
861 #define reinitialize_thread ThreadLocalStorage::thread
862 #endif
863
864 #ifdef ASSERT
865 address last_get_thread = NULL;
866 #endif
867
868 // call this when G2_thread is not known to be valid
869 void MacroAssembler::get_thread() {
870 save_frame(0); // to avoid clobbering O0
871 mov(G1, L0); // avoid clobbering G1
872 mov(G5_method, L1); // avoid clobbering G5
873 mov(G3, L2); // avoid clobbering G3 also
874 mov(G4, L5); // avoid clobbering G4
875 #ifdef ASSERT
876 AddressLiteral last_get_thread_addrlit(&last_get_thread);
877 set(last_get_thread_addrlit, L3);
878 inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
879 st_ptr(L4, L3, 0);
880 #endif
881 call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
882 delayed()->nop();
883 mov(L0, G1);
884 mov(L1, G5_method);
885 mov(L2, G3);
886 mov(L5, G4);
887 restore(O0, 0, G2_thread);
888 }
889
890 static Thread* verify_thread_subroutine(Thread* gthread_value) {
891 Thread* correct_value = ThreadLocalStorage::thread();
892 guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
893 return correct_value;
894 }
895
896 void MacroAssembler::verify_thread() {
897 if (VerifyThread) {
898 // NOTE: this chops off the heads of the 64-bit O registers.
899 #ifdef CC_INTERP
900 save_frame(0);
901 #else
902 // make sure G2_thread contains the right value
903 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
904 mov(G1, L1); // avoid clobbering G1
905 // G2 saved below
906 mov(G3, L3); // avoid clobbering G3
907 mov(G4, L4); // avoid clobbering G4
908 mov(G5_method, L5); // avoid clobbering G5_method
909 #endif /* CC_INTERP */
910 #if defined(COMPILER2) && !defined(_LP64)
911 // Save & restore possible 64-bit Long arguments in G-regs
912 srlx(G1,32,L0);
913 srlx(G4,32,L6);
914 #endif
915 call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
916 delayed()->mov(G2_thread, O0);
917
918 mov(L1, G1); // Restore G1
919 // G2 restored below
920 mov(L3, G3); // restore G3
921 mov(L4, G4); // restore G4
922 mov(L5, G5_method); // restore G5_method
923 #if defined(COMPILER2) && !defined(_LP64)
924 // Save & restore possible 64-bit Long arguments in G-regs
925 sllx(L0,32,G2); // Move old high G1 bits high in G2
926 srl(G1, 0,G1); // Clear current high G1 bits
927 or3 (G1,G2,G1); // Recover 64-bit G1
928 sllx(L6,32,G2); // Move old high G4 bits high in G2
929 srl(G4, 0,G4); // Clear current high G4 bits
930 or3 (G4,G2,G4); // Recover 64-bit G4
931 #endif
932 restore(O0, 0, G2_thread);
933 }
934 }
935
936
937 void MacroAssembler::save_thread(const Register thread_cache) {
938 verify_thread();
939 if (thread_cache->is_valid()) {
940 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
941 mov(G2_thread, thread_cache);
942 }
943 if (VerifyThread) {
944 // smash G2_thread, as if the VM were about to anyway
945 set(0x67676767, G2_thread);
946 }
947 }
948
949
950 void MacroAssembler::restore_thread(const Register thread_cache) {
951 if (thread_cache->is_valid()) {
952 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
953 mov(thread_cache, G2_thread);
954 verify_thread();
955 } else {
956 // do it the slow way
957 get_thread();
958 }
959 }
960
961
962 // %%% maybe get rid of [re]set_last_Java_frame
963 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
964 assert_not_delayed();
965 Address flags(G2_thread, JavaThread::frame_anchor_offset() +
966 JavaFrameAnchor::flags_offset());
967 Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
968
969 // Always set last_Java_pc and flags first because once last_Java_sp is visible
970 // has_last_Java_frame is true and users will look at the rest of the fields.
971 // (Note: flags should always be zero before we get here, so it doesn't need to be set.)
972
973 #ifdef ASSERT
974 // Verify that last_Java_pc was zeroed on return to Java
975 Label PcOk;
976 save_frame(0); // to avoid clobbering O0
977 ld_ptr(pc_addr, L0);
978 br_null_short(L0, Assembler::pt, PcOk);
979 STOP("last_Java_pc not zeroed before leaving Java");
980 bind(PcOk);
981
982 // Verify that flags was zeroed on return to Java
983 Label FlagsOk;
984 ld(flags, L0);
985 tst(L0);
986 br(Assembler::zero, false, Assembler::pt, FlagsOk);
987 delayed() -> restore();
988 STOP("flags not zeroed before leaving Java");
989 bind(FlagsOk);
990 #endif /* ASSERT */
991 //
992 // When returning from calling out from Java mode the frame anchor's last_Java_pc
993 // will always be set to NULL. It is set here so that if we are doing a call to
994 // native (not VM) we capture the known pc and don't have to rely on the
995 // native call having a standard frame linkage where we can find the pc.
996
997 if (last_Java_pc->is_valid()) {
998 st_ptr(last_Java_pc, pc_addr);
999 }
1000
1001 #ifdef _LP64
1002 #ifdef ASSERT
1003 // Make sure that we have an odd stack
1004 Label StackOk;
1005 andcc(last_java_sp, 0x01, G0);
1006 br(Assembler::notZero, false, Assembler::pt, StackOk);
1007 delayed()->nop();
1008 STOP("Stack Not Biased in set_last_Java_frame");
1009 bind(StackOk);
1010 #endif // ASSERT
1011 assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
1012 add( last_java_sp, STACK_BIAS, G4_scratch );
1013 st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
1014 #else
1015 st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
1016 #endif // _LP64
1017 }
1018
1019 void MacroAssembler::reset_last_Java_frame(void) {
1020 assert_not_delayed();
1021
1022 Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
1023 Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
1024 Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
1025
1026 #ifdef ASSERT
1027 // check that it WAS previously set
1028 #ifdef CC_INTERP
1029 save_frame(0);
1030 #else
1031 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
1032 #endif /* CC_INTERP */
1033 ld_ptr(sp_addr, L0);
1034 tst(L0);
1035 breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
1036 restore();
1037 #endif // ASSERT
1038
1039 st_ptr(G0, sp_addr);
1040 // Always reset last_Java_pc to zero
1041 st_ptr(G0, pc_addr);
1042 // Always null flags after return to Java
1043 st(G0, flags);
1044 }
1045
1046
1047 void MacroAssembler::call_VM_base(
1048 Register oop_result,
1049 Register thread_cache,
1050 Register last_java_sp,
1051 address entry_point,
1052 int number_of_arguments,
1053 bool check_exceptions)
1054 {
1055 assert_not_delayed();
1056
1057 // determine last_java_sp register
1058 if (!last_java_sp->is_valid()) {
1059 last_java_sp = SP;
1060 }
1061 // debugging support
1062 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1063
1064 // 64-bit last_java_sp is biased!
1065 set_last_Java_frame(last_java_sp, noreg);
1066 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
1067 save_thread(thread_cache);
1068 // do the call
1069 call(entry_point, relocInfo::runtime_call_type);
1070 if (!VerifyThread)
1071 delayed()->mov(G2_thread, O0); // pass thread as first argument
1072 else
1073 delayed()->nop(); // (thread already passed)
1074 restore_thread(thread_cache);
1075 reset_last_Java_frame();
1076
1077 // check for pending exceptions. use Gtemp as scratch register.
1078 if (check_exceptions) {
1079 check_and_forward_exception(Gtemp);
1080 }
1081
1082 #ifdef ASSERT
1083 set(badHeapWordVal, G3);
1084 set(badHeapWordVal, G4);
1085 set(badHeapWordVal, G5);
1086 #endif
1087
1088 // get oop result if there is one and reset the value in the thread
1089 if (oop_result->is_valid()) {
1090 get_vm_result(oop_result);
1091 }
1092 }
1093
1094 void MacroAssembler::check_and_forward_exception(Register scratch_reg)
1095 {
1096 Label L;
1097
1098 check_and_handle_popframe(scratch_reg);
1099 check_and_handle_earlyret(scratch_reg);
1100
1101 Address exception_addr(G2_thread, Thread::pending_exception_offset());
1102 ld_ptr(exception_addr, scratch_reg);
1103 br_null_short(scratch_reg, pt, L);
1104 // we use O7 linkage so that forward_exception_entry has the issuing PC
1105 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
1106 delayed()->nop();
1107 bind(L);
1108 }
1109
1110
1111 void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
1112 }
1113
1114
1115 void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
1116 }
1117
1118
1119 void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1120 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
1121 }
1122
1123
1124 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
1125 // O0 is reserved for the thread
1126 mov(arg_1, O1);
1127 call_VM(oop_result, entry_point, 1, check_exceptions);
1128 }
1129
1130
1131 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
1132 // O0 is reserved for the thread
1133 mov(arg_1, O1);
1134 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1135 call_VM(oop_result, entry_point, 2, check_exceptions);
1136 }
1137
1138
1139 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
1140 // O0 is reserved for the thread
1141 mov(arg_1, O1);
1142 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1143 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
1144 call_VM(oop_result, entry_point, 3, check_exceptions);
1145 }
1146
1147
1148
1149 // Note: The following call_VM overloadings are useful when a "save"
1150 // has already been performed by a stub, and the last Java frame is
1151 // the previous one. In that case, last_java_sp must be passed as FP
1152 // instead of SP.
1153
1154
1155 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
1156 call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1157 }
1158
1159
1160 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
1161 // O0 is reserved for the thread
1162 mov(arg_1, O1);
1163 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1164 }
1165
1166
1167 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
1168 // O0 is reserved for the thread
1169 mov(arg_1, O1);
1170 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1171 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1172 }
1173
1174
1175 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
1176 // O0 is reserved for the thread
1177 mov(arg_1, O1);
1178 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1179 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
1180 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1181 }
1182
1183
1184
1185 void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
1186 assert_not_delayed();
1187 save_thread(thread_cache);
1188 // do the call
1189 call(entry_point, relocInfo::runtime_call_type);
1190 delayed()->nop();
1191 restore_thread(thread_cache);
1192 #ifdef ASSERT
1193 set(badHeapWordVal, G3);
1194 set(badHeapWordVal, G4);
1195 set(badHeapWordVal, G5);
1196 #endif
1197 }
1198
1199
1200 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
1201 call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
1202 }
1203
1204
1205 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
1206 mov(arg_1, O0);
1207 call_VM_leaf(thread_cache, entry_point, 1);
1208 }
1209
1210
1211 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
1212 mov(arg_1, O0);
1213 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
1214 call_VM_leaf(thread_cache, entry_point, 2);
1215 }
1216
1217
1218 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
1219 mov(arg_1, O0);
1220 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
1221 mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
1222 call_VM_leaf(thread_cache, entry_point, 3);
1223 }
1224
1225
1226 void MacroAssembler::get_vm_result(Register oop_result) {
1227 verify_thread();
1228 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
1229 ld_ptr( vm_result_addr, oop_result);
1230 st_ptr(G0, vm_result_addr);
1231 verify_oop(oop_result);
1232 }
1233
1234
1235 void MacroAssembler::get_vm_result_2(Register metadata_result) {
1236 verify_thread();
1237 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
1238 ld_ptr(vm_result_addr_2, metadata_result);
1239 st_ptr(G0, vm_result_addr_2);
1240 }
1241
1242
1243 // We require that C code which does not return a value in vm_result will
1244 // leave it undisturbed.
1245 void MacroAssembler::set_vm_result(Register oop_result) {
1246 verify_thread();
1247 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
1248 verify_oop(oop_result);
1249
1250 # ifdef ASSERT
1251 // Check that we are not overwriting any other oop.
1252 #ifdef CC_INTERP
1253 save_frame(0);
1254 #else
1255 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
1256 #endif /* CC_INTERP */
1257 ld_ptr(vm_result_addr, L0);
1258 tst(L0);
1259 restore();
1260 breakpoint_trap(notZero, Assembler::ptr_cc);
1261 // }
1262 # endif
1263
1264 st_ptr(oop_result, vm_result_addr);
1265 }
1266
1267
1268 void MacroAssembler::ic_call(address entry, bool emit_delay) {
1269 RelocationHolder rspec = virtual_call_Relocation::spec(pc());
1270 patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
1271 relocate(rspec);
1272 call(entry, relocInfo::none);
1273 if (emit_delay) {
1274 delayed()->nop();
1275 }
1276 }
1277
1278
1279 void MacroAssembler::card_table_write(jbyte* byte_map_base,
1280 Register tmp, Register obj) {
1281 #ifdef _LP64
1282 srlx(obj, CardTableModRefBS::card_shift, obj);
1283 #else
1284 srl(obj, CardTableModRefBS::card_shift, obj);
1285 #endif
1286 assert(tmp != obj, "need separate temp reg");
1287 set((address) byte_map_base, tmp);
1288 stb(G0, tmp, obj);
1289 }
1290
1291
1292 void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
1293 address save_pc;
1294 int shiftcnt;
1295 #ifdef _LP64
1296 # ifdef CHECK_DELAY
1297 assert_not_delayed((char*) "cannot put two instructions in delay slot");
1298 # endif
1299 v9_dep();
1300 save_pc = pc();
1301
1302 int msb32 = (int) (addrlit.value() >> 32);
1303 int lsb32 = (int) (addrlit.value());
1304
1305 if (msb32 == 0 && lsb32 >= 0) {
1306 Assembler::sethi(lsb32, d, addrlit.rspec());
1307 }
1308 else if (msb32 == -1) {
1309 Assembler::sethi(~lsb32, d, addrlit.rspec());
1310 xor3(d, ~low10(~0), d);
1311 }
1312 else {
1313 Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
1314 if (msb32 & 0x3ff) // Any bits?
1315 or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
1316 if (lsb32 & 0xFFFFFC00) { // done?
1317 if ((lsb32 >> 20) & 0xfff) { // Any bits set?
1318 sllx(d, 12, d); // Make room for next 12 bits
1319 or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
1320 shiftcnt = 0; // We already shifted
1321 }
1322 else
1323 shiftcnt = 12;
1324 if ((lsb32 >> 10) & 0x3ff) {
1325 sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
1326 or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
1327 shiftcnt = 0;
1328 }
1329 else
1330 shiftcnt = 10;
1331 sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
1332 }
1333 else
1334 sllx(d, 32, d);
1335 }
1336 // Pad out the instruction sequence so it can be patched later.
1337 if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
1338 addrlit.rtype() != relocInfo::runtime_call_type)) {
1339 while (pc() < (save_pc + (7 * BytesPerInstWord)))
1340 nop();
1341 }
1342 #else
1343 Assembler::sethi(addrlit.value(), d, addrlit.rspec());
1344 #endif
1345 }
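// For the general 64-bit case (msb32 neither 0 nor -1) and all intermediate bit groups
// non-zero, the pattern emitted above is, schematically:
//   sethi %hi(msb32), d               ! upper 22 bits of the high word
//   or    d, msb32 & 0x3ff, d         ! low 10 bits of the high word
//   sllx  d, 12, d
//   or    d, (lsb32 >> 20) & 0xfff, d
//   sllx  d, 10, d
//   or    d, (lsb32 >> 10) & 0x3ff, d
//   sllx  d, 10, d                    ! low 10 bits left zero for a later 'add'
// padded with nops to 7 instructions whenever the sequence must remain patchable.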
1346
1347
1348 void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
1349 internal_sethi(addrlit, d, false);
1350 }
1351
1352
1353 void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
1354 internal_sethi(addrlit, d, true);
1355 }
1356
1357
1358 int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
1359 #ifdef _LP64
1360 if (worst_case) return 7;
1361 intptr_t iaddr = (intptr_t) a;
1362 int msb32 = (int) (iaddr >> 32);
1363 int lsb32 = (int) (iaddr);
1364 int count;
1365 if (msb32 == 0 && lsb32 >= 0)
1366 count = 1;
1367 else if (msb32 == -1)
1368 count = 2;
1369 else {
1370 count = 2;
1371 if (msb32 & 0x3ff)
1372 count++;
1373 if (lsb32 & 0xFFFFFC00 ) {
1374 if ((lsb32 >> 20) & 0xfff) count += 2;
1375 if ((lsb32 >> 10) & 0x3ff) count += 2;
1376 }
1377 }
1378 return count;
1379 #else
1380 return 1;
1381 #endif
1382 }
1383
1384 int MacroAssembler::worst_case_insts_for_set() {
1385 return insts_for_sethi(NULL, true) + 1;
1386 }
1387
1388
1389 // Keep in sync with MacroAssembler::insts_for_internal_set
1390 void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
1391 intptr_t value = addrlit.value();
1392
1393 if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
1394 // can optimize
1395 if (-4096 <= value && value <= 4095) {
1396 or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
1397 return;
1398 }
1399 if (inv_hi22(hi22(value)) == value) {
1400 sethi(addrlit, d);
1401 return;
1402 }
1403 }
1404 assert_not_delayed((char*) "cannot put two instructions in delay slot");
1405 internal_sethi(addrlit, d, ForceRelocatable);
1406 if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
1407 add(d, addrlit.low10(), d, addrlit.rspec());
1408 }
1409 }
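// Two fast paths of set() above, with made-up constants: a value in the signed 13-bit
// range collapses to a single or, and a value whose low 10 bits are zero to a single sethi.
//   set(0x123,      d);  // -4096..4095        -> or3(G0, 0x123, d)
//   set(0x12340000, d);  // low10(value) == 0  -> sethi only
//   set(0x12345678, d);  // general case       -> sethi + add(d, low10(value), d)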
1410
1411 // Keep in sync with MacroAssembler::internal_set
1412 int MacroAssembler::insts_for_internal_set(intptr_t value) {
1413 // can optimize
1414 if (-4096 <= value && value <= 4095) {
1415 return 1;
1416 }
1417 if (inv_hi22(hi22(value)) == value) {
1418 return insts_for_sethi((address) value);
1419 }
1420 int count = insts_for_sethi((address) value);
1421 AddressLiteral al(value);
1422 if (al.low10() != 0) {
1423 count++;
1424 }
1425 return count;
1426 }
1427
1428 void MacroAssembler::set(const AddressLiteral& al, Register d) {
1429 internal_set(al, d, false);
1430 }
1431
1432 void MacroAssembler::set(intptr_t value, Register d) {
1433 AddressLiteral al(value);
1434 internal_set(al, d, false);
1435 }
1436
1437 void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
1438 AddressLiteral al(addr, rspec);
1439 internal_set(al, d, false);
1440 }
1441
1442 void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
1443 internal_set(al, d, true);
1444 }
1445
1446 void MacroAssembler::patchable_set(intptr_t value, Register d) {
1447 AddressLiteral al(value);
1448 internal_set(al, d, true);
1449 }
1450
1451
1452 void MacroAssembler::set64(jlong value, Register d, Register tmp) {
1453 assert_not_delayed();
1454 v9_dep();
1455
1456 int hi = (int)(value >> 32);
1457 int lo = (int)(value & ~0);
1458 // (Matcher::isSimpleConstant64 knows about the following optimizations.)
1459 if (Assembler::is_simm13(lo) && value == lo) {
1460 or3(G0, lo, d);
1461 } else if (hi == 0) {
1462 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
1463 if (low10(lo) != 0)
1464 or3(d, low10(lo), d);
1465 }
1466 else if (hi == -1) {
1467 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
1468 xor3(d, low10(lo) ^ ~low10(~0), d);
1469 }
1470 else if (lo == 0) {
1471 if (Assembler::is_simm13(hi)) {
1472 or3(G0, hi, d);
1473 } else {
1474 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
1475 if (low10(hi) != 0)
1476 or3(d, low10(hi), d);
1477 }
1478 sllx(d, 32, d);
1479 }
1480 else {
1481 Assembler::sethi(hi, tmp);
1482 Assembler::sethi(lo, d); // macro assembler version sign-extends
1483 if (low10(hi) != 0)
1484 or3 (tmp, low10(hi), tmp);
1485 if (low10(lo) != 0)
1486 or3 ( d, low10(lo), d);
1487 sllx(tmp, 32, tmp);
1488 or3 (d, tmp, d);
1489 }
1490 }
1491
1492 int MacroAssembler::insts_for_set64(jlong value) {
1493 v9_dep();
1494
1495 int hi = (int) (value >> 32);
1496 int lo = (int) (value & ~0);
1497 int count = 0;
1498
1499 // (Matcher::isSimpleConstant64 knows about the following optimizations.)
1500 if (Assembler::is_simm13(lo) && value == lo) {
1501 count++;
1502 } else if (hi == 0) {
1503 count++;
1504 if (low10(lo) != 0)
1505 count++;
1506 }
1507 else if (hi == -1) {
1508 count += 2;
1509 }
1510 else if (lo == 0) {
1511 if (Assembler::is_simm13(hi)) {
1512 count++;
1513 } else {
1514 count++;
1515 if (low10(hi) != 0)
1516 count++;
1517 }
1518 count++;
1519 }
1520 else {
1521 count += 2;
1522 if (low10(hi) != 0)
1523 count++;
1524 if (low10(lo) != 0)
1525 count++;
1526 count += 2;
1527 }
1528 return count;
1529 }
1530
1531 // compute size in bytes of sparc frame, given
1532 // number of extraWords
1533 int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
1534
1535 int nWords = frame::memory_parameter_word_sp_offset;
1536
1537 nWords += extraWords;
1538
1539 if (nWords & 1) ++nWords; // round up to double-word
1540
1541 return nWords * BytesPerWord;
1542 }
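// Worked example with illustrative constants (assumed, not read from frame_sparc.hpp):
// with memory_parameter_word_sp_offset == 23 and BytesPerWord == 8,
//   total_frame_size_in_bytes(2) = round_up_to_even(23 + 2) * 8 = 26 * 8 = 208
// so save_frame(2) below ends up emitting "save %sp, -208, %sp".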
1543
1544
1545 // save_frame: given number of "extra" words in frame,
1546 // issue the appropriate save instruction (p. 200, V8 manual)
1547
1548 void MacroAssembler::save_frame(int extraWords) {
1549 int delta = -total_frame_size_in_bytes(extraWords);
1550 if (is_simm13(delta)) {
1551 save(SP, delta, SP);
1552 } else {
1553 set(delta, G3_scratch);
1554 save(SP, G3_scratch, SP);
1555 }
1556 }
1557
1558
1559 void MacroAssembler::save_frame_c1(int size_in_bytes) {
1560 if (is_simm13(-size_in_bytes)) {
1561 save(SP, -size_in_bytes, SP);
1562 } else {
1563 set(-size_in_bytes, G3_scratch);
1564 save(SP, G3_scratch, SP);
1565 }
1566 }
1567
1568
1569 void MacroAssembler::save_frame_and_mov(int extraWords,
1570 Register s1, Register d1,
1571 Register s2, Register d2) {
1572 assert_not_delayed();
1573
1574 // The trick here is to use precisely the same memory word
1575 // that trap handlers also use to save the register.
1576 // This word cannot be used for any other purpose, but
1577 // it works fine to save the register's value, whether or not
1578 // an interrupt flushes register windows at any given moment!
1579 Address s1_addr;
1580 if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
1581 s1_addr = s1->address_in_saved_window();
1582 st_ptr(s1, s1_addr);
1583 }
1584
1585 Address s2_addr;
1586 if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
1587 s2_addr = s2->address_in_saved_window();
1588 st_ptr(s2, s2_addr);
1589 }
1590
1591 save_frame(extraWords);
1592
1593 if (s1_addr.base() == SP) {
1594 ld_ptr(s1_addr.after_save(), d1);
1595 } else if (s1->is_valid()) {
1596 mov(s1->after_save(), d1);
1597 }
1598
1599 if (s2_addr.base() == SP) {
1600 ld_ptr(s2_addr.after_save(), d2);
1601 } else if (s2->is_valid()) {
1602 mov(s2->after_save(), d2);
1603 }
1604 }
1605
1606
1607 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1608 assert(oop_recorder() != NULL, "this assembler needs a Recorder");
1609 int index = oop_recorder()->allocate_metadata_index(obj);
1610 RelocationHolder rspec = metadata_Relocation::spec(index);
1611 return AddressLiteral((address)obj, rspec);
1612 }
1613
1614 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1615 assert(oop_recorder() != NULL, "this assembler needs a Recorder");
1616 int index = oop_recorder()->find_index(obj);
1617 RelocationHolder rspec = metadata_Relocation::spec(index);
1618 return AddressLiteral((address)obj, rspec);
1619 }
1620
1621
1622 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1623 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1624 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
1625 int oop_index = oop_recorder()->find_index(obj);
1626 return AddressLiteral(obj, oop_Relocation::spec(oop_index));
1627 }
1628
1629 void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
1630 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1631 int oop_index = oop_recorder()->find_index(obj);
1632 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1633
1634 assert_not_delayed();
1635 // Relocation with special format (see relocInfo_sparc.hpp).
1636 relocate(rspec, 1);
1637 // Assembler::sethi(0x3fffff, d);
1638 emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
1639 // Don't add relocation for 'add'. Do patching during 'sethi' processing.
1640 add(d, 0x3ff, d);
1641
1642 }
1643
1644 void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
1645 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1646 int klass_index = oop_recorder()->find_index(k);
1647 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
1648 narrowOop encoded_k = oopDesc::encode_klass(k);
1649
1650 assert_not_delayed();
1651 // Relocation with special format (see relocInfo_sparc.hpp).
1652 relocate(rspec, 1);
1653 // Assembler::sethi(encoded_k, d);
1654 emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
1655 // Don't add relocation for 'add'. Do patching during 'sethi' processing.
1656 add(d, low10(encoded_k), d);
1657
1658 }
1659
1660 void MacroAssembler::align(int modulus) {
1661 while (offset() % modulus != 0) nop();
1662 }
1663
1664
1665 void MacroAssembler::safepoint() {
1666 relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
1667 }
1668
1669
1670 void RegistersForDebugging::print(outputStream* s) {
1671 FlagSetting fs(Debugging, true);
1672 int j;
1673 for (j = 0; j < 8; ++j) {
1674 if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
1675 else { s->print( "fp = " ); os::print_location(s, i[j]); }
1676 }
1677 s->cr();
1678
1679 for (j = 0; j < 8; ++j) {
1680 s->print("l%d = ", j); os::print_location(s, l[j]);
1681 }
1682 s->cr();
1683
1684 for (j = 0; j < 8; ++j) {
1685 if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
1686 else { s->print( "sp = " ); os::print_location(s, o[j]); }
1687 }
1688 s->cr();
1689
1690 for (j = 0; j < 8; ++j) {
1691 s->print("g%d = ", j); os::print_location(s, g[j]);
1692 }
1693 s->cr();
1694
1695 // print out floats with compression
1696 for (j = 0; j < 32; ) {
1697 jfloat val = f[j];
1698 int last = j;
1699 for ( ; last+1 < 32; ++last ) {
1700 char b1[1024], b2[1024];
1701 sprintf(b1, "%f", val);
1702 sprintf(b2, "%f", f[last+1]);
1703 if (strcmp(b1, b2))
1704 break;
1705 }
1706 s->print("f%d", j);
1707 if ( j != last ) s->print(" - f%d", last);
1708 s->print(" = %f", val);
1709 s->fill_to(25);
1710 s->print_cr(" (0x%x)", *(int*)&val);
1711 j = last + 1;
1712 }
1713 s->cr();
1714
1715 // and doubles (evens only)
1716 for (j = 0; j < 32; ) {
1717 jdouble val = d[j];
1718 int last = j;
1719 for ( ; last+1 < 32; ++last ) {
1720 char b1[1024], b2[1024];
1721 sprintf(b1, "%f", val);
1722 sprintf(b2, "%f", d[last+1]);
1723 if (strcmp(b1, b2))
1724 break;
1725 }
1726 s->print("d%d", 2 * j);
1727 if ( j != last ) s->print(" - d%d", last);
1728 s->print(" = %f", val);
1729 s->fill_to(30);
1730 s->print("(0x%x)", *(int*)&val);
1731 s->fill_to(42);
1732 s->print_cr("(0x%x)", *(1 + (int*)&val));
1733 j = last + 1;
1734 }
1735 s->cr();
1736 }
1737
1738 void RegistersForDebugging::save_registers(MacroAssembler* a) {
1739 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
1740 a->flush_windows();
1741 int i;
1742 for (i = 0; i < 8; ++i) {
1743 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
1744 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
1745 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
1746 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
1747 }
1748 for (i = 0; i < 32; ++i) {
1749 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
1750 }
1751 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
1752 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
1753 }
1754 }
1755
1756 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
1757 for (int i = 1; i < 8; ++i) {
1758 a->ld_ptr(r, g_offset(i), as_gRegister(i));
1759 }
1760 for (int j = 0; j < 32; ++j) {
1761 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
1762 }
1763 for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
1764 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
1765 }
1766 }
1767
1768
1769 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
1770 void MacroAssembler::push_fTOS() {
1771 // %%%%%% need to implement this
1772 }
1773
1774 // pops double TOS element from CPU stack and pushes on FPU stack
1775 void MacroAssembler::pop_fTOS() {
1776 // %%%%%% need to implement this
1777 }
1778
1779 void MacroAssembler::empty_FPU_stack() {
1780 // %%%%%% need to implement this
1781 }
1782
1783 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
1784 // plausibility check for oops
1785 if (!VerifyOops) return;
1786
1787 if (reg == G0) return; // always NULL, which is always an oop
1788
1789 BLOCK_COMMENT("verify_oop {");
1790 char buffer[64];
1791 #ifdef COMPILER1
1792 if (CommentedAssembly) {
1793 snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
1794 block_comment(buffer);
1795 }
1796 #endif
1797
1798 int len = strlen(file) + strlen(msg) + 1 + 4;
1799 sprintf(buffer, "%d", line);
1800 len += strlen(buffer);
1801 sprintf(buffer, " at offset %d ", offset());
1802 len += strlen(buffer);
1803 char * real_msg = new char[len];
1804 sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
1805
1806 // Call indirectly to solve generation ordering problem
1807 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
1808
1809 // Make some space on stack above the current register window.
1810 // Enough to hold 8 64-bit registers.
1811 add(SP,-8*8,SP);
1812
1813 // Save some 64-bit registers; a normal 'save' chops the heads off
1814 // of 64-bit longs in the 32-bit build.
1815 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1816 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1817 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
1818 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1819
1820 // Size of set() should stay the same
1821 patchable_set((intptr_t)real_msg, O1);
1822 // Load address to call to into O7
1823 load_ptr_contents(a, O7);
1824 // Register call to verify_oop_subroutine
1825 callr(O7, G0);
1826 delayed()->nop();
1827 // recover frame size
1828 add(SP, 8*8,SP);
1829 BLOCK_COMMENT("} verify_oop");
1830 }
1831
1832 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
1833 // plausibility check for oops
1834 if (!VerifyOops) return;
1835
1836 char buffer[64];
1837 sprintf(buffer, "%d", line);
1838 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
1839 sprintf(buffer, " at SP+%d ", addr.disp());
1840 len += strlen(buffer);
1841 char * real_msg = new char[len];
1842 sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
1843
1844 // Call indirectly to solve generation ordering problem
1845 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
1846
1847 // Make some space on stack above the current register window.
1848 // Enough to hold 8 64-bit registers.
1849 add(SP,-8*8,SP);
1850
1851 // Save some 64-bit registers; a normal 'save' chops the heads off
1852 // of 64-bit longs in the 32-bit build.
1853 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1854 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1855 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
1856 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1857
1858 // Size of set() should stay the same
1859 patchable_set((intptr_t)real_msg, O1);
1860 // Load address to call to into O7
1861 load_ptr_contents(a, O7);
1862 // Register call to verify_oop_subroutine
1863 callr(O7, G0);
1864 delayed()->nop();
1865 // recover frame size
1866 add(SP, 8*8,SP);
1867 }
1868
1869 // side-door communication with signalHandler in os_solaris.cpp
1870 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
1871
1872 // This macro is expanded just once; it creates shared code. Contract:
1873 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
1874 // registers, including flags. May not use a register 'save', as this blows
1875 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
1876 // call.
1877 void MacroAssembler::verify_oop_subroutine() {
1878 assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
1879
1880 // Leaf call; no frame.
1881 Label succeed, fail, null_or_fail;
1882
1883 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
1884 // O0 is now the oop to be checked. O7 is the return address.
1885 Register O0_obj = O0;
1886
1887 // Save some more registers for temps.
1888 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
1889 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
1890 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
1891 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
1892
1893 // Save flags
1894 Register O5_save_flags = O5;
1895 rdccr( O5_save_flags );
1896
1897 { // count number of verifies
1898 Register O2_adr = O2;
1899 Register O3_accum = O3;
1900 inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
1901 }
1902
1903 Register O2_mask = O2;
1904 Register O3_bits = O3;
1905 Register O4_temp = O4;
1906
1907 // mark lower end of faulting range
1908 assert(_verify_oop_implicit_branch[0] == NULL, "set once");
1909 _verify_oop_implicit_branch[0] = pc();
1910
1911 // We can't check the mark oop because it could be in the process of
1912 // locking or unlocking while this is running.
1913 set(Universe::verify_oop_mask (), O2_mask);
1914 set(Universe::verify_oop_bits (), O3_bits);
1915
1916 // assert((obj & oop_mask) == oop_bits);
1917 and3(O0_obj, O2_mask, O4_temp);
1918 cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);
1919
1920 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
1921 // the null_or_fail case is useless; must test for null separately
1922 br_null_short(O0_obj, pn, succeed);
1923 }
1924
1925 // Check the Klass* of this object for being in the right area of memory.
1926 // Cannot do the load in the delay slot above, in case O0 is null
1927 load_klass(O0_obj, O0_obj);
1928 // assert(klass != NULL)
1929 br_null_short(O0_obj, pn, fail);
1930 // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
1931
1932 wrccr( O5_save_flags ); // Restore CCR's
1933
1934 // mark upper end of faulting range
1935 _verify_oop_implicit_branch[1] = pc();
1936
1937 //-----------------------
1938 // all tests pass
1939 bind(succeed);
1940
1941 // Restore prior 64-bit registers
1942 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
1943 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
1944 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
1945 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
1946 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
1947 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
1948
1949 retl(); // Leaf return; restore prior O7 in delay slot
1950 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
1951
1952 //-----------------------
1953 bind(null_or_fail); // nulls are less common but OK
1954 br_null(O0_obj, false, pt, succeed);
1955 delayed()->wrccr( O5_save_flags ); // Restore CCR's
1956
1957 //-----------------------
1958 // report failure:
1959 bind(fail);
1960 _verify_oop_implicit_branch[2] = pc();
1961
1962 wrccr( O5_save_flags ); // Restore CCR's
1963
1964 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1965
1966 // stop_subroutine expects message pointer in I1.
1967 mov(I1, O1);
1968
1969 // Restore prior 64-bit registers
1970 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
1971 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
1972 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
1973 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
1974 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
1975 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
1976
1977 // factor long stop-sequence into subroutine to save space
1978 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1979
1980 // call indirectly to solve generation ordering problem
1981 AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
1982 load_ptr_contents(al, O5);
1983 jmpl(O5, 0, O7);
1984 delayed()->nop();
1985 }
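
// A minimal, standalone sketch of the plausibility test the subroutine above
// applies to a candidate oop, written in plain C++. The mask/bits parameters
// stand in for Universe::verify_oop_mask() / Universe::verify_oop_bits(); the
// real stub additionally loads the klass pointer and rejects a NULL klass.
#include <cstdint>

static bool plausible_oop(uintptr_t obj, uintptr_t oop_mask, uintptr_t oop_bits) {
  if (obj == 0) return true;               // NULL is always an acceptable oop
  return (obj & oop_mask) == oop_bits;     // alignment / heap-range pattern check
}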
1986
1987
1988 void MacroAssembler::stop(const char* msg) {
1989 // save frame first to get O7 for return address
1990 // add one word to the size in case the struct is an odd number of words long;
1991 // it must be doubleword-aligned for storing doubles into it.
1992
1993 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1994
1995 // stop_subroutine expects message pointer in I1.
1996 // Size of set() should stay the same
1997 patchable_set((intptr_t)msg, O1);
1998
1999 // factor long stop-sequence into subroutine to save space
2000 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
2001
2002 // call indirectly to solve generation ordering problem
2003 AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
2004 load_ptr_contents(a, O5);
2005 jmpl(O5, 0, O7);
2006 delayed()->nop();
2007
2008 breakpoint_trap(); // make stop actually stop rather than writing
2009 // unnoticeable results in the output files.
2010
2011 // restore(); done in callee to save space!
2012 }
2013
2014
2015 void MacroAssembler::warn(const char* msg) {
2016 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
2017 RegistersForDebugging::save_registers(this);
2018 mov(O0, L0);
2019 // Size of set() should stay the same
2020 patchable_set((intptr_t)msg, O0);
2021 call( CAST_FROM_FN_PTR(address, warning) );
2022 delayed()->nop();
2023 // ret();
2024 // delayed()->restore();
2025 RegistersForDebugging::restore_registers(this, L0);
2026 restore();
2027 }
2028
2029
2030 void MacroAssembler::untested(const char* what) {
2031 // We must be able to turn interactive prompting off
2032 // in order to run automated test scripts on the VM
2033 // Use the flag ShowMessageBoxOnError
2034
2035 char* b = new char[1024];
2036 sprintf(b, "untested: %s", what);
2037
2038 if (ShowMessageBoxOnError) { STOP(b); }
2039 else { warn(b); }
2040 }
2041
2042
2043 void MacroAssembler::stop_subroutine() {
2044 RegistersForDebugging::save_registers(this);
2045
2046 // for the sake of the debugger, stick a PC on the current frame
2047 // (this assumes that the caller has performed an extra "save")
2048 mov(I7, L7);
2049 add(O7, -7 * BytesPerInt, I7);
2050
2051 save_frame(); // one more save to free up another O7 register
2052 mov(I0, O1); // addr of reg save area
2053
2054 // We expect pointer to message in I1. Caller must set it up in O1
2055 mov(I1, O0); // get msg
2056 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
2057 delayed()->nop();
2058
2059 restore();
2060
2061 RegistersForDebugging::restore_registers(this, O0);
2062
2063 save_frame(0);
2064 call(CAST_FROM_FN_PTR(address,breakpoint));
2065 delayed()->nop();
2066 restore();
2067
2068 mov(L7, I7);
2069 retl();
2070 delayed()->restore(); // see stop above
2071 }
2072
2073
2074 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
2075 if ( ShowMessageBoxOnError ) {
2076 JavaThread* thread = JavaThread::current();
2077 JavaThreadState saved_state = thread->thread_state();
2078 thread->set_thread_state(_thread_in_vm);
2079 {
2080 // In order to get locks to work, we need to fake an in_VM state
2081 ttyLocker ttyl;
2082 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
2083 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2084 BytecodeCounter::print();
2085 }
2086 if (os::message_box(msg, "Execution stopped, print registers?"))
2087 regs->print(::tty);
2088 }
2089 BREAKPOINT;
2090 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
2091 }
2092 else {
2093 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
2094 }
2095 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
2096 }
2097
2098 #ifndef PRODUCT
2099 void MacroAssembler::test() {
2100 ResourceMark rm;
2101
2102 CodeBuffer cb("test", 10000, 10000);
2103 MacroAssembler* a = new MacroAssembler(&cb);
2104 VM_Version::allow_all();
2105 a->test_v9();
2106 a->test_v8_onlys();
2107 VM_Version::revert();
2108
2109 StubRoutines::Sparc::test_stop_entry()();
2110 }
2111 #endif
2112
2113
2114 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
2115 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
2116 Label no_extras;
2117 br( negative, true, pt, no_extras ); // if neg, clear reg
2118 delayed()->set(0, Rresult); // annulled, so only if taken
2119 bind( no_extras );
2120 }
2121
2122
2123 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
2124 #ifdef _LP64
2125 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
2126 #else
2127 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
2128 #endif
2129 bclr(1, Rresult);
2130 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
2131 }
2132
2133
2134 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
2135 calc_frame_size(Rextra_words, Rresult);
2136 neg(Rresult);
2137 save(SP, Rresult, SP);
2138 }
2139
2140
2141 // ---------------------------------------------------------
2142 Assembler::RCondition cond2rcond(Assembler::Condition c) {
2143 switch (c) {
2144 /*case zero: */
2145 case Assembler::equal: return Assembler::rc_z;
2146 case Assembler::lessEqual: return Assembler::rc_lez;
2147 case Assembler::less: return Assembler::rc_lz;
2148 /*case notZero:*/
2149 case Assembler::notEqual: return Assembler::rc_nz;
2150 case Assembler::greater: return Assembler::rc_gz;
2151 case Assembler::greaterEqual: return Assembler::rc_gez;
2152 }
2153 ShouldNotReachHere();
2154 return Assembler::rc_z;
2155 }
2156
2157 // compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
2158 void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
2159 tst(s1);
2160 br (c, a, p, L);
2161 }
2162
2163 // Compares a pointer register with zero and branches on null.
2164 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
2165 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
2166 assert_not_delayed();
2167 #ifdef _LP64
2168 bpr( rc_z, a, p, s1, L );
2169 #else
2170 tst(s1);
2171 br ( zero, a, p, L );
2172 #endif
2173 }
2174
2175 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
2176 assert_not_delayed();
2177 #ifdef _LP64
2178 bpr( rc_nz, a, p, s1, L );
2179 #else
2180 tst(s1);
2181 br ( notZero, a, p, L );
2182 #endif
2183 }
2184
2185 // Compare registers and branch with nop in delay slot or cbcond without delay slot.
2186
2187 // Compare integer (32 bit) values (icc only).
2188 void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
2189 Predict p, Label& L) {
2190 assert_not_delayed();
2191 if (use_cbcond(L)) {
2192 Assembler::cbcond(c, icc, s1, s2, L);
2193 } else {
2194 cmp(s1, s2);
2195 br(c, false, p, L);
2196 delayed()->nop();
2197 }
2198 }
2199
2200 // Compare integer (32 bit) values (icc only).
2201 void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
2202 Predict p, Label& L) {
2203 assert_not_delayed();
2204 if (is_simm(simm13a,5) && use_cbcond(L)) {
2205 Assembler::cbcond(c, icc, s1, simm13a, L);
2206 } else {
2207 cmp(s1, simm13a);
2208 br(c, false, p, L);
2209 delayed()->nop();
2210 }
2211 }
2212
2213 // Branch that tests xcc in LP64 and icc in !LP64
2214 void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
2215 Predict p, Label& L) {
2216 assert_not_delayed();
2217 if (use_cbcond(L)) {
2218 Assembler::cbcond(c, ptr_cc, s1, s2, L);
2219 } else {
2220 cmp(s1, s2);
2221 brx(c, false, p, L);
2222 delayed()->nop();
2223 }
2224 }
2225
2226 // Branch that tests xcc in LP64 and icc in !LP64
2227 void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
2228 Predict p, Label& L) {
2229 assert_not_delayed();
2230 if (is_simm(simm13a,5) && use_cbcond(L)) {
2231 Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
2232 } else {
2233 cmp(s1, simm13a);
2234 brx(c, false, p, L);
2235 delayed()->nop();
2236 }
2237 }
2238
2239 // Short branch version for comparing a pointer with zero.
2240
2241 void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
2242 assert_not_delayed();
2243 if (use_cbcond(L)) {
2244 Assembler::cbcond(zero, ptr_cc, s1, 0, L);
2245 return;
2246 }
2247 br_null(s1, false, p, L);
2248 delayed()->nop();
2249 }
2250
2251 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
2252 assert_not_delayed();
2253 if (use_cbcond(L)) {
2254 Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
2255 return;
2256 }
2257 br_notnull(s1, false, p, L);
2258 delayed()->nop();
2259 }
2260
2261 // Unconditional short branch
2262 void MacroAssembler::ba_short(Label& L) {
2263 if (use_cbcond(L)) {
2264 Assembler::cbcond(equal, icc, G0, G0, L);
2265 return;
2266 }
2267 br(always, false, pt, L);
2268 delayed()->nop();
2269 }
2270
2271 // instruction sequences factored across compiler & interpreter
2272
2273
2274 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
2275 Register Rb_hi, Register Rb_low,
2276 Register Rresult) {
2277
2278 Label check_low_parts, done;
2279
2280 cmp(Ra_hi, Rb_hi ); // compare hi parts
2281 br(equal, true, pt, check_low_parts);
2282 delayed()->cmp(Ra_low, Rb_low); // test low parts
2283
2284 // And, with an unsigned comparison, it does not matter if the numbers
2285 // are negative or not.
2286 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
2287 // The second one is bigger (unsignedly).
2288
2289 // Other notes: The first move in each triplet can be unconditional
2290 // (and therefore probably prefetchable).
2291 // And the equals case for the high part does not need testing,
2292 // since that triplet is reached only after finding the high halves differ.
2293
2294 if (VM_Version::v9_instructions_work()) {
2295 mov(-1, Rresult);
2296 ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
2297 } else {
2298 br(less, true, pt, done); delayed()-> set(-1, Rresult);
2299 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
2300 }
2301
2302 bind( check_low_parts );
2303
2304 if (VM_Version::v9_instructions_work()) {
2305 mov( -1, Rresult);
2306 movcc(equal, false, icc, 0, Rresult);
2307 movcc(greaterUnsigned, false, icc, 1, Rresult);
2308 } else {
2309 set(-1, Rresult);
2310 br(equal, true, pt, done); delayed()->set( 0, Rresult);
2311 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
2312 }
2313 bind( done );
2314 }
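
// A standalone C++ sketch of the same two-halves comparison, illustrating the
// comment above: once the high words are equal, the low words can be compared
// as unsigned values (e.g. -2L vs -1L has low words 0xfffffffe and 0xffffffff,
// and the unsigned order is the correct one). Names here are illustrative only.
#include <cstdint>

static int lcmp_by_halves(int64_t a, int64_t b) {
  int32_t  a_hi = (int32_t)(a >> 32);   // signed high halves
  int32_t  b_hi = (int32_t)(b >> 32);
  uint32_t a_lo = (uint32_t)a;          // unsigned low halves
  uint32_t b_lo = (uint32_t)b;
  if (a_hi != b_hi) return (a_hi < b_hi) ? -1 : 1;
  if (a_lo != b_lo) return (a_lo < b_lo) ? -1 : 1;
  return 0;
}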
2315
2316 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
2317 subcc( G0, Rlow, Rlow );
2318 subc( G0, Rhi, Rhi );
2319 }
2320
2321 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
2322 Register Rcount,
2323 Register Rout_high, Register Rout_low,
2324 Register Rtemp ) {
2325
2326
2327 Register Ralt_count = Rtemp;
2328 Register Rxfer_bits = Rtemp;
2329
2330 assert( Ralt_count != Rin_high
2331 && Ralt_count != Rin_low
2332 && Ralt_count != Rcount
2333 && Rxfer_bits != Rin_low
2334 && Rxfer_bits != Rin_high
2335 && Rxfer_bits != Rcount
2336 && Rxfer_bits != Rout_low
2337 && Rout_low != Rin_high,
2338 "register alias checks");
2339
2340 Label big_shift, done;
2341
2342 // This code can be optimized to use the 64 bit shifts in V9.
2343 // Here we use the 32 bit shifts.
2344
2345 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2346 subcc(Rcount, 31, Ralt_count);
2347 br(greater, true, pn, big_shift);
2348 delayed()->dec(Ralt_count);
2349
2350 // shift < 32 bits, Ralt_count = Rcount-31
2351
2352 // We get the transfer bits by shifting the low register right by 32-count.
2353 // This is done by shifting right by 31-count and then by one
2354 // more to take care of the special (rare) case where count is zero
2355 // (shifting by 32 would not work).
2356
2357 neg(Ralt_count);
2358
2359 // The order of the next two instructions is critical in the case where
2360 // Rin and Rout are the same and should not be reversed.
2361
2362 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
2363 if (Rcount != Rout_low) {
2364 sll(Rin_low, Rcount, Rout_low); // low half
2365 }
2366 sll(Rin_high, Rcount, Rout_high);
2367 if (Rcount == Rout_low) {
2368 sll(Rin_low, Rcount, Rout_low); // low half
2369 }
2370 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
2371 ba(done);
2372 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
2373
2374 // shift >= 32 bits, Ralt_count = Rcount-32
2375 bind(big_shift);
2376 sll(Rin_low, Ralt_count, Rout_high );
2377 clr(Rout_low);
2378
2379 bind(done);
2380 }
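
// A standalone C++ sketch of the decomposition used above: a 64-bit left shift
// built only from 32-bit shifts. The 31-then-1 step reproduces the trick in the
// comment, so a shift count of zero never turns into a shift by 32.
#include <cstdint>

static void lshl_by_halves(uint32_t in_hi, uint32_t in_lo, unsigned count,
                           uint32_t* out_hi, uint32_t* out_lo) {
  count &= 0x3f;                                    // take least significant 6 bits
  if (count < 32) {
    uint32_t xfer = (in_lo >> (31 - count)) >> 1;   // bits that cross into the high word
    *out_hi = (in_hi << count) | xfer;
    *out_lo = in_lo << count;
  } else {
    *out_hi = in_lo << (count - 32);                // low word becomes (part of) the high word
    *out_lo = 0;
  }
}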
2381
2382
2383 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
2384 Register Rcount,
2385 Register Rout_high, Register Rout_low,
2386 Register Rtemp ) {
2387
2388 Register Ralt_count = Rtemp;
2389 Register Rxfer_bits = Rtemp;
2390
2391 assert( Ralt_count != Rin_high
2392 && Ralt_count != Rin_low
2393 && Ralt_count != Rcount
2394 && Rxfer_bits != Rin_low
2395 && Rxfer_bits != Rin_high
2396 && Rxfer_bits != Rcount
2397 && Rxfer_bits != Rout_high
2398 && Rout_high != Rin_low,
2399 "register alias checks");
2400
2401 Label big_shift, done;
2402
2403 // This code can be optimized to use the 64 bit shifts in V9.
2404 // Here we use the 32 bit shifts.
2405
2406 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2407 subcc(Rcount, 31, Ralt_count);
2408 br(greater, true, pn, big_shift);
2409 delayed()->dec(Ralt_count);
2410
2411 // shift < 32 bits, Ralt_count = Rcount-31
2412
2413 // We get the transfer bits by shifting the high register left by 32-count.
2414 // This is done by shifting left by 31-count and then by one
2415 // more to take care of the special (rare) case where count is zero
2416 // (shifting by 32 would not work).
2417
2418 neg(Ralt_count);
2419 if (Rcount != Rout_low) {
2420 srl(Rin_low, Rcount, Rout_low);
2421 }
2422
2423 // The order of the next two instructions is critical in the case where
2424 // Rin and Rout are the same and should not be reversed.
2425
2426 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
2427 sra(Rin_high, Rcount, Rout_high ); // high half
2428 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
2429 if (Rcount == Rout_low) {
2430 srl(Rin_low, Rcount, Rout_low);
2431 }
2432 ba(done);
2433 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
2434
2435 // shift >= 32 bits, Ralt_count = Rcount-32
2436 bind(big_shift);
2437
2438 sra(Rin_high, Ralt_count, Rout_low);
2439 sra(Rin_high, 31, Rout_high); // sign into hi
2440
2441 bind( done );
2442 }
2443
2444
2445
2446 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
2447 Register Rcount,
2448 Register Rout_high, Register Rout_low,
2449 Register Rtemp ) {
2450
2451 Register Ralt_count = Rtemp;
2452 Register Rxfer_bits = Rtemp;
2453
2454 assert( Ralt_count != Rin_high
2455 && Ralt_count != Rin_low
2456 && Ralt_count != Rcount
2457 && Rxfer_bits != Rin_low
2458 && Rxfer_bits != Rin_high
2459 && Rxfer_bits != Rcount
2460 && Rxfer_bits != Rout_high
2461 && Rout_high != Rin_low,
2462 "register alias checks");
2463
2464 Label big_shift, done;
2465
2466 // This code can be optimized to use the 64 bit shifts in V9.
2467 // Here we use the 32 bit shifts.
2468
2469 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2470 subcc(Rcount, 31, Ralt_count);
2471 br(greater, true, pn, big_shift);
2472 delayed()->dec(Ralt_count);
2473
2474 // shift < 32 bits, Ralt_count = Rcount-31
2475
2476 // We get the transfer bits by shifting the high register left by 32-count.
2477 // This is done by shifting left by 31-count and then by one
2478 // more to take care of the special (rare) case where count is zero
2479 // (shifting by 32 would not work).
2480
2481 neg(Ralt_count);
2482 if (Rcount != Rout_low) {
2483 srl(Rin_low, Rcount, Rout_low);
2484 }
2485
2486 // The order of the next two instructions is critical in the case where
2487 // Rin and Rout are the same and should not be reversed.
2488
2489 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
2490 srl(Rin_high, Rcount, Rout_high ); // high half
2491 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
2492 if (Rcount == Rout_low) {
2493 srl(Rin_low, Rcount, Rout_low);
2494 }
2495 ba(done);
2496 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
2497
2498 // shift >= 32 bits, Ralt_count = Rcount-32
2499 bind(big_shift);
2500
2501 srl(Rin_high, Ralt_count, Rout_low);
2502 clr(Rout_high);
2503
2504 bind( done );
2505 }
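
// A standalone C++ sketch covering both right-shift helpers above (lshr and
// lushr): a 64-bit right shift built from 32-bit shifts, with the same
// 31-then-1 trick for the bits that cross from the high word into the low word.
// Assumes the usual arithmetic behaviour of >> on negative signed values.
#include <cstdint>

static void lshr_by_halves(int32_t in_hi, uint32_t in_lo, unsigned count,
                           int32_t* out_hi, uint32_t* out_lo, bool arithmetic) {
  count &= 0x3f;
  if (count < 32) {
    uint32_t xfer = ((uint32_t)in_hi << (31 - count)) << 1;   // bits crossing into the low word
    *out_lo = (in_lo >> count) | xfer;
    *out_hi = arithmetic ? (in_hi >> count)                   // sign-propagating high half
                         : (int32_t)((uint32_t)in_hi >> count);
  } else {
    *out_lo = arithmetic ? (uint32_t)(in_hi >> (count - 32))
                         : ((uint32_t)in_hi >> (count - 32));
    *out_hi = arithmetic ? (in_hi >> 31) : 0;                 // sign-fill (lshr) or zero-fill (lushr)
  }
}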
2506
2507 #ifdef _LP64
2508 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
2509 cmp(Ra, Rb);
2510 mov(-1, Rresult);
2511 movcc(equal, false, xcc, 0, Rresult);
2512 movcc(greater, false, xcc, 1, Rresult);
2513 }
2514 #endif
2515
2516
2517 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
2518 switch (size_in_bytes) {
2519 case 8: ld_long(src, dst); break;
2520 case 4: ld( src, dst); break;
2521 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
2522 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
2523 default: ShouldNotReachHere();
2524 }
2525 }
2526
2527 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
2528 switch (size_in_bytes) {
2529 case 8: st_long(src, dst); break;
2530 case 4: st( src, dst); break;
2531 case 2: sth( src, dst); break;
2532 case 1: stb( src, dst); break;
2533 default: ShouldNotReachHere();
2534 }
2535 }
2536
2537
2538 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
2539 FloatRegister Fa, FloatRegister Fb,
2540 Register Rresult) {
2541
2542 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
2543
2544 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
2545 Condition eq = f_equal;
2546 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
2547
2548 if (VM_Version::v9_instructions_work()) {
2549
2550 mov(-1, Rresult);
2551 movcc(eq, true, fcc0, 0, Rresult);
2552 movcc(gt, true, fcc0, 1, Rresult);
2553
2554 } else {
2555 Label done;
2556
2557 set( -1, Rresult );
2558 //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
2559 fb( eq, true, pn, done); delayed()->set( 0, Rresult );
2560 fb( gt, true, pn, done); delayed()->set( 1, Rresult );
2561
2562 bind (done);
2563 }
2564 }
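
// A plain C++ model of what float_cmp() computes: a three-way result where the
// unordered (NaN) case folds to unordered_result (-1 or +1), mirroring Java's
// fcmpl/fcmpg bytecodes. This only illustrates the result mapping, not the
// fcmp/movcc instruction selection above.
static int float_cmp_model(double a, double b, int unordered_result) {
  if (a == b) return 0;
  if (a >  b) return 1;
  if (a <  b) return -1;
  return unordered_result;   // at least one operand is NaN
}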
2565
2566
2567 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2568 {
2569 if (VM_Version::v9_instructions_work()) {
2570 Assembler::fneg(w, s, d);
2571 } else {
2572 if (w == FloatRegisterImpl::S) {
2573 Assembler::fneg(w, s, d);
2574 } else if (w == FloatRegisterImpl::D) {
2575 // number() does a sanity check on the alignment.
2576 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2577 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2578
2579 Assembler::fneg(FloatRegisterImpl::S, s, d);
2580 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2581 } else {
2582 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2583
2584 // number() does a sanity check on the alignment.
2585 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2586 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2587
2588 Assembler::fneg(FloatRegisterImpl::S, s, d);
2589 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2590 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2591 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2592 }
2593 }
2594 }
2595
2596 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2597 {
2598 if (VM_Version::v9_instructions_work()) {
2599 Assembler::fmov(w, s, d);
2600 } else {
2601 if (w == FloatRegisterImpl::S) {
2602 Assembler::fmov(w, s, d);
2603 } else if (w == FloatRegisterImpl::D) {
2604 // number() does a sanity check on the alignment.
2605 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2606 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2607
2608 Assembler::fmov(FloatRegisterImpl::S, s, d);
2609 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2610 } else {
2611 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2612
2613 // number() does a sanity check on the alignment.
2614 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2615 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2616
2617 Assembler::fmov(FloatRegisterImpl::S, s, d);
2618 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2619 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2620 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2621 }
2622 }
2623 }
2624
2625 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2626 {
2627 if (VM_Version::v9_instructions_work()) {
2628 Assembler::fabs(w, s, d);
2629 } else {
2630 if (w == FloatRegisterImpl::S) {
2631 Assembler::fabs(w, s, d);
2632 } else if (w == FloatRegisterImpl::D) {
2633 // number() does a sanity check on the alignment.
2634 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2635 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2636
2637 Assembler::fabs(FloatRegisterImpl::S, s, d);
2638 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2639 } else {
2640 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2641
2642 // number() does a sanity check on the alignment.
2643 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2644 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2645
2646 Assembler::fabs(FloatRegisterImpl::S, s, d);
2647 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2648 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2649 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2650 }
2651 }
2652 }
2653
2654 void MacroAssembler::save_all_globals_into_locals() {
2655 mov(G1,L1);
2656 mov(G2,L2);
2657 mov(G3,L3);
2658 mov(G4,L4);
2659 mov(G5,L5);
2660 mov(G6,L6);
2661 mov(G7,L7);
2662 }
2663
2664 void MacroAssembler::restore_globals_from_locals() {
2665 mov(L1,G1);
2666 mov(L2,G2);
2667 mov(L3,G3);
2668 mov(L4,G4);
2669 mov(L5,G5);
2670 mov(L6,G6);
2671 mov(L7,G7);
2672 }
2673
2674 // Use for 64 bit operation.
2675 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2676 {
2677 // store ptr_reg as the new top value
2678 #ifdef _LP64
2679 casx(top_ptr_reg, top_reg, ptr_reg);
2680 #else
2681 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2682 #endif // _LP64
2683 }
2684
2685 // [RGV] This routine does not handle 64 bit operations.
2686 // use casx_under_lock() or casx directly!!!
2687 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2688 {
2689 // store ptr_reg as the new top value
2690 if (VM_Version::v9_instructions_work()) {
2691 cas(top_ptr_reg, top_reg, ptr_reg);
2692 } else {
2693
2694 // If the register is neither an out nor a global, it is not visible
2695 // after the save. Allocate a register for it, save its
2696 // value in the register save area (the save may not flush
2697 // registers to the save area).
2698
2699 Register top_ptr_reg_after_save;
2700 Register top_reg_after_save;
2701 Register ptr_reg_after_save;
2702
2703 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2704 top_ptr_reg_after_save = top_ptr_reg->after_save();
2705 } else {
2706 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2707 top_ptr_reg_after_save = L0;
2708 st(top_ptr_reg, reg_save_addr);
2709 }
2710
2711 if (top_reg->is_out() || top_reg->is_global()) {
2712 top_reg_after_save = top_reg->after_save();
2713 } else {
2714 Address reg_save_addr = top_reg->address_in_saved_window();
2715 top_reg_after_save = L1;
2716 st(top_reg, reg_save_addr);
2717 }
2718
2719 if (ptr_reg->is_out() || ptr_reg->is_global()) {
2720 ptr_reg_after_save = ptr_reg->after_save();
2721 } else {
2722 Address reg_save_addr = ptr_reg->address_in_saved_window();
2723 ptr_reg_after_save = L2;
2724 st(ptr_reg, reg_save_addr);
2725 }
2726
2727 const Register& lock_reg = L3;
2728 const Register& lock_ptr_reg = L4;
2729 const Register& value_reg = L5;
2730 const Register& yield_reg = L6;
2731 const Register& yieldall_reg = L7;
2732
2733 save_frame();
2734
2735 if (top_ptr_reg_after_save == L0) {
2736 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2737 }
2738
2739 if (top_reg_after_save == L1) {
2740 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2741 }
2742
2743 if (ptr_reg_after_save == L2) {
2744 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2745 }
2746
2747 Label retry_get_lock;
2748 Label not_same;
2749 Label dont_yield;
2750
2751 assert(lock_addr, "lock_address should be non null for v8");
2752 set((intptr_t)lock_addr, lock_ptr_reg);
2753 // Initialize yield counter
2754 mov(G0,yield_reg);
2755 mov(G0, yieldall_reg);
2756 set(StubRoutines::Sparc::locked, lock_reg);
2757
2758 bind(retry_get_lock);
2759 cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
2760
2761 if(use_call_vm) {
2762 Untested("Need to verify global reg consistency");
2763 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2764 } else {
2765 // Save the regs and make space for a C call
2766 save(SP, -96, SP);
2767 save_all_globals_into_locals();
2768 call(CAST_FROM_FN_PTR(address,os::yield_all));
2769 delayed()->mov(yieldall_reg, O0);
2770 restore_globals_from_locals();
2771 restore();
2772 }
2773
2774 // reset the counter
2775 mov(G0,yield_reg);
2776 add(yieldall_reg, 1, yieldall_reg);
2777
2778 bind(dont_yield);
2779 // try to get lock
2780 swap(lock_ptr_reg, 0, lock_reg);
2781
2782 // did we get the lock?
2783 cmp(lock_reg, StubRoutines::Sparc::unlocked);
2784 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2785 delayed()->add(yield_reg,1,yield_reg);
2786
2787 // yes, got lock. do we have the same top?
2788 ld(top_ptr_reg_after_save, 0, value_reg);
2789 cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
2790
2791 // yes, same top.
2792 st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
2793 membar(Assembler::StoreStore);
2794
2795 bind(not_same);
2796 mov(value_reg, ptr_reg_after_save);
2797 st(lock_reg, lock_ptr_reg, 0); // unlock
2798
2799 restore();
2800 }
2801 }
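
// A portable sketch of the V8 fallback above: compare-and-swap emulated under a
// software spin lock that is acquired by atomically swapping in a 'locked'
// marker and retrying (with occasional yields) until the previous value was
// 'unlocked'. The guard variable and spin threshold below are illustrative only.
#include <atomic>
#include <thread>

static std::atomic<int> cas_guard(0);              // 0 = unlocked, 1 = locked

static int cas_under_lock_sketch(int* addr, int expected, int new_value) {
  int spins = 0;
  while (cas_guard.exchange(1) != 0) {             // 'swap' on the lock word
    if (++spins % 64 == 0) std::this_thread::yield();
  }
  int old = *addr;                                 // did we find the same top value?
  if (old == expected) *addr = new_value;          // yes: install the new value
  cas_guard.store(0);                              // unlock
  return old;                                      // CAS convention: return the prior value
}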
2802
2803 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2804 Register tmp,
2805 int offset) {
2806 intptr_t value = *delayed_value_addr;
2807 if (value != 0)
2808 return RegisterOrConstant(value + offset);
2809
2810 // load indirectly to solve generation ordering problem
2811 AddressLiteral a(delayed_value_addr);
2812 load_ptr_contents(a, tmp);
2813
2814 #ifdef ASSERT
2815 tst(tmp);
2816 breakpoint_trap(zero, xcc);
2817 #endif
2818
2819 if (offset != 0)
2820 add(tmp, offset, tmp);
2821
2822 return RegisterOrConstant(tmp);
2823 }
2824
2825
2826 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2827 assert(d.register_or_noreg() != G0, "lost side effect");
2828 if ((s2.is_constant() && s2.as_constant() == 0) ||
2829 (s2.is_register() && s2.as_register() == G0)) {
2830 // Do nothing, just move value.
2831 if (s1.is_register()) {
2832 if (d.is_constant()) d = temp;
2833 mov(s1.as_register(), d.as_register());
2834 return d;
2835 } else {
2836 return s1;
2837 }
2838 }
2839
2840 if (s1.is_register()) {
2841 assert_different_registers(s1.as_register(), temp);
2842 if (d.is_constant()) d = temp;
2843 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2844 return d;
2845 } else {
2846 if (s2.is_register()) {
2847 assert_different_registers(s2.as_register(), temp);
2848 if (d.is_constant()) d = temp;
2849 set(s1.as_constant(), temp);
2850 andn(temp, s2.as_register(), d.as_register());
2851 return d;
2852 } else {
2853 intptr_t res = s1.as_constant() & ~s2.as_constant();
2854 return res;
2855 }
2856 }
2857 }
2858
2859 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2860 assert(d.register_or_noreg() != G0, "lost side effect");
2861 if ((s2.is_constant() && s2.as_constant() == 0) ||
2862 (s2.is_register() && s2.as_register() == G0)) {
2863 // Do nothing, just move value.
2864 if (s1.is_register()) {
2865 if (d.is_constant()) d = temp;
2866 mov(s1.as_register(), d.as_register());
2867 return d;
2868 } else {
2869 return s1;
2870 }
2871 }
2872
2873 if (s1.is_register()) {
2874 assert_different_registers(s1.as_register(), temp);
2875 if (d.is_constant()) d = temp;
2876 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2877 return d;
2878 } else {
2879 if (s2.is_register()) {
2880 assert_different_registers(s2.as_register(), temp);
2881 if (d.is_constant()) d = temp;
2882 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
2883 return d;
2884 } else {
2885 intptr_t res = s1.as_constant() + s2.as_constant();
2886 return res;
2887 }
2888 }
2889 }
2890
2891 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2892 assert(d.register_or_noreg() != G0, "lost side effect");
2893 if (!is_simm13(s2.constant_or_zero()))
2894 s2 = (s2.as_constant() & 0xFF);
2895 if ((s2.is_constant() && s2.as_constant() == 0) ||
2896 (s2.is_register() && s2.as_register() == G0)) {
2897 // Do nothing, just move value.
2898 if (s1.is_register()) {
2899 if (d.is_constant()) d = temp;
2900 mov(s1.as_register(), d.as_register());
2901 return d;
2902 } else {
2903 return s1;
2904 }
2905 }
2906
2907 if (s1.is_register()) {
2908 assert_different_registers(s1.as_register(), temp);
2909 if (d.is_constant()) d = temp;
2910 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2911 return d;
2912 } else {
2913 if (s2.is_register()) {
2914 assert_different_registers(s2.as_register(), temp);
2915 if (d.is_constant()) d = temp;
2916 set(s1.as_constant(), temp);
2917 sll_ptr(temp, s2.as_register(), d.as_register());
2918 return d;
2919 } else {
2920 intptr_t res = s1.as_constant() << s2.as_constant();
2921 return res;
2922 }
2923 }
2924 }
2925
2926
2927 // Look up the method for a megamorphic invokeinterface call.
2928 // The target method is determined by <intf_klass, itable_index>.
2929 // The receiver klass is in recv_klass.
2930 // On success, the result will be in method_result, and execution falls through.
2931 // On failure, execution transfers to the given label.
2932 void MacroAssembler::lookup_interface_method(Register recv_klass,
2933 Register intf_klass,
2934 RegisterOrConstant itable_index,
2935 Register method_result,
2936 Register scan_temp,
2937 Register sethi_temp,
2938 Label& L_no_such_interface) {
2939 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
2940 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
2941 "caller must use same register for non-constant itable index as for method");
2942
2943 Label L_no_such_interface_restore;
2944 bool did_save = false;
2945 if (scan_temp == noreg || sethi_temp == noreg) {
2946 Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
2947 Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
2948 assert(method_result->is_global(), "must be able to return value");
2949 scan_temp = L2;
2950 sethi_temp = L3;
2951 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
2952 recv_klass = recv_2;
2953 intf_klass = intf_2;
2954 did_save = true;
2955 }
2956
2957 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
2958 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
2959 int scan_step = itableOffsetEntry::size() * wordSize;
2960 int vte_size = vtableEntry::size() * wordSize;
2961
2962 lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
2963 // %%% We should store the aligned, prescaled offset in the klassoop.
2964 // Then the next several instructions would fold away.
2965
2966 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
2967 int itb_offset = vtable_base;
2968 if (round_to_unit != 0) {
2969 // hoist first instruction of round_to(scan_temp, BytesPerLong):
2970 itb_offset += round_to_unit - wordSize;
2971 }
2972 int itb_scale = exact_log2(vtableEntry::size() * wordSize);
2973 sll(scan_temp, itb_scale, scan_temp);
2974 add(scan_temp, itb_offset, scan_temp);
2975 if (round_to_unit != 0) {
2976 // Round up to align_object_offset boundary
2977 // see code for InstanceKlass::start_of_itable!
2978 // Was: round_to(scan_temp, BytesPerLong);
2979 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
2980 and3(scan_temp, -round_to_unit, scan_temp);
2981 }
2982 add(recv_klass, scan_temp, scan_temp);
2983
2984 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
2985 RegisterOrConstant itable_offset = itable_index;
2986 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
2987 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
2988 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
2989
2990 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
2991 // if (scan->interface() == intf) {
2992 // result = (klass + scan->offset() + itable_index);
2993 // }
2994 // }
2995 Label L_search, L_found_method;
2996
2997 for (int peel = 1; peel >= 0; peel--) {
2998 // %%%% Could load both offset and interface in one ldx, if they were
2999 // in the opposite order. This would save a load.
3000 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
3001
3002 // Check that this entry is non-null. A null entry means that
3003 // the receiver class doesn't implement the interface, and wasn't the
3004 // same as when the caller was compiled.
3005 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
3006 delayed()->cmp(method_result, intf_klass);
3007
3008 if (peel) {
3009 brx(Assembler::equal, false, Assembler::pt, L_found_method);
3010 } else {
3011 brx(Assembler::notEqual, false, Assembler::pn, L_search);
3012 // (invert the test to fall through to found_method...)
3013 }
3014 delayed()->add(scan_temp, scan_step, scan_temp);
3015
3016 if (!peel) break;
3017
3018 bind(L_search);
3019 }
3020
3021 bind(L_found_method);
3022
3023 // Got a hit.
3024 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
3025 // scan_temp[-scan_step] points to the vtable offset we need
3026 ito_offset -= scan_step;
3027 lduw(scan_temp, ito_offset, scan_temp);
3028 ld_ptr(recv_klass, scan_temp, method_result);
3029
3030 if (did_save) {
3031 Label L_done;
3032 ba(L_done);
3033 delayed()->restore();
3034
3035 bind(L_no_such_interface_restore);
3036 ba(L_no_such_interface);
3037 delayed()->restore();
3038
3039 bind(L_done);
3040 }
3041 }
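
// A simplified C++ rendering of the scan in the pseudocode comment above. The
// struct and field names are placeholders, not the VM's itable types: the walk
// visits (interface, offset) pairs until a NULL interface terminates the table,
// and on a hit reads the method pointer at that offset plus the scaled itable index.
#include <cstddef>

struct ItableOffsetEntrySketch {
  const void* interface_klass;     // NULL terminates the table
  size_t      methods_offset;      // byte offset of this interface's methods within the klass
};

static const void* lookup_itable_sketch(const char* klass_base,
                                        const ItableOffsetEntrySketch* scan,
                                        const void* intf,
                                        size_t scaled_itable_index) {
  for (; scan->interface_klass != NULL; scan++) {
    if (scan->interface_klass == intf) {
      return *(const void* const*)(klass_base + scan->methods_offset + scaled_itable_index);
    }
  }
  return NULL;                     // receiver class does not implement the interface
}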
3042
3043
3044 // virtual method calling
3045 void MacroAssembler::lookup_virtual_method(Register recv_klass,
3046 RegisterOrConstant vtable_index,
3047 Register method_result) {
3048 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
3049 Register sethi_temp = method_result;
3050 const int base = (InstanceKlass::vtable_start_offset() * wordSize +
3051 // method pointer offset within the vtable entry:
3052 vtableEntry::method_offset_in_bytes());
3053 RegisterOrConstant vtable_offset = vtable_index;
3054 // Each of the following three lines potentially generates an instruction.
3055 // But the total number of address formation instructions will always be
3056 // at most two, and will often be zero. In any case, it will be optimal.
3057 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
3058 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
3059 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
3060 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
3061 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
3062 ld_ptr(vtable_entry_addr, method_result);
3063 }
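
// The address formed above, spelled out in plain C++ (names and layout are
// illustrative): the method pointer sits at a fixed offset inside the vtable
// entry selected by vtable_index, starting at the vtable's offset in the klass.
static const void* lookup_vtable_sketch(const char* recv_klass, int vtable_index,
                                        int vtable_start, int entry_size, int method_offset) {
  const char* entry = recv_klass + vtable_start + vtable_index * entry_size;
  return *(const void* const*)(entry + method_offset);
}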
3064
3065
3066 void MacroAssembler::check_klass_subtype(Register sub_klass,
3067 Register super_klass,
3068 Register temp_reg,
3069 Register temp2_reg,
3070 Label& L_success) {
3071 Register sub_2 = sub_klass;
3072 Register sup_2 = super_klass;
3073 if (!sub_2->is_global()) sub_2 = L0;
3074 if (!sup_2->is_global()) sup_2 = L1;
3075 bool did_save = false;
3076 if (temp_reg == noreg || temp2_reg == noreg) {
3077 temp_reg = L2;
3078 temp2_reg = L3;
3079 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
3080 sub_klass = sub_2;
3081 super_klass = sup_2;
3082 did_save = true;
3083 }
3084 Label L_failure, L_pop_to_failure, L_pop_to_success;
3085 check_klass_subtype_fast_path(sub_klass, super_klass,
3086 temp_reg, temp2_reg,
3087 (did_save ? &L_pop_to_success : &L_success),
3088 (did_save ? &L_pop_to_failure : &L_failure), NULL);
3089
3090 if (!did_save)
3091 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
3092 check_klass_subtype_slow_path(sub_2, sup_2,
3093 L2, L3, L4, L5,
3094 NULL, &L_pop_to_failure);
3095
3096 // on success:
3097 bind(L_pop_to_success);
3098 restore();
3099 ba_short(L_success);
3100
3101 // on failure:
3102 bind(L_pop_to_failure);
3103 restore();
3104 bind(L_failure);
3105 }
3106
3107
3108 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
3109 Register super_klass,
3110 Register temp_reg,
3111 Register temp2_reg,
3112 Label* L_success,
3113 Label* L_failure,
3114 Label* L_slow_path,
3115 RegisterOrConstant super_check_offset) {
3116 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3117 int sco_offset = in_bytes(Klass::super_check_offset_offset());
3118
3119 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
3120 bool need_slow_path = (must_load_sco ||
3121 super_check_offset.constant_or_zero() == sco_offset);
3122
3123 assert_different_registers(sub_klass, super_klass, temp_reg);
3124 if (super_check_offset.is_register()) {
3125 assert_different_registers(sub_klass, super_klass, temp_reg,
3126 super_check_offset.as_register());
3127 } else if (must_load_sco) {
3128 assert(temp2_reg != noreg, "supply either a temp or a register offset");
3129 }
3130
3131 Label L_fallthrough;
3132 int label_nulls = 0;
3133 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3134 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3135 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3136 assert(label_nulls <= 1 ||
3137 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
3138 "at most one NULL in the batch, usually");
3139
3140 // If the pointers are equal, we are done (e.g., String[] elements).
3141 // This self-check enables sharing of secondary supertype arrays among
3142 // non-primary types such as array-of-interface. Otherwise, each such
3143 // type would need its own customized SSA.
3144 // We move this check to the front of the fast path because many
3145 // type checks are in fact trivially successful in this manner,
3146 // so we get a nicely predicted branch right at the start of the check.
3147 cmp(super_klass, sub_klass);
3148 brx(Assembler::equal, false, Assembler::pn, *L_success);
3149 delayed()->nop();
3150
3151 // Check the supertype display:
3152 if (must_load_sco) {
3153 // The super check offset is always positive...
3154 lduw(super_klass, sco_offset, temp2_reg);
3155 super_check_offset = RegisterOrConstant(temp2_reg);
3156 // super_check_offset is register.
3157 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
3158 }
3159 ld_ptr(sub_klass, super_check_offset, temp_reg);
3160 cmp(super_klass, temp_reg);
3161
3162 // This check has worked decisively for primary supers.
3163 // Secondary supers are sought in the super_cache ('super_cache_addr').
3164 // (Secondary supers are interfaces and very deeply nested subtypes.)
3165 // This works in the same check above because of a tricky aliasing
3166 // between the super_cache and the primary super display elements.
3167 // (The 'super_check_addr' can address either, as the case requires.)
3168 // Note that the cache is updated below if it does not help us find
3169 // what we need immediately.
3170 // So if it was a primary super, we can just fail immediately.
3171 // Otherwise, it's the slow path for us (no success at this point).
3172
3173 // Hacked ba(), which may only be used just before L_fallthrough.
3174 #define FINAL_JUMP(label) \
3175 if (&(label) != &L_fallthrough) { \
3176 ba(label); delayed()->nop(); \
3177 }
3178
3179 if (super_check_offset.is_register()) {
3180 brx(Assembler::equal, false, Assembler::pn, *L_success);
3181 delayed()->cmp(super_check_offset.as_register(), sc_offset);
3182
3183 if (L_failure == &L_fallthrough) {
3184 brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
3185 delayed()->nop();
3186 } else {
3187 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
3188 delayed()->nop();
3189 FINAL_JUMP(*L_slow_path);
3190 }
3191 } else if (super_check_offset.as_constant() == sc_offset) {
3192 // Need a slow path; fast failure is impossible.
3193 if (L_slow_path == &L_fallthrough) {
3194 brx(Assembler::equal, false, Assembler::pt, *L_success);
3195 delayed()->nop();
3196 } else {
3197 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
3198 delayed()->nop();
3199 FINAL_JUMP(*L_success);
3200 }
3201 } else {
3202 // No slow path; it's a fast decision.
3203 if (L_failure == &L_fallthrough) {
3204 brx(Assembler::equal, false, Assembler::pt, *L_success);
3205 delayed()->nop();
3206 } else {
3207 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
3208 delayed()->nop();
3209 FINAL_JUMP(*L_success);
3210 }
3211 }
3212
3213 bind(L_fallthrough);
3214
3215 #undef FINAL_JUMP
3216 }
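
// A condensed C++ sketch of the fast-path decision above (field names are
// placeholders): probe the word at super_check_offset inside the subclass;
// equality decides "yes", a miss on the secondary-super-cache slot routes to
// the slow path, and a miss on a primary display slot is a definite "no".
enum SubtypeFastResult { FAST_YES, FAST_NO, NEEDS_SLOW_PATH };

static SubtypeFastResult subtype_fast_path_sketch(const char* sub_klass,
                                                  const char* super_klass,
                                                  int super_check_offset,
                                                  int secondary_cache_offset) {
  if (sub_klass == super_klass) return FAST_YES;               // trivial self check
  const char* probed = *(const char* const*)(sub_klass + super_check_offset);
  if (probed == super_klass) return FAST_YES;                  // hit in the display or the cache
  if (super_check_offset == secondary_cache_offset) return NEEDS_SLOW_PATH;
  return FAST_NO;                                              // primary display slot mismatch
}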
3217
3218
3219 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
3220 Register super_klass,
3221 Register count_temp,
3222 Register scan_temp,
3223 Register scratch_reg,
3224 Register coop_reg,
3225 Label* L_success,
3226 Label* L_failure) {
3227 assert_different_registers(sub_klass, super_klass,
3228 count_temp, scan_temp, scratch_reg, coop_reg);
3229
3230 Label L_fallthrough, L_loop;
3231 int label_nulls = 0;
3232 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3233 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3234 assert(label_nulls <= 1, "at most one NULL in the batch");
3235
3236 // a couple of useful fields in sub_klass:
3237 int ss_offset = in_bytes(Klass::secondary_supers_offset());
3238 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3239
3240 // Do a linear scan of the secondary super-klass chain.
3241 // This code is rarely used, so simplicity is a virtue here.
3242
3243 #ifndef PRODUCT
3244 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3245 inc_counter((address) pst_counter, count_temp, scan_temp);
3246 #endif
3247
3248 // We will consult the secondary-super array.
3249 ld_ptr(sub_klass, ss_offset, scan_temp);
3250
3251 Register search_key = super_klass;
3252
3253 // Load the array length. (The length is positive, so the unsigned 32-bit load does the right thing on LP64.)
3254 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
3255
3256 // Check for empty secondary super list
3257 tst(count_temp);
3258
3259 // In the array of super classes elements are pointer sized.
3260 int element_size = wordSize;
3261
3262 // Top of search loop
3263 bind(L_loop);
3264 br(Assembler::equal, false, Assembler::pn, *L_failure);
3265 delayed()->add(scan_temp, element_size, scan_temp);
3266
3267 // Skip the array header in all array accesses.
3268 int elem_offset = Array<Klass*>::base_offset_in_bytes();
3269 elem_offset -= element_size; // the scan pointer was pre-incremented also
3270
3271 // Load next super to check
3272 ld_ptr( scan_temp, elem_offset, scratch_reg );
3273
3274 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
3275 cmp(scratch_reg, search_key);
3276
3277 // A miss means we are NOT a subtype and need to keep looping
3278 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
3279 delayed()->deccc(count_temp); // decrement trip counter in delay slot
3280
3281 // Success. Cache the super we found and proceed in triumph.
3282 st_ptr(super_klass, sub_klass, sc_offset);
3283
3284 if (L_success != &L_fallthrough) {
3285 ba(*L_success);
3286 delayed()->nop();
3287 }
3288
3289 bind(L_fallthrough);
3290 }
3291
3292
3293 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
3294 Register temp_reg,
3295 int extra_slot_offset) {
3296 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
3297 int stackElementSize = Interpreter::stackElementSize;
3298 int offset = extra_slot_offset * stackElementSize;
3299 if (arg_slot.is_constant()) {
3300 offset += arg_slot.as_constant() * stackElementSize;
3301 return offset;
3302 } else {
3303 assert(temp_reg != noreg, "must specify");
3304 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
3305 if (offset != 0)
3306 add(temp_reg, offset, temp_reg);
3307 return temp_reg;
3308 }
3309 }
3310
3311
3312 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
3313 Register temp_reg,
3314 int extra_slot_offset) {
3315 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
3316 }
3317
3318
3319 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
3320 Register temp_reg,
3321 Label& done, Label* slow_case,
3322 BiasedLockingCounters* counters) {
3323 assert(UseBiasedLocking, "why call this otherwise?");
3324
3325 if (PrintBiasedLockingStatistics) {
3326 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
3327 if (counters == NULL)
3328 counters = BiasedLocking::counters();
3329 }
3330
3331 Label cas_label;
3332
3333 // Biased locking
3334 // See whether the lock is currently biased toward our thread and
3335 // whether the epoch is still valid
3336 // Note that the runtime guarantees sufficient alignment of JavaThread
3337 // pointers to allow age to be placed into low bits
3338 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
3339 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3340 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
3341
3342 load_klass(obj_reg, temp_reg);
3343 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
3344 or3(G2_thread, temp_reg, temp_reg);
3345 xor3(mark_reg, temp_reg, temp_reg);
3346 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
3347 if (counters != NULL) {
3348 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
3349 // Reload mark_reg as we may need it later
3350 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
3351 }
3352 brx(Assembler::equal, true, Assembler::pt, done);
3353 delayed()->nop();
3354
3355 Label try_revoke_bias;
3356 Label try_rebias;
3357 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3358 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3359
3360 // At this point we know that the header has the bias pattern and
3361 // that we are not the bias owner in the current epoch. We need to
3362 // figure out more details about the state of the header in order to
3363 // know what operations can be legally performed on the object's
3364 // header.
3365
3366 // If the low three bits in the xor result aren't clear, that means
3367 // the prototype header is no longer biased and we have to revoke
3368 // the bias on this object.
3369 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
3370 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
3371
3372 // Biasing is still enabled for this data type. See whether the
3373 // epoch of the current bias is still valid, meaning that the epoch
3374 // bits of the mark word are equal to the epoch bits of the
3375 // prototype header. (Note that the prototype header's epoch bits
3376 // only change at a safepoint.) If not, attempt to rebias the object
3377 // toward the current thread. Note that we must be absolutely sure
3378 // that the current epoch is invalid in order to do this because
3379 // otherwise the manipulations it performs on the mark word are
3380 // illegal.
3381 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
3382 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
3383
3384 // The epoch of the current bias is still valid but we know nothing
3385 // about the owner; it might be set or it might be clear. Try to
3386 // acquire the bias of the object using an atomic operation. If this
3387 // fails we will go in to the runtime to revoke the object's bias.
3388 // Note that we first construct the presumed unbiased header so we
3389 // don't accidentally blow away another thread's valid bias.
3390 delayed()->and3(mark_reg,
3391 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
3392 mark_reg);
3393 or3(G2_thread, mark_reg, temp_reg);
3394 casn(mark_addr.base(), mark_reg, temp_reg);
3395 // If the biasing toward our thread failed, this means that
3396 // another thread succeeded in biasing it toward itself and we
3397 // need to revoke that bias. The revocation will occur in the
3398 // interpreter runtime in the slow case.
3399 cmp(mark_reg, temp_reg);
3400 if (counters != NULL) {
3401 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
3402 }
3403 if (slow_case != NULL) {
3404 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3405 delayed()->nop();
3406 }
3407 ba_short(done);
3408
3409 bind(try_rebias);
3410 // At this point we know the epoch has expired, meaning that the
3411 // current "bias owner", if any, is actually invalid. Under these
3412 // circumstances _only_, we are allowed to use the current header's
3413 // value as the comparison value when doing the cas to acquire the
3414 // bias in the current epoch. In other words, we allow transfer of
3415 // the bias from one thread to another directly in this situation.
3416 //
3417 // FIXME: due to a lack of registers we currently blow away the age
3418 // bits in this situation. Should attempt to preserve them.
3419 load_klass(obj_reg, temp_reg);
3420 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
3421 or3(G2_thread, temp_reg, temp_reg);
3422 casn(mark_addr.base(), mark_reg, temp_reg);
3423 // If the biasing toward our thread failed, this means that
3424 // another thread succeeded in biasing it toward itself and we
3425 // need to revoke that bias. The revocation will occur in the
3426 // interpreter runtime in the slow case.
3427 cmp(mark_reg, temp_reg);
3428 if (counters != NULL) {
3429 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
3430 }
3431 if (slow_case != NULL) {
3432 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3433 delayed()->nop();
3434 }
3435 ba_short(done);
3436
3437 bind(try_revoke_bias);
3438 // The prototype mark in the klass doesn't have the bias bit set any
3439 // more, indicating that objects of this data type are not supposed
3440 // to be biased any more. We are going to try to reset the mark of
3441 // this object to the prototype value and fall through to the
3442 // CAS-based locking scheme. Note that if our CAS fails, it means
3443 // that another thread raced us for the privilege of revoking the
3444 // bias of this particular object, so it's okay to continue in the
3445 // normal locking code.
3446 //
3447 // FIXME: due to a lack of registers we currently blow away the age
3448 // bits in this situation. Should attempt to preserve them.
3449 load_klass(obj_reg, temp_reg);
3450 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
3451 casn(mark_addr.base(), mark_reg, temp_reg);
3452 // Fall through to the normal CAS-based lock, because no matter what
3453 // the result of the above CAS, some thread must have succeeded in
3454 // removing the bias bit from the object's header.
3455 if (counters != NULL) {
3456 cmp(mark_reg, temp_reg);
3457 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
3458 }
3459
3460 bind(cas_label);
3461 }
3462
3463 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
3464 bool allow_delay_slot_filling) {
3465 // Check for biased locking unlock case, which is a no-op
3466 // Note: we do not have to check the thread ID for two reasons.
3467 // First, the interpreter checks for IllegalMonitorStateException at
3468 // a higher level. Second, if the bias was revoked while we held the
3469 // lock, the object could not be rebiased toward another thread, so
3470 // the bias bit would be clear.
3471 ld_ptr(mark_addr, temp_reg);
3472 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3473 cmp(temp_reg, markOopDesc::biased_lock_pattern);
3474 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
3475 delayed();
3476 if (!allow_delay_slot_filling) {
3477 nop();
3478 }
3479 }
3480
3481
3482 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
3483 // Solaris/SPARC's "as". Another apt name would be cas_ptr()
3484
3485 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
3486 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3487 }
3488
3489
3490
3491 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
3492 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
3493 // The code could be tightened up considerably.
3494 //
3495 // box->dhw disposition - post-conditions at DONE_LABEL.
3496 // - Successful inflated lock: box->dhw != 0.
3497 // Any non-zero value suffices.
3498 // Consider G2_thread, rsp, boxReg, or unused_mark()
3499 // - Successful Stack-lock: box->dhw == mark.
3500 // box->dhw must contain the displaced mark word value
3501 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
3502 // The slow-path fast_enter() and slow_enter() operators
3503 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
3504 // - Biased: box->dhw is undefined
3505 //
3506 // SPARC refworkload performance - specifically jetstream and scimark - is
3507 // extremely sensitive to the size of the code emitted by compiler_lock_object
3508 // and compiler_unlock_object. Critically, the key factor is code size, not path
3509 // length. (Simple experiments that pad CLO with unexecuted NOPs demonstrate the
3510 // effect.)
3511
3512
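// Rough shape of the fast-lock path emitted below (illustrative pseudo-code
// only, not the authoritative contract; details vary with EmitSync):
//
//   mark = obj->mark;
//   if (try_bias)  attempt biased acquisition, branch to done on success;
//   if (mark & 2)  goto inflated;                          // monitor bit set
//   if (CAS(&obj->mark, mark | unlocked, box-or-BUSY) succeeds)
//     goto done;                                           // stack-lock taken
//   else check for a recursive stack-lock, otherwise leave icc.Z clear
//        so the caller falls into the slow path;
//   inflated: CAS(&monitor->owner, NULL, current_thread);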
3513 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
3514 Register Rbox, Register Rscratch,
3515 BiasedLockingCounters* counters,
3516 bool try_bias) {
3517 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3518
3519 verify_oop(Roop);
3520 Label done ;
3521
3522 if (counters != NULL) {
3523 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
3524 }
3525
3526 if (EmitSync & 1) {
3527 mov(3, Rscratch);
3528 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3529 cmp(SP, G0);
3530 return ;
3531 }
3532
3533 if (EmitSync & 2) {
3534
3535 // Fetch object's markword
3536 ld_ptr(mark_addr, Rmark);
3537
3538 if (try_bias) {
3539 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3540 }
3541
3542 // Save Rbox in Rscratch to be used for the cas operation
3543 mov(Rbox, Rscratch);
3544
3545 // set Rmark to markOop | markOopDesc::unlocked_value
3546 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3547
3548 // Initialize the box. (Must happen before we update the object mark!)
3549 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3550
3551 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
3552 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3553 casx_under_lock(mark_addr.base(), Rmark, Rscratch,
3554 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3555
3556 // if compare/exchange succeeded we found an unlocked object and we now have locked it
3557 // hence we are done
3558 cmp(Rmark, Rscratch);
3559 #ifdef _LP64
3560 sub(Rscratch, STACK_BIAS, Rscratch);
3561 #endif
3562 brx(Assembler::equal, false, Assembler::pt, done);
3563 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
3564
3565 // we did not find an unlocked object so see if this is a recursive case
3566 // sub(Rscratch, SP, Rscratch);
3567 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3568 andcc(Rscratch, 0xfffff003, Rscratch);
3569 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3570 bind (done);
3571 return ;
3572 }
3573
3574 Label Egress ;
3575
3576 if (EmitSync & 256) {
3577 Label IsInflated ;
3578
3579 ld_ptr(mark_addr, Rmark); // fetch obj->mark
3580 // Triage: biased, stack-locked, neutral, inflated
3581 if (try_bias) {
3582 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3583 // Invariant: if control reaches this point in the emitted stream
3584 // then Rmark has not been modified.
3585 }
3586
3587 // Store mark into displaced mark field in the on-stack basic-lock "box"
3588 // Critically, this must happen before the CAS
3589 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
3590 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3591 andcc(Rmark, 2, G0);
3592 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
3593 delayed()->
3594
3595 // Try stack-lock acquisition.
3596 // Beware: the 1st instruction is in a delay slot
3597 mov(Rbox, Rscratch);
3598 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3599 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3600 casn(mark_addr.base(), Rmark, Rscratch);
3601 cmp(Rmark, Rscratch);
3602 brx(Assembler::equal, false, Assembler::pt, done);
3603 delayed()->sub(Rscratch, SP, Rscratch);
3604
3605 // Stack-lock attempt failed - check for recursive stack-lock.
3606 // See the comments below about how we might remove this case.
3607 #ifdef _LP64
3608 sub(Rscratch, STACK_BIAS, Rscratch);
3609 #endif
3610 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3611 andcc(Rscratch, 0xfffff003, Rscratch);
3612 br(Assembler::always, false, Assembler::pt, done);
3613 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3614
3615 bind(IsInflated);
3616 if (EmitSync & 64) {
3617 // If m->owner != null goto IsLocked
3618 // Pessimistic form: Test-and-CAS vs CAS
3619 // The optimistic form avoids RTS->RTO cache line upgrades.
3620 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3621 andcc(Rscratch, Rscratch, G0);
3622 brx(Assembler::notZero, false, Assembler::pn, done);
3623 delayed()->nop();
3624 // m->owner == null : it's unlocked.
3625 }
3626
3627 // Try to CAS m->owner from null to Self
3628 // Invariant: if we acquire the lock then _recursions should be 0.
3629 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3630 mov(G2_thread, Rscratch);
3631 casn(Rmark, G0, Rscratch);
3632 cmp(Rscratch, G0);
3633 // Intentional fall-through into done
3634 } else {
3635 // Aggressively avoid the Store-before-CAS penalty
3636 // Defer the store into box->dhw until after the CAS
3637 Label IsInflated, Recursive ;
3638
3639 // Anticipate CAS -- Avoid RTS->RTO upgrade
3640 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3641
3642 ld_ptr(mark_addr, Rmark); // fetch obj->mark
3643 // Triage: biased, stack-locked, neutral, inflated
3644
3645 if (try_bias) {
3646 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3647 // Invariant: if control reaches this point in the emitted stream
3648 // then Rmark has not been modified.
3649 }
3650 andcc(Rmark, 2, G0);
3651 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
3652 delayed()-> // Beware - dangling delay-slot
3653
3654 // Try stack-lock acquisition.
3655 // Transiently install BUSY (0) encoding in the mark word.
3656 // if the CAS of 0 into the mark was successful then we execute:
3657 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
3658 // ST obj->mark = box -- overwrite transient 0 value
3659 // This presumes TSO, of course.
3660
3661 mov(0, Rscratch);
3662 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3663 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3664 casn(mark_addr.base(), Rmark, Rscratch);
3665 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3666 cmp(Rscratch, Rmark);
3667 brx(Assembler::notZero, false, Assembler::pn, Recursive);
3668 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3669 if (counters != NULL) {
3670 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3671 }
3672 ba(done);
3673 delayed()->st_ptr(Rbox, mark_addr);
3674
3675 bind(Recursive);
3676 // Stack-lock attempt failed - check for recursive stack-lock.
3677 // Tests show that we can remove the recursive case with no impact
3678 // on refworkload 0.83. If we need to reduce the size of the code
3679 // emitted by compiler_lock_object() the recursive case is a perfect
3680 // candidate.
3681 //
3682 // A more extreme idea is to always inflate on stack-lock recursion.
3683 // This lets us eliminate the recursive checks in compiler_lock_object
3684 // and compiler_unlock_object and the (box->dhw == 0) encoding.
3685 // A brief experiment - requiring changes to synchronizer.cpp and the
3686 // interpreter - showed a performance *increase*. In the same experiment I eliminated
3687 // the fast-path stack-lock code from the interpreter and always passed
3688 // control to the "slow" operators in synchronizer.cpp.
3689
3690 // RScratch contains the fetched obj->mark value from the failed CASN.
3691 #ifdef _LP64
3692 sub(Rscratch, STACK_BIAS, Rscratch);
3693 #endif
3694 sub(Rscratch, SP, Rscratch);
3695 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3696 andcc(Rscratch, 0xfffff003, Rscratch);
3697 if (counters != NULL) {
3698 // Accounting needs the Rscratch register
3699 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3700 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3701 ba_short(done);
3702 } else {
3703 ba(done);
3704 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3705 }
3706
3707 bind (IsInflated);
3708 if (EmitSync & 64) {
3709 // If m->owner != null goto IsLocked
3710 // Test-and-CAS vs CAS
3711 // Pessimistic form avoids futile (doomed) CAS attempts
3712 // The optimistic form avoids RTS->RTO cache line upgrades.
3713 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3714 andcc(Rscratch, Rscratch, G0);
3715 brx(Assembler::notZero, false, Assembler::pn, done);
3716 delayed()->nop();
3717 // m->owner == null : it's unlocked.
3718 }
3719
3720 // Try to CAS m->owner from null to Self
3721 // Invariant: if we acquire the lock then _recursions should be 0.
3722 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3723 mov(G2_thread, Rscratch);
3724 casn(Rmark, G0, Rscratch);
3725 cmp(Rscratch, G0);
3726 // ST box->displaced_header = NonZero.
3727 // Any non-zero value suffices:
3728 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
3729 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
3730 // Intentional fall-through into done
3731 }
3732
3733 bind (done);
3734 }
3735
3736 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
3737 Register Rbox, Register Rscratch,
3738 bool try_bias) {
3739 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3740
3741 Label done ;
3742
3743 if (EmitSync & 4) {
3744 cmp(SP, G0);
3745 return ;
3746 }
3747
3748 if (EmitSync & 8) {
3749 if (try_bias) {
3750 biased_locking_exit(mark_addr, Rscratch, done);
3751 }
3752
3753 // Test first if it is a fast recursive unlock
3754 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
3755 br_null_short(Rmark, Assembler::pt, done);
3756
3757 // Check if it is still a lightweight lock; this is true if we see
3758 // the stack address of the basicLock in the markOop of the object.
3759 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3760 casx_under_lock(mark_addr.base(), Rbox, Rmark,
3761 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3762 ba(done);
3763 delayed()->cmp(Rbox, Rmark);
3764 bind(done);
3765 return ;
3766 }
3767
3768 // Beware ... If the aggregate size of the code emitted by CLO and CUO
3769 // is too large, performance rolls abruptly off a cliff.
3770 // This could be related to inlining policies, code cache management, or
3771 // I$ effects.
3772 Label LStacked ;
3773
3774 if (try_bias) {
3775 // TODO: eliminate redundant LDs of obj->mark
3776 biased_locking_exit(mark_addr, Rscratch, done);
3777 }
3778
3779 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
3780 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
3781 andcc(Rscratch, Rscratch, G0);
3782 brx(Assembler::zero, false, Assembler::pn, done);
3783 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS
3784 andcc(Rmark, 2, G0);
3785 brx(Assembler::zero, false, Assembler::pt, LStacked);
3786 delayed()->nop();
3787
3788 // It's inflated
3789 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
3790 // the ST of 0 into _owner which releases the lock. This prevents loads
3791 // and stores within the critical section from reordering (floating)
3792 // past the store that releases the lock. But TSO is a strong memory model
3793 // and that particular flavor of barrier is a noop, so we can safely elide it.
3794 // Note that we use 1-0 locking by default for the inflated case. We
3795 // close the resultant (and rare) race by having contending threads in
3796 // monitorenter periodically poll _owner.
3797 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3798 ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
3799 xor3(Rscratch, G2_thread, Rscratch);
3800 orcc(Rbox, Rscratch, Rbox);
3801 brx(Assembler::notZero, false, Assembler::pn, done);
3802 delayed()->
3803 ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
3804 ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
3805 orcc(Rbox, Rscratch, G0);
3806 if (EmitSync & 65536) {
3807 Label LSucc ;
3808 brx(Assembler::notZero, false, Assembler::pn, LSucc);
3809 delayed()->nop();
3810 ba(done);
3811 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3812
3813 bind(LSucc);
3814 st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3815 if (os::is_MP()) { membar (StoreLoad); }
3816 ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
3817 andcc(Rscratch, Rscratch, G0);
3818 brx(Assembler::notZero, false, Assembler::pt, done);
3819 delayed()->andcc(G0, G0, G0);
3820 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3821 mov(G2_thread, Rscratch);
3822 casn(Rmark, G0, Rscratch);
3823 // invert icc.zf and goto done
3824 br_notnull(Rscratch, false, Assembler::pt, done);
3825 delayed()->cmp(G0, G0);
3826 ba(done);
3827 delayed()->cmp(G0, 1);
3828 } else {
3829 brx(Assembler::notZero, false, Assembler::pn, done);
3830 delayed()->nop();
3831 ba(done);
3832 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3833 }
3834
3835 bind (LStacked);
3836 // Consider: we could replace the expensive CAS in the exit
3837 // path with a simple ST of the displaced mark value fetched from
3838 // the on-stack basiclock box. That admits a race where a thread T2
3839 // in the slow lock path -- inflating with monitor M -- could race a
3840 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3841 // More precisely T1 in the stack-lock unlock path could "stomp" the
3842 // inflated mark value M installed by T2, resulting in an orphan
3843 // object monitor M and T2 becoming stranded. We can remedy that situation
3844 // by having T2 periodically poll the object's mark word using timed wait
3845 // operations. If T2 discovers that a stomp has occurred it vacates
3846 // the monitor M and wakes any other threads stranded on the now-orphan M.
3847 // In addition the monitor scavenger, which performs deflation,
3848 // would also need to check for orphan monitors and stranded threads.
3849 //
3850 // Finally, inflation is also used when T2 needs to assign a hashCode
3851 // to O and O is stack-locked by T1. The "stomp" race could cause
3852 // an assigned hashCode value to be lost. We can avoid that condition
3853 // and provide the necessary hashCode stability invariants by ensuring
3854 // that hashCode generation is idempotent between copying GCs.
3855 // For example we could compute the hashCode of an object O as
3856 // O's heap address XOR some high quality RNG value that is refreshed
3857 // at GC-time. The monitor scavenger would install the hashCode
3858 // found in any orphan monitors. Again, the mechanism admits a
3859 // lost-update "stomp" WAW race but detects and recovers as needed.
3860 //
3861 // A prototype implementation showed excellent results, although
3862 // the scavenger and timeout code was rather involved.
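//
// Purely as an illustrative sketch of the idempotent-hash idea above (names
// are hypothetical, not HotSpot APIs):
//
//   hash(O) = mix((intptr_t) O ^ per_gc_cycle_salt)
//
// where per_gc_cycle_salt is refreshed only at GC time, so recomputing the
// hash between copying GCs yields the same value.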
3863
3864 casn(mark_addr.base(), Rbox, Rscratch);
3865 cmp(Rbox, Rscratch);
3866 // Intentional fall through into done ...
3867
3868 bind(done);
3869 }
3870
3871
3872
3873 void MacroAssembler::print_CPU_state() {
3874 // %%%%% need to implement this
3875 }
3876
3877 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3878 // %%%%% need to implement this
3879 }
3880
3881 void MacroAssembler::push_IU_state() {
3882 // %%%%% need to implement this
3883 }
3884
3885
3886 void MacroAssembler::pop_IU_state() {
3887 // %%%%% need to implement this
3888 }
3889
3890
3891 void MacroAssembler::push_FPU_state() {
3892 // %%%%% need to implement this
3893 }
3894
3895
3896 void MacroAssembler::pop_FPU_state() {
3897 // %%%%% need to implement this
3898 }
3899
3900
3901 void MacroAssembler::push_CPU_state() {
3902 // %%%%% need to implement this
3903 }
3904
3905
3906 void MacroAssembler::pop_CPU_state() {
3907 // %%%%% need to implement this
3908 }
3909
3910
3911
3912 void MacroAssembler::verify_tlab() {
3913 #ifdef ASSERT
3914 if (UseTLAB && VerifyOops) {
3915 Label next, next2, ok;
3916 Register t1 = L0;
3917 Register t2 = L1;
3918 Register t3 = L2;
3919
3920 save_frame(0);
3921 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3922 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
3923 or3(t1, t2, t3);
3924 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
3925 STOP("assert(top >= start)");
3926 should_not_reach_here();
3927
3928 bind(next);
3929 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3930 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
3931 or3(t3, t2, t3);
3932 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
3933 STOP("assert(top <= end)");
3934 should_not_reach_here();
3935
3936 bind(next2);
3937 and3(t3, MinObjAlignmentInBytesMask, t3);
3938 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
3939 STOP("assert(aligned)");
3940 should_not_reach_here();
3941
3942 bind(ok);
3943 restore();
3944 }
3945 #endif
3946 }
3947
3948
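// Conceptually (a simplified sketch, ignoring the alignment asserts and the
// shared-eden check), the fast path below is the classic bump-the-pointer
// allocation with a CAS retry loop:
//
//   do {
//     obj  = *eden_top;
//     free = eden_end - obj;
//     if (free < size) goto slow_case;
//     new_top = obj + size;
//   } while (CAS(eden_top, obj, new_top) != obj);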
3949 void MacroAssembler::eden_allocate(
3950 Register obj, // result: pointer to object after successful allocation
3951 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3952 int con_size_in_bytes, // object size in bytes if known at compile time
3953 Register t1, // temp register
3954 Register t2, // temp register
3955 Label& slow_case // continuation point if fast allocation fails
3956 ){
3957 // make sure arguments make sense
3958 assert_different_registers(obj, var_size_in_bytes, t1, t2);
3959 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
3960 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3961
3962 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3963 // No allocation in the shared eden.
3964 ba_short(slow_case);
3965 } else {
3966 // get eden boundaries
3967 // note: we need both top & top_addr!
3968 const Register top_addr = t1;
3969 const Register end = t2;
3970
3971 CollectedHeap* ch = Universe::heap();
3972 set((intx)ch->top_addr(), top_addr);
3973 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
3974 ld_ptr(top_addr, delta, end);
3975 ld_ptr(top_addr, 0, obj);
3976
3977 // try to allocate
3978 Label retry;
3979 bind(retry);
3980 #ifdef ASSERT
3981 // make sure eden top is properly aligned
3982 {
3983 Label L;
3984 btst(MinObjAlignmentInBytesMask, obj);
3985 br(Assembler::zero, false, Assembler::pt, L);
3986 delayed()->nop();
3987 STOP("eden top is not properly aligned");
3988 bind(L);
3989 }
3990 #endif // ASSERT
3991 const Register free = end;
3992 sub(end, obj, free); // compute amount of free space
3993 if (var_size_in_bytes->is_valid()) {
3994 // size is unknown at compile time
3995 cmp(free, var_size_in_bytes);
3996 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
3997 delayed()->add(obj, var_size_in_bytes, end);
3998 } else {
3999 // size is known at compile time
4000 cmp(free, con_size_in_bytes);
4001 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
4002 delayed()->add(obj, con_size_in_bytes, end);
4003 }
4004 // Compare obj with the value at top_addr; if still equal, swap the value of
4005 // end with the value at top_addr. If not equal, read the value at top_addr
4006 // into end.
4007 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
4008 // if someone beat us on the allocation, try again, otherwise continue
4009 cmp(obj, end);
4010 brx(Assembler::notEqual, false, Assembler::pn, retry);
4011 delayed()->mov(end, obj); // nop if successful since obj == end
4012
4013 #ifdef ASSERT
4014 // make sure eden top is properly aligned
4015 {
4016 Label L;
4017 const Register top_addr = t1;
4018
4019 set((intx)ch->top_addr(), top_addr);
4020 ld_ptr(top_addr, 0, top_addr);
4021 btst(MinObjAlignmentInBytesMask, top_addr);
4022 br(Assembler::zero, false, Assembler::pt, L);
4023 delayed()->nop();
4024 STOP("eden top is not properly aligned");
4025 bind(L);
4026 }
4027 #endif // ASSERT
4028 }
4029 }
4030
4031
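// TLAB allocation sketch (illustrative): because the TLAB is thread-private,
// no CAS is needed; a simple bump of the thread-local top suffices:
//
//   obj = thread->tlab_top;
//   if (thread->tlab_end - obj < size) goto slow_case;
//   thread->tlab_top = obj + size;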
4032 void MacroAssembler::tlab_allocate(
4033 Register obj, // result: pointer to object after successful allocation
4034 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
4035 int con_size_in_bytes, // object size in bytes if known at compile time
4036 Register t1, // temp register
4037 Label& slow_case // continuation point if fast allocation fails
4038 ){
4039 // make sure arguments make sense
4040 assert_different_registers(obj, var_size_in_bytes, t1);
4041 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
4042 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
4043
4044 const Register free = t1;
4045
4046 verify_tlab();
4047
4048 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
4049
4050 // calculate amount of free space
4051 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
4052 sub(free, obj, free);
4053
4054 Label done;
4055 if (var_size_in_bytes == noreg) {
4056 cmp(free, con_size_in_bytes);
4057 } else {
4058 cmp(free, var_size_in_bytes);
4059 }
4060 br(Assembler::less, false, Assembler::pn, slow_case);
4061 // calculate the new top pointer
4062 if (var_size_in_bytes == noreg) {
4063 delayed()->add(obj, con_size_in_bytes, free);
4064 } else {
4065 delayed()->add(obj, var_size_in_bytes, free);
4066 }
4067
4068 bind(done);
4069
4070 #ifdef ASSERT
4071 // make sure new free pointer is properly aligned
4072 {
4073 Label L;
4074 btst(MinObjAlignmentInBytesMask, free);
4075 br(Assembler::zero, false, Assembler::pt, L);
4076 delayed()->nop();
4077 STOP("updated TLAB free is not properly aligned");
4078 bind(L);
4079 }
4080 #endif // ASSERT
4081
4082 // update the tlab top pointer
4083 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
4084 verify_tlab();
4085 }
4086
4087
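// Refill policy sketch (illustrative): if the space left in the current TLAB
// exceeds the refill waste limit, keep the TLAB and branch to try_eden so the
// object is allocated directly in the shared eden; otherwise retire the TLAB
// by filling its leftover space with a dummy int[] object, allocate a fresh
// TLAB from eden, and branch back to retry.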
4088 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
4089 Register top = O0;
4090 Register t1 = G1;
4091 Register t2 = G3;
4092 Register t3 = O1;
4093 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
4094 Label do_refill, discard_tlab;
4095
4096 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
4097 // No allocation in the shared eden.
4098 ba_short(slow_case);
4099 }
4100
4101 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
4102 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
4103 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
4104
4105 // calculate amount of free space
4106 sub(t1, top, t1);
4107 srl_ptr(t1, LogHeapWordSize, t1);
4108
4109 // Retain tlab and allocate object in shared space if
4110 // the amount free in the tlab is too large to discard.
4111 cmp(t1, t2);
4112 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
4113
4114 // increment waste limit to prevent getting stuck on this slow path
4115 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
4116 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
4117 if (TLABStats) {
4118 // increment number of slow_allocations
4119 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
4120 add(t2, 1, t2);
4121 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
4122 }
4123 ba_short(try_eden);
4124
4125 bind(discard_tlab);
4126 if (TLABStats) {
4127 // increment number of refills
4128 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
4129 add(t2, 1, t2);
4130 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
4131 // accumulate wastage
4132 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
4133 add(t2, t1, t2);
4134 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
4135 }
4136
4137 // if tlab is currently allocated (top or end != null) then
4138 // fill [top, end + alignment_reserve) with array object
4139 br_null_short(top, Assembler::pn, do_refill);
4140
4141 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
4142 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
4143 // set klass to intArrayKlass
4144 sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
4145 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
4146 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
4147 st(t1, top, arrayOopDesc::length_offset_in_bytes());
4148 set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
4149 ld_ptr(t2, 0, t2);
4150 // Store klass last. Concurrent GCs assume the klass length is valid if
4151 // the klass field is not null.
4152 store_klass(t2, top);
4153 verify_oop(top);
4154
4155 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
4156 sub(top, t1, t1); // size of tlab's allocated portion
4157 incr_allocated_bytes(t1, t2, t3);
4158
4159 // refill the tlab with an eden allocation
4160 bind(do_refill);
4161 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
4162 sll_ptr(t1, LogHeapWordSize, t1);
4163 // allocate new tlab, address returned in top
4164 eden_allocate(top, t1, 0, t2, t3, slow_case);
4165
4166 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
4167 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
4168 #ifdef ASSERT
4169 // check that tlab_size (t1) is still valid
4170 {
4171 Label ok;
4172 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
4173 sll_ptr(t2, LogHeapWordSize, t2);
4174 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
4175 STOP("assert(t1 == tlab_size)");
4176 should_not_reach_here();
4177
4178 bind(ok);
4179 }
4180 #endif // ASSERT
4181 add(top, t1, top); // t1 is tlab_size
4182 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
4183 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
4184 verify_tlab();
4185 ba_short(retry);
4186 }
4187
4188 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
4189 Register t1, Register t2) {
4190 // Bump total bytes allocated by this thread
4191 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
4192 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
4193 // v8 support has gone the way of the dodo
4194 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
4195 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
4196 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
4197 }
4198
4199 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
4200 switch (cond) {
4201 // Note some conditions are synonyms for others
4202 case Assembler::never: return Assembler::always;
4203 case Assembler::zero: return Assembler::notZero;
4204 case Assembler::lessEqual: return Assembler::greater;
4205 case Assembler::less: return Assembler::greaterEqual;
4206 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
4207 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
4208 case Assembler::negative: return Assembler::positive;
4209 case Assembler::overflowSet: return Assembler::overflowClear;
4210 case Assembler::always: return Assembler::never;
4211 case Assembler::notZero: return Assembler::zero;
4212 case Assembler::greater: return Assembler::lessEqual;
4213 case Assembler::greaterEqual: return Assembler::less;
4214 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
4215 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
4216 case Assembler::positive: return Assembler::negative;
4217 case Assembler::overflowClear: return Assembler::overflowSet;
4218 }
4219
4220 ShouldNotReachHere(); return Assembler::overflowClear;
4221 }
4222
4223 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
4224 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
4225 Condition negated_cond = negate_condition(cond);
4226 Label L;
4227 brx(negated_cond, false, Assembler::pt, L);
4228 delayed()->nop();
4229 inc_counter(counter_ptr, Rtmp1, Rtmp2);
4230 bind(L);
4231 }
4232
4233 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
4234 AddressLiteral addrlit(counter_addr);
4235 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
4236 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
4237 ld(addr, Rtmp2);
4238 inc(Rtmp2);
4239 st(Rtmp2, addr);
4240 }
4241
4242 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
4243 inc_counter((address) counter_addr, Rtmp1, Rtmp2);
4244 }
4245
4246 SkipIfEqual::SkipIfEqual(
4247 MacroAssembler* masm, Register temp, const bool* flag_addr,
4248 Assembler::Condition condition) {
4249 _masm = masm;
4250 AddressLiteral flag(flag_addr);
4251 _masm->sethi(flag, temp);
4252 _masm->ldub(temp, flag.low10(), temp);
4253 _masm->tst(temp);
4254 _masm->br(condition, false, Assembler::pt, _label);
4255 _masm->delayed()->nop();
4256 }
4257
4258 SkipIfEqual::~SkipIfEqual() {
4259 _masm->bind(_label);
4260 }
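
// Usage sketch for SkipIfEqual (illustrative; flag and registers are
// hypothetical placeholders): the instructions emitted between construction
// and destruction are branched over whenever the byte at flag_addr satisfies
// the supplied condition against zero.
//
//   {
//     SkipIfEqual skip(&_masm, G4, &SomeDevelopFlag, Assembler::zero);
//     // code emitted here executes at runtime only when the flag byte
//     // is non-zero (i.e. the flag is true)
//   }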
4261
4262
4263 // Writes to successive stack pages until the given offset is reached, to check
4264 // for stack overflow plus shadow pages. This clobbers tsp and scratch.
4265 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
4266 Register Rscratch) {
4267 // Use stack pointer in temp stack pointer
4268 mov(SP, Rtsp);
4269
4270 // Bang stack for total size given plus stack shadow page size.
4271 // Bang one page at a time because a large size can overflow yellow and
4272 // red zones (the bang will fail but stack overflow handling can't tell that
4273 // it was a stack overflow bang vs a regular segv).
4274 int offset = os::vm_page_size();
4275 Register Roffset = Rscratch;
4276
4277 Label loop;
4278 bind(loop);
4279 set((-offset)+STACK_BIAS, Rscratch);
4280 st(G0, Rtsp, Rscratch);
4281 set(offset, Roffset);
4282 sub(Rsize, Roffset, Rsize);
4283 cmp(Rsize, G0);
4284 br(Assembler::greater, false, Assembler::pn, loop);
4285 delayed()->sub(Rtsp, Roffset, Rtsp);
4286
4287 // Bang down shadow pages too.
4288 // The -1 because we already subtracted 1 page.
4289 for (int i = 0; i< StackShadowPages-1; i++) {
4290 set((-i*offset)+STACK_BIAS, Rscratch);
4291 st(G0, Rtsp, Rscratch);
4292 }
4293 }
4294
4295 ///////////////////////////////////////////////////////////////////////////////////
4296 #ifndef SERIALGC
4297
4298 static address satb_log_enqueue_with_frame = NULL;
4299 static u_char* satb_log_enqueue_with_frame_end = NULL;
4300
4301 static address satb_log_enqueue_frameless = NULL;
4302 static u_char* satb_log_enqueue_frameless_end = NULL;
4303
4304 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
4305
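// The stub generated below implements, roughly (illustrative pseudo-code):
//
//   index = thread->satb_mark_queue._index;      // byte offset into the buffer
//   if (index == 0) {                             // buffer is full
//     call SATBMarkQueueSet::handle_zero_index_for_thread(thread); retry;
//   } else {
//     index -= oopSize;
//     *(oop*)(thread->satb_mark_queue._buf + index) = pre_val;
//     thread->satb_mark_queue._index = index;
//   }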
4306 static void generate_satb_log_enqueue(bool with_frame) {
4307 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
4308 CodeBuffer buf(bb);
4309 MacroAssembler masm(&buf);
4310
4311 #define __ masm.
4312
4313 address start = __ pc();
4314 Register pre_val;
4315
4316 Label refill, restart;
4317 if (with_frame) {
4318 __ save_frame(0);
4319 pre_val = I0; // Was O0 before the save.
4320 } else {
4321 pre_val = O0;
4322 }
4323
4324 int satb_q_index_byte_offset =
4325 in_bytes(JavaThread::satb_mark_queue_offset() +
4326 PtrQueue::byte_offset_of_index());
4327
4328 int satb_q_buf_byte_offset =
4329 in_bytes(JavaThread::satb_mark_queue_offset() +
4330 PtrQueue::byte_offset_of_buf());
4331
4332 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
4333 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
4334 "check sizes in assembly below");
4335
4336 __ bind(restart);
4337
4338 // Load the index into the SATB buffer. PtrQueue::_index is a size_t
4339 // so ld_ptr is appropriate.
4340 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
4341
4342 // index == 0?
4343 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
4344
4345 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
4346 __ sub(L0, oopSize, L0);
4347
4348 __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0
4349 if (!with_frame) {
4350 // Use return-from-leaf
4351 __ retl();
4352 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4353 } else {
4354 // Not delayed.
4355 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4356 }
4357 if (with_frame) {
4358 __ ret();
4359 __ delayed()->restore();
4360 }
4361 __ bind(refill);
4362
4363 address handle_zero =
4364 CAST_FROM_FN_PTR(address,
4365 &SATBMarkQueueSet::handle_zero_index_for_thread);
4366 // This should be rare enough that we can afford to save all the
4367 // scratch registers that the calling context might be using.
4368 __ mov(G1_scratch, L0);
4369 __ mov(G3_scratch, L1);
4370 __ mov(G4, L2);
4371 // We need the value of O0 above (for the write into the buffer), so we
4372 // save and restore it.
4373 __ mov(O0, L3);
4374 // Since the call will overwrite O7, we save and restore that, as well.
4375 __ mov(O7, L4);
4376 __ call_VM_leaf(L5, handle_zero, G2_thread);
4377 __ mov(L0, G1_scratch);
4378 __ mov(L1, G3_scratch);
4379 __ mov(L2, G4);
4380 __ mov(L3, O0);
4381 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4382 __ delayed()->mov(L4, O7);
4383
4384 if (with_frame) {
4385 satb_log_enqueue_with_frame = start;
4386 satb_log_enqueue_with_frame_end = __ pc();
4387 } else {
4388 satb_log_enqueue_frameless = start;
4389 satb_log_enqueue_frameless_end = __ pc();
4390 }
4391
4392 #undef __
4393 }
4394
4395 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
4396 if (with_frame) {
4397 if (satb_log_enqueue_with_frame == 0) {
4398 generate_satb_log_enqueue(with_frame);
4399 assert(satb_log_enqueue_with_frame != 0, "postcondition.");
4400 if (G1SATBPrintStubs) {
4401 tty->print_cr("Generated with-frame satb enqueue:");
4402 Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
4403 satb_log_enqueue_with_frame_end,
4404 tty);
4405 }
4406 }
4407 } else {
4408 if (satb_log_enqueue_frameless == 0) {
4409 generate_satb_log_enqueue(with_frame);
4410 assert(satb_log_enqueue_frameless != 0, "postcondition.");
4411 if (G1SATBPrintStubs) {
4412 tty->print_cr("Generated frameless satb enqueue:");
4413 Disassembler::decode((u_char*)satb_log_enqueue_frameless,
4414 satb_log_enqueue_frameless_end,
4415 tty);
4416 }
4417 }
4418 }
4419 }
4420
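// G1 SATB pre-barrier, conceptually (illustrative sketch):
//
//   if (thread->satb_mark_queue._active) {            // only while marking runs
//     pre_val = (obj != noreg) ? *field : pre_val;    // previous value of the slot
//     if (pre_val != NULL)
//       satb_enqueue(pre_val);                        // via the stubs generated above
//   }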
4421 void MacroAssembler::g1_write_barrier_pre(Register obj,
4422 Register index,
4423 int offset,
4424 Register pre_val,
4425 Register tmp,
4426 bool preserve_o_regs) {
4427 Label filtered;
4428
4429 if (obj == noreg) {
4430 // We are not loading the previous value so make
4431 // sure that we don't trash the value in pre_val
4432 // with the code below.
4433 assert_different_registers(pre_val, tmp);
4434 } else {
4435 // We will be loading the previous value
4436 // in this code so...
4437 assert(offset == 0 || index == noreg, "choose one");
4438 assert(pre_val == noreg, "check this code");
4439 }
4440
4441 // Is marking active?
4442 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4443 ld(G2,
4444 in_bytes(JavaThread::satb_mark_queue_offset() +
4445 PtrQueue::byte_offset_of_active()),
4446 tmp);
4447 } else {
4448 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
4449 "Assumption");
4450 ldsb(G2,
4451 in_bytes(JavaThread::satb_mark_queue_offset() +
4452 PtrQueue::byte_offset_of_active()),
4453 tmp);
4454 }
4455
4456 // Is marking active?
4457 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
4458
4459 // Do we need to load the previous value?
4460 if (obj != noreg) {
4461 // Load the previous value...
4462 if (index == noreg) {
4463 if (Assembler::is_simm13(offset)) {
4464 load_heap_oop(obj, offset, tmp);
4465 } else {
4466 set(offset, tmp);
4467 load_heap_oop(obj, tmp, tmp);
4468 }
4469 } else {
4470 load_heap_oop(obj, index, tmp);
4471 }
4472 // Previous value has been loaded into tmp
4473 pre_val = tmp;
4474 }
4475
4476 assert(pre_val != noreg, "must have a real register");
4477
4478 // Is the previous value null?
4479 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);
4480
4481 // OK, it's not filtered, so we'll need to call enqueue. In the normal
4482 // case, pre_val will be a scratch G-reg, but there are some cases in
4483 // which it's an O-reg. In the first case, do a normal call. In the
4484 // latter, do a save here and call the frameless version.
4485
4486 guarantee(pre_val->is_global() || pre_val->is_out(),
4487 "Or we need to think harder.");
4488
4489 if (pre_val->is_global() && !preserve_o_regs) {
4490 generate_satb_log_enqueue_if_necessary(true); // with frame
4491
4492 call(satb_log_enqueue_with_frame);
4493 delayed()->mov(pre_val, O0);
4494 } else {
4495 generate_satb_log_enqueue_if_necessary(false); // frameless
4496
4497 save_frame(0);
4498 call(satb_log_enqueue_frameless);
4499 delayed()->mov(pre_val->after_save(), O0);
4500 restore();
4501 }
4502
4503 bind(filtered);
4504 }
4505
4506 static address dirty_card_log_enqueue = 0;
4507 static u_char* dirty_card_log_enqueue_end = 0;
4508
4509 // This gets to assume that o0 contains the object address.
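// Roughly, the stub does (illustrative pseudo-code):
//
//   card = &byte_map_base[O0 >> card_shift];
//   if (*card != dirty_card_val()) {
//     *card = dirty_card_val();          // 0, i.e. dirty
//     enqueue card into thread->dirty_card_queue, refilling when _index == 0;
//   }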
4510 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
4511 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
4512 CodeBuffer buf(bb);
4513 MacroAssembler masm(&buf);
4514 #define __ masm.
4515 address start = __ pc();
4516
4517 Label not_already_dirty, restart, refill;
4518
4519 #ifdef _LP64
4520 __ srlx(O0, CardTableModRefBS::card_shift, O0);
4521 #else
4522 __ srl(O0, CardTableModRefBS::card_shift, O0);
4523 #endif
4524 AddressLiteral addrlit(byte_map_base);
4525 __ set(addrlit, O1); // O1 := <card table base>
4526 __ ldub(O0, O1, O2); // O2 := [O0 + O1]
4527
4528 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
4529 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
4530
4531 // We didn't take the branch, so we're already dirty: return.
4532 // Use return-from-leaf
4533 __ retl();
4534 __ delayed()->nop();
4535
4536 // Not dirty.
4537 __ bind(not_already_dirty);
4538
4539 // Get O0 + O1 into a reg by itself
4540 __ add(O0, O1, O3);
4541
4542 // First, dirty it.
4543 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
4544
4545 int dirty_card_q_index_byte_offset =
4546 in_bytes(JavaThread::dirty_card_queue_offset() +
4547 PtrQueue::byte_offset_of_index());
4548 int dirty_card_q_buf_byte_offset =
4549 in_bytes(JavaThread::dirty_card_queue_offset() +
4550 PtrQueue::byte_offset_of_buf());
4551 __ bind(restart);
4552
4553 // Load the index into the update buffer. PtrQueue::_index is
4554 // a size_t so ld_ptr is appropriate here.
4555 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
4556
4557 // index == 0?
4558 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
4559
4560 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
4561 __ sub(L0, oopSize, L0);
4562
4563 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
4564 // Use return-from-leaf
4565 __ retl();
4566 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
4567
4568 __ bind(refill);
4569 address handle_zero =
4570 CAST_FROM_FN_PTR(address,
4571 &DirtyCardQueueSet::handle_zero_index_for_thread);
4572 // This should be rare enough that we can afford to save all the
4573 // scratch registers that the calling context might be using.
4574 __ mov(G1_scratch, L3);
4575 __ mov(G3_scratch, L5);
4576 // We need the value of O3 above (for the write into the buffer), so we
4577 // save and restore it.
4578 __ mov(O3, L6);
4579 // Since the call will overwrite O7, we save and restore that, as well.
4580 __ mov(O7, L4);
4581
4582 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
4583 __ mov(L3, G1_scratch);
4584 __ mov(L5, G3_scratch);
4585 __ mov(L6, O3);
4586 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4587 __ delayed()->mov(L4, O7);
4588
4589 dirty_card_log_enqueue = start;
4590 dirty_card_log_enqueue_end = __ pc();
4591 // XXX Should have a guarantee here about not going off the end!
4592 // Does it already do so? Do an experiment...
4593
4594 #undef __
4595
4596 }
4597
4598 static inline void
4599 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
4600 if (dirty_card_log_enqueue == 0) {
4601 generate_dirty_card_log_enqueue(byte_map_base);
4602 assert(dirty_card_log_enqueue != 0, "postcondition.");
4603 if (G1SATBPrintStubs) {
4604 tty->print_cr("Generated dirty_card enqueue:");
4605 Disassembler::decode((u_char*)dirty_card_log_enqueue,
4606 dirty_card_log_enqueue_end,
4607 tty);
4608 }
4609 }
4610 }
4611
4612
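// G1 post-barrier, conceptually (illustrative sketch):
//
//   if (new_val != NULL &&
//       region_of(store_addr) != region_of(new_val)) {   // cross-region store
//     dirty_card_enqueue(store_addr);                     // via the stub above
//   }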
4613 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4614
4615 Label filtered;
4616 MacroAssembler* post_filter_masm = this;
4617
4618 if (new_val == G0) return;
4619
4620 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
4621 assert(bs->kind() == BarrierSet::G1SATBCT ||
4622 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
4623
4624 if (G1RSBarrierRegionFilter) {
4625 xor3(store_addr, new_val, tmp);
4626 #ifdef _LP64
4627 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4628 #else
4629 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4630 #endif
4631
4632 // XXX Should I predict this taken or not? Does it matter?
4633 cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
4634 }
4635
4636 // If the "store_addr" register is an "in" or "local" register, move it to
4637 // a scratch reg so we can pass it as an argument.
4638 bool use_scr = !(store_addr->is_global() || store_addr->is_out());
4639 // Pick a scratch register different from "tmp".
4640 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
4641 // Make sure we use up the delay slot!
4642 if (use_scr) {
4643 post_filter_masm->mov(store_addr, scr);
4644 } else {
4645 post_filter_masm->nop();
4646 }
4647 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
4648 save_frame(0);
4649 call(dirty_card_log_enqueue);
4650 if (use_scr) {
4651 delayed()->mov(scr, O0);
4652 } else {
4653 delayed()->mov(store_addr->after_save(), O0);
4654 }
4655 restore();
4656
4657 bind(filtered);
4658 }
4659
4660 #endif // SERIALGC
4661 ///////////////////////////////////////////////////////////////////////////////////
4662
4663 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4664 // If we're writing constant NULL, we can skip the write barrier.
4665 if (new_val == G0) return;
4666 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
4667 assert(bs->kind() == BarrierSet::CardTableModRef ||
4668 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
4669 card_table_write(bs->byte_map_base, tmp, store_addr);
4670 }
4671
4672 void MacroAssembler::load_klass(Register src_oop, Register klass) {
4673 // The number of bytes in this code is used by
4674 // MachCallDynamicJavaNode::ret_addr_offset()
4675 // if this changes, change that.
4676 if (UseCompressedKlassPointers) {
4677 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4678 decode_klass_not_null(klass);
4679 } else {
4680 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4681 }
4682 }
4683
4684 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
4685 if (UseCompressedKlassPointers) {
4686 assert(dst_oop != klass, "not enough registers");
4687 encode_klass_not_null(klass);
4688 st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4689 } else {
4690 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4691 }
4692 }
4693
4694 void MacroAssembler::store_klass_gap(Register s, Register d) {
4695 if (UseCompressedKlassPointers) {
4696 assert(s != d, "not enough registers");
4697 st(s, d, oopDesc::klass_gap_offset_in_bytes());
4698 }
4699 }
4700
4701 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
4702 if (UseCompressedOops) {
4703 lduw(s, d);
4704 decode_heap_oop(d);
4705 } else {
4706 ld_ptr(s, d);
4707 }
4708 }
4709
4710 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
4711 if (UseCompressedOops) {
4712 lduw(s1, s2, d);
4713 decode_heap_oop(d, d);
4714 } else {
4715 ld_ptr(s1, s2, d);
4716 }
4717 }
4718
4719 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
4720 if (UseCompressedOops) {
4721 lduw(s1, simm13a, d);
4722 decode_heap_oop(d, d);
4723 } else {
4724 ld_ptr(s1, simm13a, d);
4725 }
4726 }
4727
4728 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
4729 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
4730 else load_heap_oop(s1, s2.as_register(), d);
4731 }
4732
4733 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
4734 if (UseCompressedOops) {
4735 assert(s1 != d && s2 != d, "not enough registers");
4736 encode_heap_oop(d);
4737 st(d, s1, s2);
4738 } else {
4739 st_ptr(d, s1, s2);
4740 }
4741 }
4742
4743 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
4744 if (UseCompressedOops) {
4745 assert(s1 != d, "not enough registers");
4746 encode_heap_oop(d);
4747 st(d, s1, simm13a);
4748 } else {
4749 st_ptr(d, s1, simm13a);
4750 }
4751 }
4752
4753 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
4754 if (UseCompressedOops) {
4755 assert(a.base() != d, "not enough registers");
4756 encode_heap_oop(d);
4757 st(d, a, offset);
4758 } else {
4759 st_ptr(d, a, offset);
4760 }
4761 }
4762
4763
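// The compressed-oop encode/decode routines below follow the usual scheme
// (illustrative, with NULL treated specially when a heap base is in use):
//
//   narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> narrow_oop_shift;
//   oop    = (narrow == 0) ? NULL : (narrow << narrow_oop_shift) + narrow_oop_base;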
4764 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
4765 assert (UseCompressedOops, "must be compressed");
4766 assert (Universe::heap() != NULL, "java heap should be initialized");
4767 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4768 verify_oop(src);
4769 if (Universe::narrow_oop_base() == NULL) {
4770 srlx(src, LogMinObjAlignmentInBytes, dst);
4771 return;
4772 }
4773 Label done;
4774 if (src == dst) {
4775 // optimize for frequent case src == dst
4776 bpr(rc_nz, true, Assembler::pt, src, done);
4777 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
4778 bind(done);
4779 srlx(src, LogMinObjAlignmentInBytes, dst);
4780 } else {
4781 bpr(rc_z, false, Assembler::pn, src, done);
4782 delayed() -> mov(G0, dst);
4783 // could be moved before the branch, with the delay slot annulled,
4784 // but that may add some unneeded work decoding null
4785 sub(src, G6_heapbase, dst);
4786 srlx(dst, LogMinObjAlignmentInBytes, dst);
4787 bind(done);
4788 }
4789 }
4790
4791
4792 void MacroAssembler::encode_heap_oop_not_null(Register r) {
4793 assert (UseCompressedOops, "must be compressed");
4794 assert (Universe::heap() != NULL, "java heap should be initialized");
4795 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4796 verify_oop(r);
4797 if (Universe::narrow_oop_base() != NULL)
4798 sub(r, G6_heapbase, r);
4799 srlx(r, LogMinObjAlignmentInBytes, r);
4800 }
4801
4802 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
4803 assert (UseCompressedOops, "must be compressed");
4804 assert (Universe::heap() != NULL, "java heap should be initialized");
4805 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4806 verify_oop(src);
4807 if (Universe::narrow_oop_base() == NULL) {
4808 srlx(src, LogMinObjAlignmentInBytes, dst);
4809 } else {
4810 sub(src, G6_heapbase, dst);
4811 srlx(dst, LogMinObjAlignmentInBytes, dst);
4812 }
4813 }
4814
4815 // Same algorithm as oops.inline.hpp decode_heap_oop.
4816 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
4817 assert (UseCompressedOops, "must be compressed");
4818 assert (Universe::heap() != NULL, "java heap should be initialized");
4819 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4820 sllx(src, LogMinObjAlignmentInBytes, dst);
4821 if (Universe::narrow_oop_base() != NULL) {
4822 Label done;
4823 bpr(rc_nz, true, Assembler::pt, dst, done);
4824 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
4825 bind(done);
4826 }
4827 verify_oop(dst);
4828 }
4829
4830 void MacroAssembler::decode_heap_oop_not_null(Register r) {
4831 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4832 // pd_code_size_limit.
4833 // Also do not verify_oop as this is called by verify_oop.
4834 assert (UseCompressedOops, "must be compressed");
4835 assert (Universe::heap() != NULL, "java heap should be initialized");
4836 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4837 sllx(r, LogMinObjAlignmentInBytes, r);
4838 if (Universe::narrow_oop_base() != NULL)
4839 add(r, G6_heapbase, r);
4840 }
4841
4842 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
4843 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4844 // pd_code_size_limit.
4845 // Also do not verify_oop as this is called by verify_oop.
4846 assert (UseCompressedOops, "must be compressed");
4847 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4848 sllx(src, LogMinObjAlignmentInBytes, dst);
4849 if (Universe::narrow_oop_base() != NULL)
4850 add(dst, G6_heapbase, dst);
4851 }
4852
4853 void MacroAssembler::encode_klass_not_null(Register r) {
4854 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4855 assert (UseCompressedKlassPointers, "must be compressed");
4856 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4857 if (Universe::narrow_klass_base() != NULL)
4858 sub(r, G6_heapbase, r);
4859 srlx(r, LogKlassAlignmentInBytes, r);
4860 }
4861
4862 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
4863 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4864 assert (UseCompressedKlassPointers, "must be compressed");
4865 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4866 if (Universe::narrow_klass_base() == NULL) {
4867 srlx(src, LogKlassAlignmentInBytes, dst);
4868 } else {
4869 sub(src, G6_heapbase, dst);
4870 srlx(dst, LogKlassAlignmentInBytes, dst);
4871 }
4872 }
4873
4874 void MacroAssembler::decode_klass_not_null(Register r) {
4875 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4876 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4877 // pd_code_size_limit.
4878 assert (UseCompressedKlassPointers, "must be compressed");
4879 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4880 sllx(r, LogKlassAlignmentInBytes, r);
4881 if (Universe::narrow_klass_base() != NULL)
4882 add(r, G6_heapbase, r);
4883 }
4884
4885 void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
4886 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4887 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4888 // pd_code_size_limit.
4889 assert (UseCompressedKlassPointers, "must be compressed");
4890 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4891 sllx(src, LogKlassAlignmentInBytes, dst);
4892 if (Universe::narrow_klass_base() != NULL)
4893 add(dst, G6_heapbase, dst);
4894 }
4895
4896 void MacroAssembler::reinit_heapbase() {
4897 if (UseCompressedOops || UseCompressedKlassPointers) {
4898 AddressLiteral base(Universe::narrow_ptrs_base_addr());
4899 load_ptr_contents(base, G6_heapbase);
4900 }
4901 }
4902
4903 // Compare char[] arrays aligned to 4 bytes.
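// Strategy (illustrative): limit holds the byte length; if the length is odd
// in chars, compare the trailing jchar first, then walk both arrays from the
// end using a negative index, comparing one 32-bit word (two jchars) per
// iteration and bailing out to Ldone with result = 0 on the first mismatch.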
4904 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
4905 Register limit, Register result,
4906 Register chr1, Register chr2, Label& Ldone) {
4907 Label Lvector, Lloop;
4908 assert(chr1 == result, "should be the same");
4909
4910 // Note: limit contains number of bytes (2*char_elements) != 0.
4911 andcc(limit, 0x2, chr1); // trailing character ?
4912 br(Assembler::zero, false, Assembler::pt, Lvector);
4913 delayed()->nop();
4914
4915 // compare the trailing char
4916 sub(limit, sizeof(jchar), limit);
4917 lduh(ary1, limit, chr1);
4918 lduh(ary2, limit, chr2);
4919 cmp(chr1, chr2);
4920 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4921 delayed()->mov(G0, result); // not equal
4922
4923 // only one char ?
4924 cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
4925 delayed()->add(G0, 1, result); // zero-length arrays are equal
4926
4927 // word-by-word compare, don't need alignment check
4928 bind(Lvector);
4929 // Shift ary1 and ary2 to the end of the arrays, negate limit
4930 add(ary1, limit, ary1);
4931 add(ary2, limit, ary2);
4932 neg(limit, limit);
4933
4934 lduw(ary1, limit, chr1);
4935 bind(Lloop);
4936 lduw(ary2, limit, chr2);
4937 cmp(chr1, chr2);
4938 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4939 delayed()->mov(G0, result); // not equal
4940 inccc(limit, 2*sizeof(jchar));
4941 // annul LDUW if branch is not taken to prevent access past end of array
4942 br(Assembler::notZero, true, Assembler::pt, Lloop);
4943 delayed()->lduw(ary1, limit, chr1); // hoisted
4944
4945 // Caller should set it:
4946 // add(G0, 1, result); // equals
4947 }
4948
4949 // Use BIS for zeroing (count is in bytes).
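// Outline (illustrative): for small counts fall through to a simple stx loop;
// for large counts zero up to the next cache-line boundary with ordinary
// stores, zero whole cache lines with block-init stores (BIS), stop two cache
// lines early so the following object's header is never BIS-zeroed, issue the
// required StoreLoad membar, then finish the tail with the stx loop.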
4950 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4951 assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
4952 Register end = count;
4953 int cache_line_size = VM_Version::prefetch_data_size();
4954 // Minimum count for which BIS zeroing can be used, since
4955 // it needs a membar, which is expensive.
4956 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
4957
4958 Label small_loop;
4959 // Check if count is negative (dead code) or zero.
4960 // Note: count uses 64 bits in a 64-bit VM.
4961 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
4962
4963 // Use BIS zeroing only for big arrays since it requires membar.
4964 if (Assembler::is_simm13(block_zero_size)) { // < 4096
4965 cmp(count, block_zero_size);
4966 } else {
4967 set(block_zero_size, temp);
4968 cmp(count, temp);
4969 }
4970 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
4971 delayed()->add(to, count, end);
4972
4973 // Note: size is >= three (32 bytes) cache lines.
4974
4975 // Clean the beginning of space up to next cache line.
4976 for (int offs = 0; offs < cache_line_size; offs += 8) {
4977 stx(G0, to, offs);
4978 }
4979
4980 // align to next cache line
4981 add(to, cache_line_size, to);
4982 and3(to, -cache_line_size, to);
4983
4984 // Note: size left >= two (32 bytes) cache lines.
4985
4986 // BIS should not be used to zero the tail (64 bytes)
4987 // to avoid zeroing the header of the following object.
4988 sub(end, (cache_line_size*2)-8, end);
4989
4990 Label bis_loop;
4991 bind(bis_loop);
4992 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
4993 add(to, cache_line_size, to);
4994 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
4995
4996 // BIS needs membar.
4997 membar(Assembler::StoreLoad);
4998
4999 add(end, (cache_line_size*2)-8, end); // restore end
5000 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
5001
5002 // Clean the tail.
5003 bind(small_loop);
5004 stx(G0, to, 0);
5005 add(to, 8, to);
5006 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
5007 nop(); // Separate short branches
5008 }