src/cpu/sparc/vm/assembler_sparc.cpp @ 238:3df2fe7c4451 (graal-compiler)

changeset: Merge
author:    trims
date:      Fri, 25 Jul 2008 11:29:03 -0700
parents:   d1605aabd0a1
children:  1ee8caae33af
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"

// Implementation of Address

Address::Address( addr_type t, int which ) {
  switch (t) {
   case extra_in_argument:
   case extra_out_argument:
     _base = t == extra_in_argument ? FP : SP;
     _hi   = 0;
     // Warning: In LP64 mode, _disp will occupy more than 10 bits.
     //          This is inconsistent with the other constructors, but
     //          opcodes such as ld or ldx only access disp() to get
     //          their simm13 argument.
     _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
    break;
   default:
    ShouldNotReachHere();
    break;
  }
}
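
// Illustrative note (added for clarity; not in the original source): for the
// first stack-passed argument, which == Argument::n_register_parameters, so
// the expression above reduces to
//   _disp = frame::memory_parameter_word_sp_offset * BytesPerWord + STACK_BIAS,
// i.e. the first memory-parameter word of the biased frame; each subsequent
// argument adds another BytesPerWord.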

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:         s = "????"; break;
  case call_op:    s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    s = "bpr";  break;
      case fb_op2:     s = "fb";   break;
      case fbp_op2:    s = "fbp";  break;
      case br_op2:     s = "br";   break;
      case bp_op2:     s = "bp";   break;
      case cb_op2:     s = "cb";   break;
      default:         s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}


// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);     break;
      case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
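
// Worked example (added for clarity; not in the original source): for a V9
// 'bp' at byte offset inst_pos = 0x100 that must reach dest_pos = 0x120,
// wdisp(dest_pos, inst_pos, 19) encodes (0x120 - 0x100) >> 2 = 8 in the low
// 19 bits, and the mask m = wdisp(word_aligned_ones, 0, 19) covers those
// same 19 bits, so the patched word is (inst & ~0x7ffff) | 8.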

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    r = inv_wdisp16(inst, pos);      break;
      case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      default: ShouldNotReachHere();
    }
  }
  return r;
}

int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

// Generate a bunch 'o stuff (including v9's)
#ifndef PRODUCT
void Assembler::test_v9() {
  add(    G0, G1, G2 );
  add(    G3,  0, G4 );

  addcc(  G5, G6, G7 );
  addcc(  I0,  1, I1 );
  addc(   I2, I3, I4 );
  addc(   I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z,   true, pn, L4, pc(), relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never,    true, pc() + 4, relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();

  fbp( f_notZero,       true,  fcc0, pn, pc() - 4, relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal,     true,  pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never,     false, lbl1 );
  delayed()->nop();

  bp( less,              true,  icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();


  casa(  L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv(   O4, O5, O7 );
  udiv(   G0, (1 << 12) - 1, G1 );
  sdiv(   G1, G2, G3 );
  sdiv(   G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0,  F1, F2 );
  fsub( FloatRegisterImpl::D, F34, F0, F62 );

  fcmp(  FloatRegisterImpl::Q, fcc0, F0,  F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6, F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q, F24, F28, F32 );
  fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
  fdiv( FloatRegisterImpl::S, F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2(  3, 0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();


  ldf( FloatRegisterImpl::S, O0, O1, F15 );
  ldf( FloatRegisterImpl::D, O2, -1, F14 );


  ldfsr(  O3, O4 );
  ldfsr(  O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa( FloatRegisterImpl::Q, I3, -1,    F36 );

  ldsb( I4, I5, I6 );
  ldsb( I7, -1, G0 );
  ldsh( G1, G3, G4 );
  ldsh( G5, -1, G6 );
  ldsw( G7, L0, L1 );
  ldsw( L2, -1, L3 );
  ldub( L4, L5, L6 );
  ldub( L7, -1, O0 );
  lduh( O1, O2, O3 );
  lduh( O4, -1, O5 );
  lduw( O6, O7, G0 );
  lduw( G1, -1, G2 );
  ldx(  G3, G4, G5 );
  ldx(  G6, -1, G7 );
  ldd(  I0, I1, I2 );
  ldd(  I3, -1, I4 );

  ldsba( I5, I6, 2, I7 );
  ldsba( L0, -1, L1 );
  ldsha( L2, L3, 3, L4 );
  ldsha( L5, -1, L6 );
  ldswa( L7, O0, (1 << 8) - 1, O1 );
  ldswa( O2, -1, O3 );
  lduba( O4, O5, 0, O6 );
  lduba( O7, -1, I0 );
  lduha( I1, I2, 1, I3 );
  lduha( I4, -1, I5 );
  lduwa( I6, I7, 2, L0 );
  lduwa( L1, -1, L2 );
  ldxa(  L3, L4, 3, L5 );
  ldxa(  L6, -1, L7 );
  ldda(  G0, G1, 4, G2 );
  ldda(  G3, -1, G4 );

  ldstub( G5, G6, G7 );
  ldstub( O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3(   I0, L0, O0 );
  and3(   G7, -1, O7 );
  andcc(  L2, I2, G2 );
  andcc(  L4, -1, G4 );
  andn(   I5, I6, I7 );
  andn(   I6, -1, I7 );
  andncc( I5, I6, I7 );
  andncc( I7, -1, I6 );
  or3(    I5, I6, I7 );
  or3(    I7, -1, I6 );
  orcc(   I5, I6, I7 );
  orcc(   I7, -1, I6 );
  orn(    I5, I6, I7 );
  orn(    I7, -1, I6 );
  orncc(  I5, I6, I7 );
  orncc(  I7, -1, I6 );
  xor3(   I5, I6, I7 );
  xor3(   I7, -1, I6 );
  xorcc(  I5, I6, I7 );
  xorcc(  I7, -1, I6 );
  xnor(   I5, I6, I7 );
  xnor(   I7, -1, I6 );
  xnorcc( I5, I6, I7 );
  xnorcc( I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear,      false, icc,  I6, L4 );
  movcc( f_unorderedOrEqual, true,  fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1, L2 );

  mulx(  I5, I6, I7 );
  mulx(  I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul(   I5, I6, I7 );
  umul(   I7, -1, I6 );
  smul(   I5, I6, I7 );
  smul(   I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc( I5, I6, I7 );
  mulscc( I7, -1, I6 );

  nop();


  popc( G0, G1);
  popc( -1, G2);

  prefetch(  L1, L2,    severalReads );
  prefetch(  L3, -1,    oneRead );
  prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha( G2, -1,    oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save(    I5, I6, I7 );
  save(    I7, -1, I6 );
  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll(  I5, I6, I7 );
  sll(  I7, 31, I6 );
  srl(  I5, I6, I7 );
  srl(  I7,  0, I6 );
  sra(  I5, I6, I7 );
  sra(  I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7,  0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();

  stf( FloatRegisterImpl::Q, F40, G0, I7 );
  stf( FloatRegisterImpl::S, F18, I3, -1 );

  stfsr(  L1, L2 );
  stfsr(  I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa( FloatRegisterImpl::Q, F44, G0, -1 );

  stb( L5, O2, I7 );
  stb( I7, I6, -1 );
  sth( L5, O2, I7 );
  sth( I7, I6, -1 );
  stw( L5, O2, I7 );
  stw( I7, I6, -1 );
  stx( L5, O2, I7 );
  stx( I7, I6, -1 );
  std( L5, O2, I7 );
  std( I7, I6, -1 );

  stba( L5, O2, I7,  8 );
  stba( I7, I6, -1 );
  stha( L5, O2, I7,  9 );
  stha( I7, I6, -1 );
  stwa( L5, O2, I7,  0 );
  stwa( I7, I6, -1 );
  stxa( L5, O2, I7, 11 );
  stxa( I7, I6, -1 );
  stda( L5, O2, I7, 12 );
  stda( I7, I6, -1 );

  sub(    I5, I6, I7 );
  sub(    I7, -1, I6 );
  subcc(  I5, I6, I7 );
  subcc(  I7, -1, I6 );
  subc(   I5, I6, I7 );
  subc(   I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa( G0, G1, 13, G2 );
  swapa( I7, -1,     I6 );

  taddcc(   I5, I6, I7 );
  taddcc(   I7, -1, I6 );
  taddcctv( I5, I6, I7 );
  taddcctv( I7, -1, I6 );

  tsubcc(   I5, I6, I7 );
  tsubcc(   I7, -1, I6 );
  tsubcctv( I5, I6, I7 );
  tsubcctv( I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual,     icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}

// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never,   true,  lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc( I0, I1, 31);
  ldc( I2, -1,  0);

  lddc( I4, I4, 30);
  lddc( I6,  0,  1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc( 31, L4, L5);
  stc( 30, L6, -(1 << 12) );

  stdc( 0, L7, G0);
  stdc( 1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif

// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
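
// Note (added for clarity; not in the original source): needs_explicit_null_check
// returns true when offset is too large for the protected zero page to catch,
// e.g. a field offset beyond the OS page size. For small offsets the later
// M[reg + offset] access itself faults when reg is NULL, so no code is
// emitted here at all.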

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

void MacroAssembler::retl( bool trace ) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl( O7, 2 * BytesPerInstWord, G0 );
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  sethi(a, /*ForceRelocatable=*/ true);
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a, d, offset);
#endif /* PRODUCT */
  } else {
    jmpl(a, d, offset);
  }
}

void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
  jumpl( a, G0, offset, file, line );
}


// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  add(inArg.address_in_frame(), d);
}

// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work())  flushw();
  else                                     flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  load_address(mem_serialize_page);
  st(G0, tmp1, tmp2);
}
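
// Worked example (added for clarity; not in the original source): assuming an
// 8K serialization page, the mask is vm_page_size() - sizeof(int) = 8188
// (0x1ffc), so (thread >> shift) & 8188 yields one int-aligned slot in
// [0, 8188] per thread, and the st(G0, ...) above touches only that slot.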


void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, s2, d);
  } else {
    smul (s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, simm13a, d);
  } else {
    smul (s1, simm13a, d);
  }
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}


// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  Address last_get_thread_addr(L3, (address)&last_get_thread);
  sethi(last_get_thread_addr);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, last_get_thread_addr);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);  // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    sllx(G1, 0,G1);             // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    sllx(G4, 0,G4);             // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) +
                in_bytes(JavaFrameAnchor::flags_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::last_Java_pc_offset()));

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  tst(L0);
#ifdef _LP64
  brx(Assembler::zero, false, Assembler::pt, PcOk);
#else
  br(Assembler::zero, false, Assembler::pt, PcOk);
#endif // _LP64
  delayed()->nop();
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#else
  st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#endif // _LP64
}
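
// Note (added for clarity; not in the original source): STACK_BIAS is the odd
// constant 2047 on 64-bit SPARC, so a properly biased SP always has its low
// bit set; the andcc/br(notZero) assertion above relies on exactly that.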

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
  ld_ptr(exception_addr, scratch_reg);
  br_null(scratch_reg, false, pt, L);
  delayed()->nop();
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0,                "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
  // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::store_check(Register tmp, Register obj) {
  // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)

  /* $$$ This stuff needs to go into one of the BarrierSet generator
     functions.  (The particular barrier sets will have to be friends of
     MacroAssembler, I guess.) */
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert( tmp != obj, "need separate temp reg");
  Address rs(tmp, (address)ct->byte_map_base);
  load_address(rs);
  stb(G0, rs.base(), obj);
}
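
// Note (added for clarity; not in the original source): obj is shifted right
// by CardTableModRefBS::card_shift (typically 9, i.e. 512-byte cards), and
// the resulting card index, relative to byte_map_base loaded into tmp,
// addresses the one byte that stb(G0, ...) zeroes to mark the card dirty.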

void MacroAssembler::store_check(Register tmp, Register obj, Register offset) {
  store_check(tmp, obj);
}

// %%% Note:  The following six instructions have been moved,
//            unchanged, from assembler_sparc.inline.hpp.
//            They will be refactored at a later date.

void MacroAssembler::sethi(intptr_t imm22a,
                           Register d,
                           bool ForceRelocatable,
                           RelocationHolder const& rspec) {
  Address adr( d, (address)imm22a, rspec );
  MacroAssembler::sethi( adr, ForceRelocatable );
}


void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
  // if addr of local, do not need to load it
  assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
# endif
  v9_dep();
//  ForceRelocatable = 1;
  save_pc = pc();
  if (a.hi32() == 0 && a.low32() >= 0) {
    Assembler::sethi(a.low32(), a.base(), a.rspec());
  }
  else if (a.hi32() == -1) {
    Assembler::sethi(~a.low32(), a.base(), a.rspec());
    xor3(a.base(), ~low10(~0), a.base());
  }
  else {
    Assembler::sethi(a.hi32(), a.base(), a.rspec() );         // 22
    if ( a.hi32() & 0x3ff )                                   // Any bits?
      or3( a.base(), a.hi32() & 0x3ff, a.base() );            // High 32 bits are now in low 32
    if ( a.low32() & 0xFFFFFC00 ) {                           // done?
      if ( (a.low32() >> 20) & 0xfff ) {                      // Any bits set?
        sllx(a.base(), 12, a.base());                         // Make room for next 12 bits
        or3( a.base(), (a.low32() >> 20) & 0xfff, a.base() ); // Or in next 12
        shiftcnt = 0;                                         // We already shifted
      }
      else
        shiftcnt = 12;
      if ( (a.low32() >> 10) & 0x3ff ) {
        sllx(a.base(), shiftcnt + 10, a.base());              // Make room for last 10 bits
        or3( a.base(), (a.low32() >> 10) & 0x3ff, a.base() ); // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(a.base(), shiftcnt + 10, a.base());                // Shift leaving disp field 0'd
    }
    else
      sllx( a.base(), 32, a.base() );
  }
  // Pad out the instruction sequence so it can be
  // patched later.
  if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
                            a.rtype() != relocInfo::runtime_call_type) ) {
    while ( pc() < (save_pc + (7 * BytesPerInstWord)) )
      nop();
  }
#else
  Assembler::sethi(a.hi(), a.base(), a.rspec());
#endif

}
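
// Worked example (added for clarity; not in the original source): for the
// negative address 0xFFFFFFFF80001000, hi32() == -1, so the two-instruction
// path applies: sethi(~0x80001000) leaves d == 0x000000007FFFEC00, and the
// xor3 with the sign-extended immediate ~low10(~0) == 0xFFFFFC00 flips the
// upper word plus the sethi bits, yielding exactly 0xFFFFFFFF80001000
// (set() supplies the low 10 bits separately when they are nonzero).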

int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case) return 7;
  intptr_t iaddr = (intptr_t)a;
  int hi32 = (int)(iaddr >> 32);
  int lo32 = (int)(iaddr);
  int inst_count;
  if (hi32 == 0 && lo32 >= 0)
    inst_count = 1;
  else if (hi32 == -1)
    inst_count = 2;
  else {
    inst_count = 2;
    if ( hi32 & 0x3ff )
      inst_count++;
    if ( lo32 & 0xFFFFFC00 ) {
      if ( (lo32 >> 20) & 0xfff )  inst_count += 2;
      if ( (lo32 >> 10) & 0x3ff )  inst_count += 2;
    }
  }
  return BytesPerInstWord * inst_count;
#else
  return BytesPerInstWord;
#endif
}
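
// Example (added for clarity; not in the original source): for an address
// such as 0x000000007F000400, hi32 == 0 and lo32 >= 0, so a single sethi
// suffices and size_of_sethi returns BytesPerInstWord * 1 == 4.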

int MacroAssembler::worst_case_size_of_set() {
  return size_of_sethi(NULL, true) + 1;
}

void MacroAssembler::set(intptr_t value, Register d,
                         RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);

  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(val);
      return;
    }
  }
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
  sethi( val );
  if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
    add( d, value & 0x3ff, d, rspec);
  }
}

void MacroAssembler::setsw(int value, Register d,
                           RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);
  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d);
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi( val );
#ifndef _LP64
      if ( value < 0 ) {
        assert_not_delayed();
        sra (d, G0, d);
      }
#endif
      return;
    }
  }
  assert_not_delayed();
  sethi( val );
  add( d, value & 0x3ff, d, rspec);

  // (A negative value could be loaded in 2 insns with sethi/xor,
  // but it would take a more complex relocation.)
#ifndef _LP64
  if ( value < 0)
    sra(d, G0, d);
#endif
}

// %%% End of moved six set instructions.


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}
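
// Worked example (added for clarity; not in the original source): for
// value == 0x0000000100000400, hi == 1 and lo == 0x400, so the general path
// emits sethi(1, tmp) plus or3(tmp, 1, tmp) to build 1 (hi22(1) is 0, so the
// low-10 or3 supplies the bit), sethi(0x400, d) to build the low word (its
// low10 is 0, so no or3 for d), then sllx(tmp, 32, tmp) and or3(d, tmp, d)
// to combine: five instructions in all.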

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}
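
// Note (added for clarity; not in the original source): the round-up above
// keeps nWords even, so the frame size stays a multiple of two words
// (16 bytes in LP64), matching SPARC's double-word stack alignment.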


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords = 0) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}


1542 void MacroAssembler::align(int modulus) { | |
1543 while (offset() % modulus != 0) nop(); | |
1544 } | |
1545 | |
1546 | |
1547 void MacroAssembler::safepoint() { | |
1548 relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint)); | |
1549 } | |
1550 | |
1551 | |
1552 void RegistersForDebugging::print(outputStream* s) { | |
1553 int j; | |
1554 for ( j = 0; j < 8; ++j ) | |
1555 if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]); | |
1556 else s->print_cr( "fp = 0x%.16lx", i[j]); | |
1557 s->cr(); | |
1558 | |
1559 for ( j = 0; j < 8; ++j ) | |
1560 s->print_cr("l%d = 0x%.16lx", j, l[j]); | |
1561 s->cr(); | |
1562 | |
1563 for ( j = 0; j < 8; ++j ) | |
1564 if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]); | |
1565 else s->print_cr( "sp = 0x%.16lx", o[j]); | |
1566 s->cr(); | |
1567 | |
1568 for ( j = 0; j < 8; ++j ) | |
1569 s->print_cr("g%d = 0x%.16lx", j, g[j]); | |
1570 s->cr(); | |
1571 | |
1572 // print out floats with compression | |
1573 for (j = 0; j < 32; ) { | |
1574 jfloat val = f[j]; | |
1575 int last = j; | |
1576 for ( ; last+1 < 32; ++last ) { | |
1577 char b1[1024], b2[1024]; | |
1578 sprintf(b1, "%f", val); | |
1579 sprintf(b2, "%f", f[last+1]); | |
1580 if (strcmp(b1, b2)) | |
1581 break; | |
1582 } | |
1583 s->print("f%d", j); | |
1584 if ( j != last ) s->print(" - f%d", last); | |
1585 s->print(" = %f", val); | |
1586 s->fill_to(25); | |
1587 s->print_cr(" (0x%x)", *(int*)&val); | |
1588 j = last + 1; | |
1589 } | |
1590 s->cr(); | |
1591 | |
1592 // and doubles (evens only) | |
1593 for (j = 0; j < 32; ) { | |
1594 jdouble val = d[j]; | |
1595 int last = j; | |
1596 for ( ; last+1 < 32; ++last ) { | |
1597 char b1[1024], b2[1024]; | |
1598 sprintf(b1, "%f", val); | |
1599 sprintf(b2, "%f", d[last+1]); | |
1600 if (strcmp(b1, b2)) | |
1601 break; | |
1602 } | |
1603 s->print("d%d", 2 * j); | |
1604 if ( j != last ) s->print(" - d%d", 2 * last); | |
1605 s->print(" = %f", val); | |
1606 s->fill_to(30); | |
1607 s->print("(0x%x)", *(int*)&val); | |
1608 s->fill_to(42); | |
1609 s->print_cr("(0x%x)", *(1 + (int*)&val)); | |
1610 j = last + 1; | |
1611 } | |
1612 s->cr(); | |
1613 } | |
1614 | |
1615 void RegistersForDebugging::save_registers(MacroAssembler* a) { | |
1616 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0); | |
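// Editorial note (illustration, not in the original file): O0 now points
// at a RegistersForDebugging area sized just below the unbiased FP;
// rounding the struct size to a jdouble multiple keeps the double stores
// below properly aligned, and flush_windows() forces the register windows
// out to memory so they can be read back through the saved-window addresses.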
1617 a->flush_windows(); | |
1618 int i; | |
1619 for (i = 0; i < 8; ++i) { | |
1620 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i)); | |
1621 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i)); | |
1622 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i)); | |
1623 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i)); | |
1624 } | |
1625 for (i = 0; i < 32; ++i) { | |
1626 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i)); | |
1627 } | |
1628 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { | |
1629 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i)); | |
1630 } | |
1631 } | |
1632 | |
1633 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) { | |
1634 for (int i = 1; i < 8; ++i) { | |
1635 a->ld_ptr(r, g_offset(i), as_gRegister(i)); | |
1636 } | |
1637 for (int j = 0; j < 32; ++j) { | |
1638 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j)); | |
1639 } | |
1640 for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) { | |
1641 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k)); | |
1642 } | |
1643 } | |
1644 | |
1645 | |
1646 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack | |
1647 void MacroAssembler::push_fTOS() { | |
1648 // %%%%%% need to implement this | |
1649 } | |
1650 | |
1651 // pops double TOS element from CPU stack and pushes on FPU stack | |
1652 void MacroAssembler::pop_fTOS() { | |
1653 // %%%%%% need to implement this | |
1654 } | |
1655 | |
1656 void MacroAssembler::empty_FPU_stack() { | |
1657 // %%%%%% need to implement this | |
1658 } | |
1659 | |
1660 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) { | |
1661 // plausibility check for oops | |
1662 if (!VerifyOops) return; | |
1663 | |
1664 if (reg == G0) return; // always NULL, which is always an oop | |
1665 | |
1666 char buffer[16]; | |
1667 sprintf(buffer, "%d", line); | |
1668 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer); | |
1669 char * real_msg = new char[len]; | |
1670 sprintf(real_msg, "%s (%s:%d)", msg, file, line); | |
1671 | |
1672 // Call indirectly to solve generation ordering problem | |
1673 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); | |
1674 | |
1675 // Make some space on stack above the current register window. | |
1676 // Enough to hold 8 64-bit registers. | |
1677 add(SP,-8*8,SP); | |
1678 | |
1679 // Save some 64-bit registers; a normal 'save' chops the heads off | |
1680 // of 64-bit longs in the 32-bit build. | |
1681 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); | |
1682 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); | |
1683 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed | |
1684 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); | |
1685 | |
1686 set((intptr_t)real_msg, O1); | |
1687 // Load address to call to into O7 | |
1688 load_ptr_contents(a, O7); | |
1689 // Register call to verify_oop_subroutine | |
1690 callr(O7, G0); | |
1691 delayed()->nop(); | |
1692 // recover frame size | |
1693 add(SP, 8*8,SP); | |
1694 } | |
1695 | |
1696 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) { | |
1697 // plausibility check for oops | |
1698 if (!VerifyOops) return; | |
1699 | |
1700 char buffer[64]; | |
1701 sprintf(buffer, "%d", line); | |
1702 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer); | |
1703 sprintf(buffer, " at SP+%d ", addr.disp()); | |
1704 len += strlen(buffer); | |
1705 char * real_msg = new char[len]; | |
1706 sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line); | |
1707 | |
1708 // Call indirectly to solve generation ordering problem | |
1709 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); | |
1710 | |
1711 // Make some space on stack above the current register window. | |
1712 // Enough to hold 8 64-bit registers. | |
1713 add(SP,-8*8,SP); | |
1714 | |
1715 // Save some 64-bit registers; a normal 'save' chops the heads off | |
1716 // of 64-bit longs in the 32-bit build. | |
1717 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); | |
1718 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); | |
1719 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed | |
1720 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); | |
1721 | |
1722 set((intptr_t)real_msg, O1); | |
1723 // Load address to call to into O7 | |
1724 load_ptr_contents(a, O7); | |
1725 // Register call to verify_oop_subroutine | |
1726 callr(O7, G0); | |
1727 delayed()->nop(); | |
1728 // recover frame size | |
1729 add(SP, 8*8,SP); | |
1730 } | |
1731 | |
1732 // side-door communication with signalHandler in os_solaris.cpp | |
1733 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL }; | |
1734 | |
1735 // This macro is expanded just once; it creates shared code. Contract: | |
1736 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY | |
1737 // registers, including flags. May not use a register 'save', as this blows | |
1738 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf' | |
1739 // call. | |
1740 void MacroAssembler::verify_oop_subroutine() { | |
1741 assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" ); | |
1742 | |
1743 // Leaf call; no frame. | |
1744 Label succeed, fail, null_or_fail; | |
1745 | |
1746 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home). | |
1747 // O0 is now the oop to be checked. O7 is the return address. | |
1748 Register O0_obj = O0; | |
1749 | |
1750 // Save some more registers for temps. | |
1751 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8); | |
1752 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8); | |
1753 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8); | |
1754 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8); | |
1755 | |
1756 // Save flags | |
1757 Register O5_save_flags = O5; | |
1758 rdccr( O5_save_flags ); | |
1759 | |
1760 { // count number of verifies | |
1761 Register O2_adr = O2; | |
1762 Register O3_accum = O3; | |
1763 Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() ); | |
1764 sethi(count_addr); | |
1765 ld(count_addr, O3_accum); | |
1766 inc(O3_accum); | |
1767 st(O3_accum, count_addr); | |
1768 } | |
1769 | |
1770 Register O2_mask = O2; | |
1771 Register O3_bits = O3; | |
1772 Register O4_temp = O4; | |
1773 | |
1774 // mark lower end of faulting range | |
1775 assert(_verify_oop_implicit_branch[0] == NULL, "set once"); | |
1776 _verify_oop_implicit_branch[0] = pc(); | |
1777 | |
1778 // We can't check the mark oop because it could be in the process of | |
1779 // locking or unlocking while this is running. | |
1780 set(Universe::verify_oop_mask (), O2_mask); | |
1781 set(Universe::verify_oop_bits (), O3_bits); | |
1782 | |
1783 // assert((obj & oop_mask) == oop_bits); | |
1784 and3(O0_obj, O2_mask, O4_temp); | |
1785 cmp(O4_temp, O3_bits); | |
1786 brx(notEqual, false, pn, null_or_fail); | |
1787 delayed()->nop(); | |
1788 | |
1789 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) { | |
1790 // the null_or_fail case is useless; must test for null separately | |
1791 br_null(O0_obj, false, pn, succeed); | |
1792 delayed()->nop(); | |
1793 } | |
1794 | |
1795 // Check the klassOop of this object for being in the right area of memory. | |
1796 // Cannot do the load in the delay slot above in case O0 is null | |
113 | 1797 load_klass(O0_obj, O0_obj); |
0 | 1798 // assert((klass & klass_mask) == klass_bits); |
1799 if( Universe::verify_klass_mask() != Universe::verify_oop_mask() ) | |
1800 set(Universe::verify_klass_mask(), O2_mask); | |
1801 if( Universe::verify_klass_bits() != Universe::verify_oop_bits() ) | |
1802 set(Universe::verify_klass_bits(), O3_bits); | |
1803 and3(O0_obj, O2_mask, O4_temp); | |
1804 cmp(O4_temp, O3_bits); | |
1805 brx(notEqual, false, pn, fail); | |
113 | 1806 delayed()->nop(); |
0 | 1807 // Check the klass's klass |
113 | 1808 load_klass(O0_obj, O0_obj); |
0 | 1809 and3(O0_obj, O2_mask, O4_temp); |
1810 cmp(O4_temp, O3_bits); | |
1811 brx(notEqual, false, pn, fail); | |
1812 delayed()->wrccr( O5_save_flags ); // Restore CCR's | |
1813 | |
1814 // mark upper end of faulting range | |
1815 _verify_oop_implicit_branch[1] = pc(); | |
1816 | |
1817 //----------------------- | |
1818 // all tests pass | |
1819 bind(succeed); | |
1820 | |
1821 // Restore prior 64-bit registers | |
1822 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0); | |
1823 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1); | |
1824 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2); | |
1825 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3); | |
1826 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4); | |
1827 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5); | |
1828 | |
1829 retl(); // Leaf return; restore prior O7 in delay slot | |
1830 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7); | |
1831 | |
1832 //----------------------- | |
1833 bind(null_or_fail); // nulls are less common but OK | |
1834 br_null(O0_obj, false, pt, succeed); | |
1835 delayed()->wrccr( O5_save_flags ); // Restore CCR's | |
1836 | |
1837 //----------------------- | |
1838 // report failure: | |
1839 bind(fail); | |
1840 _verify_oop_implicit_branch[2] = pc(); | |
1841 | |
1842 wrccr( O5_save_flags ); // Restore CCR's | |
1843 | |
1844 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); | |
1845 | |
1846 // stop_subroutine expects message pointer in I1. | |
1847 mov(I1, O1); | |
1848 | |
1849 // Restore prior 64-bit registers | |
1850 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0); | |
1851 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1); | |
1852 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2); | |
1853 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3); | |
1854 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4); | |
1855 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5); | |
1856 | |
1857 // factor long stop-sequence into subroutine to save space | |
1858 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); | |
1859 | |
1860 // call indirectly to solve generation ordering problem | |
1861 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address()); | |
1862 load_ptr_contents(a, O5); | |
1863 jmpl(O5, 0, O7); | |
1864 delayed()->nop(); | |
1865 } | |
1866 | |
1867 | |
1868 void MacroAssembler::stop(const char* msg) { | |
1869 // save frame first to get O7 for return address | |
1870 // add one word to the size in case the struct is an odd number of words long | |
1871 // It must be doubleword-aligned for storing doubles into it. | |
1872 | |
1873 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); | |
1874 | |
1875 // stop_subroutine expects message pointer in I1. | |
1876 set((intptr_t)msg, O1); | |
1877 | |
1878 // factor long stop-sequence into subroutine to save space | |
1879 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); | |
1880 | |
1881 // call indirectly to solve generation ordering problem | |
1882 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address()); | |
1883 load_ptr_contents(a, O5); | |
1884 jmpl(O5, 0, O7); | |
1885 delayed()->nop(); | |
1886 | |
1887 breakpoint_trap(); // make stop actually stop rather than writing | |
1888 // unnoticeable results in the output files. | |
1889 | |
1890 // restore(); done in callee to save space! | |
1891 } | |
1892 | |
1893 | |
1894 void MacroAssembler::warn(const char* msg) { | |
1895 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); | |
1896 RegistersForDebugging::save_registers(this); | |
1897 mov(O0, L0); | |
1898 set((intptr_t)msg, O0); | |
1899 call( CAST_FROM_FN_PTR(address, warning) ); | |
1900 delayed()->nop(); | |
1901 // ret(); | |
1902 // delayed()->restore(); | |
1903 RegistersForDebugging::restore_registers(this, L0); | |
1904 restore(); | |
1905 } | |
1906 | |
1907 | |
1908 void MacroAssembler::untested(const char* what) { | |
1909 // We must be able to turn interactive prompting off | |
1910 // in order to run automated test scripts on the VM | |
1911 // Use the flag ShowMessageBoxOnError | |
1912 | |
1913 char* b = new char[1024]; | |
1914 sprintf(b, "untested: %s", what); | |
1915 | |
1916 if ( ShowMessageBoxOnError ) stop(b); | |
1917 else warn(b); | |
1918 } | |
1919 | |
1920 | |
1921 void MacroAssembler::stop_subroutine() { | |
1922 RegistersForDebugging::save_registers(this); | |
1923 | |
1924 // for the sake of the debugger, stick a PC on the current frame | |
1925 // (this assumes that the caller has performed an extra "save") | |
1926 mov(I7, L7); | |
1927 add(O7, -7 * BytesPerInt, I7); | |
1928 | |
1929 save_frame(); // one more save to free up another O7 register | |
1930 mov(I0, O1); // addr of reg save area | |
1931 | |
1932 // We expect pointer to message in I1. Caller must set it up in O1 | |
1933 mov(I1, O0); // get msg | |
1934 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); | |
1935 delayed()->nop(); | |
1936 | |
1937 restore(); | |
1938 | |
1939 RegistersForDebugging::restore_registers(this, O0); | |
1940 | |
1941 save_frame(0); | |
1942 call(CAST_FROM_FN_PTR(address,breakpoint)); | |
1943 delayed()->nop(); | |
1944 restore(); | |
1945 | |
1946 mov(L7, I7); | |
1947 retl(); | |
1948 delayed()->restore(); // see stop above | |
1949 } | |
1950 | |
1951 | |
1952 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) { | |
1953 if ( ShowMessageBoxOnError ) { | |
1954 JavaThreadState saved_state = JavaThread::current()->thread_state(); | |
1955 JavaThread::current()->set_thread_state(_thread_in_vm); | |
1956 { | |
1957 // In order to make locks work, we need to fake an in_VM state | |
1958 ttyLocker ttyl; | |
1959 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg); | |
1960 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { | |
1961 ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value()); | |
1962 } | |
1963 if (os::message_box(msg, "Execution stopped, print registers?")) | |
1964 regs->print(::tty); | |
1965 } | |
1966 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state); | |
1967 } | |
1968 else | |
1969 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); | |
1970 assert(false, "error"); | |
1971 } | |
1972 | |
1973 | |
1974 #ifndef PRODUCT | |
1975 void MacroAssembler::test() { | |
1976 ResourceMark rm; | |
1977 | |
1978 CodeBuffer cb("test", 10000, 10000); | |
1979 MacroAssembler* a = new MacroAssembler(&cb); | |
1980 VM_Version::allow_all(); | |
1981 a->test_v9(); | |
1982 a->test_v8_onlys(); | |
1983 VM_Version::revert(); | |
1984 | |
1985 StubRoutines::Sparc::test_stop_entry()(); | |
1986 } | |
1987 #endif | |
1988 | |
1989 | |
1990 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) { | |
1991 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words? | |
1992 Label no_extras; | |
1993 br( negative, true, pt, no_extras ); // if neg, clear reg | |
1994 delayed()->set( 0, Rresult); // annulled, so only if taken | |
1995 bind( no_extras ); | |
1996 } | |
1997 | |
1998 | |
1999 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) { | |
2000 #ifdef _LP64 | |
2001 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult); | |
2002 #else | |
2003 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult); | |
2004 #endif | |
2005 bclr(1, Rresult); | |
2006 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes | |
2007 } | |
2008 | |
2009 | |
2010 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) { | |
2011 calc_frame_size(Rextra_words, Rresult); | |
2012 neg(Rresult); | |
2013 save(SP, Rresult, SP); | |
2014 } | |
2015 | |
2016 | |
2017 // --------------------------------------------------------- | |
2018 Assembler::RCondition cond2rcond(Assembler::Condition c) { | |
2019 switch (c) { | |
2020 /*case zero: */ | |
2021 case Assembler::equal: return Assembler::rc_z; | |
2022 case Assembler::lessEqual: return Assembler::rc_lez; | |
2023 case Assembler::less: return Assembler::rc_lz; | |
2024 /*case notZero:*/ | |
2025 case Assembler::notEqual: return Assembler::rc_nz; | |
2026 case Assembler::greater: return Assembler::rc_gz; | |
2027 case Assembler::greaterEqual: return Assembler::rc_gez; | |
2028 } | |
2029 ShouldNotReachHere(); | |
2030 return Assembler::rc_z; | |
2031 } | |
2032 | |
2033 // compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS | |
2034 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) { | |
2035 tst(s1); | |
2036 br (c, a, p, L); | |
2037 } | |
2038 | |
2039 | |
2040 // Compares a pointer register with zero and branches on null. | |
2041 // Does a test & branch on 32-bit systems and a register-branch on 64-bit. | |
2042 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) { | |
2043 assert_not_delayed(); | |
2044 #ifdef _LP64 | |
2045 bpr( rc_z, a, p, s1, L ); | |
2046 #else | |
2047 tst(s1); | |
2048 br ( zero, a, p, L ); | |
2049 #endif | |
2050 } | |
2051 | |
2052 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) { | |
2053 assert_not_delayed(); | |
2054 #ifdef _LP64 | |
2055 bpr( rc_nz, a, p, s1, L ); | |
2056 #else | |
2057 tst(s1); | |
2058 br ( notZero, a, p, L ); | |
2059 #endif | |
2060 } | |
2061 | |
2062 | |
2063 // instruction sequences factored across compiler & interpreter | |
2064 | |
2065 | |
2066 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low, | |
2067 Register Rb_hi, Register Rb_low, | |
2068 Register Rresult) { | |
2069 | |
2070 Label check_low_parts, done; | |
2071 | |
2072 cmp(Ra_hi, Rb_hi ); // compare hi parts | |
2073 br(equal, true, pt, check_low_parts); | |
2074 delayed()->cmp(Ra_low, Rb_low); // test low parts | |
2075 | |
2076 // And, with an unsigned comparison, it does not matter if the numbers | |
2077 // are negative or not. | |
2078 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff. | |
2079 // The second one is bigger (unsignedly). | |
2080 | |
2081 // Other notes: The first move in each triplet can be unconditional | |
2082 // (and therefore probably prefetchable). | |
2083 // And the equals case for the high part does not need testing, | |
2084 // since that triplet is reached only after finding the high halves differ. | |
2085 | |
2086 if (VM_Version::v9_instructions_work()) { | |
2087 | |
2088 mov ( -1, Rresult); | |
2089 ba( false, done ); delayed()-> movcc(greater, false, icc, 1, Rresult); | |
2090 } | |
2091 else { | |
2092 br(less, true, pt, done); delayed()-> set(-1, Rresult); | |
2093 br(greater, true, pt, done); delayed()-> set( 1, Rresult); | |
2094 } | |
2095 | |
2096 bind( check_low_parts ); | |
2097 | |
2098 if (VM_Version::v9_instructions_work()) { | |
2099 mov( -1, Rresult); | |
2100 movcc(equal, false, icc, 0, Rresult); | |
2101 movcc(greaterUnsigned, false, icc, 1, Rresult); | |
2102 } | |
2103 else { | |
2104 set(-1, Rresult); | |
2105 br(equal, true, pt, done); delayed()->set( 0, Rresult); | |
2106 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult); | |
2107 } | |
2108 bind( done ); | |
2109 } | |
2110 | |
2111 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { | |
2112 subcc( G0, Rlow, Rlow ); | |
2113 subc( G0, Rhi, Rhi ); | |
2114 } | |
2115 | |
2116 void MacroAssembler::lshl( Register Rin_high, Register Rin_low, | |
2117 Register Rcount, | |
2118 Register Rout_high, Register Rout_low, | |
2119 Register Rtemp ) { | |
2120 | |
2121 | |
2122 Register Ralt_count = Rtemp; | |
2123 Register Rxfer_bits = Rtemp; | |
2124 | |
2125 assert( Ralt_count != Rin_high | |
2126 && Ralt_count != Rin_low | |
2127 && Ralt_count != Rcount | |
2128 && Rxfer_bits != Rin_low | |
2129 && Rxfer_bits != Rin_high | |
2130 && Rxfer_bits != Rcount | |
2131 && Rxfer_bits != Rout_low | |
2132 && Rout_low != Rin_high, | |
2133 "register alias checks"); | |
2134 | |
2135 Label big_shift, done; | |
2136 | |
2137 // This code can be optimized to use the 64 bit shifts in V9. | |
2138 // Here we use the 32 bit shifts. | |
2139 | |
2140 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits | |
2141 subcc(Rcount, 31, Ralt_count); | |
2142 br(greater, true, pn, big_shift); | |
2143 delayed()-> | |
2144 dec(Ralt_count); | |
2145 | |
2146 // shift < 32 bits, Ralt_count = Rcount-31 | |
2147 | |
2148 // We get the transfer bits by shifting right by 32-count the low | |
2149 // register. This is done by shifting right by 31-count and then by one | |
2150 // more to take care of the special (rare) case where count is zero | |
2151 // (shifting by 32 would not work). | |
2152 | |
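// Worked example (editorial, assuming Rcount = 3): Ralt_count = 3 - 31 = -28
// (the dec in the delay slot is annulled on the fall-through path), negated
// to 28; srl by 28 plus the extra srl by 1 below shifts the low word right
// by 29 = 32 - 3, exactly the transfer-bit amount.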
2153 neg( Ralt_count ); | |
2154 | |
2155 // The order of the next two instructions is critical in the case where | |
2156 // Rin and Rout are the same and should not be reversed. | |
2157 | |
2158 srl( Rin_low, Ralt_count, Rxfer_bits ); // shift right by 31-count | |
2159 if (Rcount != Rout_low) { | |
2160 sll( Rin_low, Rcount, Rout_low ); // low half | |
2161 } | |
2162 sll( Rin_high, Rcount, Rout_high ); | |
2163 if (Rcount == Rout_low) { | |
2164 sll( Rin_low, Rcount, Rout_low ); // low half | |
2165 } | |
2166 srl( Rxfer_bits, 1, Rxfer_bits ); // shift right by one more | |
2167 ba (false, done); | |
2168 delayed()-> | |
2169 or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low | |
2170 | |
2171 // shift >= 32 bits, Ralt_count = Rcount-32 | |
2172 bind(big_shift); | |
2173 sll( Rin_low, Ralt_count, Rout_high ); | |
2174 clr( Rout_low ); | |
2175 | |
2176 bind(done); | |
2177 } | |
2178 | |
2179 | |
2180 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, | |
2181 Register Rcount, | |
2182 Register Rout_high, Register Rout_low, | |
2183 Register Rtemp ) { | |
2184 | |
2185 Register Ralt_count = Rtemp; | |
2186 Register Rxfer_bits = Rtemp; | |
2187 | |
2188 assert( Ralt_count != Rin_high | |
2189 && Ralt_count != Rin_low | |
2190 && Ralt_count != Rcount | |
2191 && Rxfer_bits != Rin_low | |
2192 && Rxfer_bits != Rin_high | |
2193 && Rxfer_bits != Rcount | |
2194 && Rxfer_bits != Rout_high | |
2195 && Rout_high != Rin_low, | |
2196 "register alias checks"); | |
2197 | |
2198 Label big_shift, done; | |
2199 | |
2200 // This code can be optimized to use the 64 bit shifts in V9. | |
2201 // Here we use the 32 bit shifts. | |
2202 | |
2203 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits | |
2204 subcc(Rcount, 31, Ralt_count); | |
2205 br(greater, true, pn, big_shift); | |
2206 delayed()->dec(Ralt_count); | |
2207 | |
2208 // shift < 32 bits, Ralt_count = Rcount-31 | |
2209 | |
2210 // We get the transfer bits by shifting left by 32-count the high | |
2211 // register. This is done by shifting left by 31-count and then by one | |
2212 // more to take care of the special (rare) case where count is zero | |
2213 // (shifting by 32 would not work). | |
2214 | |
2215 neg( Ralt_count ); | |
2216 if (Rcount != Rout_low) { | |
2217 srl( Rin_low, Rcount, Rout_low ); | |
2218 } | |
2219 | |
2220 // The order of the next two instructions is critical in the case where | |
2221 // Rin and Rout are the same and should not be reversed. | |
2222 | |
2223 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count | |
2224 sra( Rin_high, Rcount, Rout_high ); // high half | |
2225 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more | |
2226 if (Rcount == Rout_low) { | |
2227 srl( Rin_low, Rcount, Rout_low ); | |
2228 } | |
2229 ba (false, done); | |
2230 delayed()-> | |
2231 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high | |
2232 | |
2233 // shift >= 32 bits, Ralt_count = Rcount-32 | |
2234 bind(big_shift); | |
2235 | |
2236 sra( Rin_high, Ralt_count, Rout_low ); | |
2237 sra( Rin_high, 31, Rout_high ); // sign into hi | |
2238 | |
2239 bind( done ); | |
2240 } | |
2241 | |
2242 | |
2243 | |
2244 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, | |
2245 Register Rcount, | |
2246 Register Rout_high, Register Rout_low, | |
2247 Register Rtemp ) { | |
2248 | |
2249 Register Ralt_count = Rtemp; | |
2250 Register Rxfer_bits = Rtemp; | |
2251 | |
2252 assert( Ralt_count != Rin_high | |
2253 && Ralt_count != Rin_low | |
2254 && Ralt_count != Rcount | |
2255 && Rxfer_bits != Rin_low | |
2256 && Rxfer_bits != Rin_high | |
2257 && Rxfer_bits != Rcount | |
2258 && Rxfer_bits != Rout_high | |
2259 && Rout_high != Rin_low, | |
2260 "register alias checks"); | |
2261 | |
2262 Label big_shift, done; | |
2263 | |
2264 // This code can be optimized to use the 64 bit shifts in V9. | |
2265 // Here we use the 32 bit shifts. | |
2266 | |
2267 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits | |
2268 subcc(Rcount, 31, Ralt_count); | |
2269 br(greater, true, pn, big_shift); | |
2270 delayed()->dec(Ralt_count); | |
2271 | |
2272 // shift < 32 bits, Ralt_count = Rcount-31 | |
2273 | |
2274 // We get the transfer bits by shifting left by 32-count the high | |
2275 // register. This is done by shifting left by 31-count and then by one | |
2276 // more to take care of the special (rare) case where count is zero | |
2277 // (shifting by 32 would not work). | |
2278 | |
2279 neg( Ralt_count ); | |
2280 if (Rcount != Rout_low) { | |
2281 srl( Rin_low, Rcount, Rout_low ); | |
2282 } | |
2283 | |
2284 // The order of the next two instructions is critical in the case where | |
2285 // Rin and Rout are the same and should not be reversed. | |
2286 | |
2287 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count | |
2288 srl( Rin_high, Rcount, Rout_high ); // high half | |
2289 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more | |
2290 if (Rcount == Rout_low) { | |
2291 srl( Rin_low, Rcount, Rout_low ); | |
2292 } | |
2293 ba (false, done); | |
2294 delayed()-> | |
2295 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high | |
2296 | |
2297 // shift >= 32 bits, Ralt_count = Rcount-32 | |
2298 bind(big_shift); | |
2299 | |
2300 srl( Rin_high, Ralt_count, Rout_low ); | |
2301 clr( Rout_high ); | |
2302 | |
2303 bind( done ); | |
2304 } | |
2305 | |
2306 #ifdef _LP64 | |
2307 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { | |
2308 cmp(Ra, Rb); | |
2309 mov( -1, Rresult); | |
2310 movcc(equal, false, xcc, 0, Rresult); | |
2311 movcc(greater, false, xcc, 1, Rresult); | |
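// Net effect (editorial note): Rresult = -1, 0, or 1 according to the
// signed 64-bit comparison of Ra and Rb, computed without branches.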
2312 } | |
2313 #endif | |
2314 | |
2315 | |
2316 void MacroAssembler::float_cmp( bool is_float, int unordered_result, | |
2317 FloatRegister Fa, FloatRegister Fb, | |
2318 Register Rresult) { | |
2319 | |
2320 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); | |
2321 | |
2322 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less; | |
2323 Condition eq = f_equal; | |
2324 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater; | |
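// Editorial note: the unordered fcc outcome (a NaN operand) is folded into
// either the less or the greater condition, so Rresult ends up equal to
// unordered_result (-1 or 1) whenever either operand is NaN.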
2325 | |
2326 if (VM_Version::v9_instructions_work()) { | |
2327 | |
2328 mov( -1, Rresult ); | |
2329 movcc( eq, true, fcc0, 0, Rresult ); | |
2330 movcc( gt, true, fcc0, 1, Rresult ); | |
2331 | |
2332 } else { | |
2333 Label done; | |
2334 | |
2335 set( -1, Rresult ); | |
2336 //fb(lt, true, pn, done); delayed()->set( -1, Rresult ); | |
2337 fb( eq, true, pn, done); delayed()->set( 0, Rresult ); | |
2338 fb( gt, true, pn, done); delayed()->set( 1, Rresult ); | |
2339 | |
2340 bind (done); | |
2341 } | |
2342 } | |
2343 | |
2344 | |
2345 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) | |
2346 { | |
2347 if (VM_Version::v9_instructions_work()) { | |
2348 Assembler::fneg(w, s, d); | |
2349 } else { | |
2350 if (w == FloatRegisterImpl::S) { | |
2351 Assembler::fneg(w, s, d); | |
2352 } else if (w == FloatRegisterImpl::D) { | |
2353 // number() does a sanity check on the alignment. | |
2354 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && | |
2355 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); | |
2356 | |
2357 Assembler::fneg(FloatRegisterImpl::S, s, d); | |
2358 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2359 } else { | |
2360 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); | |
2361 | |
2362 // number() does a sanity check on the alignment. | |
2363 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && | |
2364 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); | |
2365 | |
2366 Assembler::fneg(FloatRegisterImpl::S, s, d); | |
2367 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2368 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); | |
2369 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); | |
2370 } | |
2371 } | |
2372 } | |
2373 | |
2374 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) | |
2375 { | |
2376 if (VM_Version::v9_instructions_work()) { | |
2377 Assembler::fmov(w, s, d); | |
2378 } else { | |
2379 if (w == FloatRegisterImpl::S) { | |
2380 Assembler::fmov(w, s, d); | |
2381 } else if (w == FloatRegisterImpl::D) { | |
2382 // number() does a sanity check on the alignment. | |
2383 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && | |
2384 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); | |
2385 | |
2386 Assembler::fmov(FloatRegisterImpl::S, s, d); | |
2387 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2388 } else { | |
2389 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); | |
2390 | |
2391 // number() does a sanity check on the alignment. | |
2392 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && | |
2393 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); | |
2394 | |
2395 Assembler::fmov(FloatRegisterImpl::S, s, d); | |
2396 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2397 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); | |
2398 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); | |
2399 } | |
2400 } | |
2401 } | |
2402 | |
2403 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) | |
2404 { | |
2405 if (VM_Version::v9_instructions_work()) { | |
2406 Assembler::fabs(w, s, d); | |
2407 } else { | |
2408 if (w == FloatRegisterImpl::S) { | |
2409 Assembler::fabs(w, s, d); | |
2410 } else if (w == FloatRegisterImpl::D) { | |
2411 // number() does a sanity check on the alignment. | |
2412 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && | |
2413 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); | |
2414 | |
2415 Assembler::fabs(FloatRegisterImpl::S, s, d); | |
2416 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2417 } else { | |
2418 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); | |
2419 | |
2420 // number() does a sanity check on the alignment. | |
2421 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && | |
2422 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); | |
2423 | |
2424 Assembler::fabs(FloatRegisterImpl::S, s, d); | |
2425 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); | |
2426 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); | |
2427 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); | |
2428 } | |
2429 } | |
2430 } | |
2431 | |
2432 void MacroAssembler::save_all_globals_into_locals() { | |
2433 mov(G1,L1); | |
2434 mov(G2,L2); | |
2435 mov(G3,L3); | |
2436 mov(G4,L4); | |
2437 mov(G5,L5); | |
2438 mov(G6,L6); | |
2439 mov(G7,L7); | |
2440 } | |
2441 | |
2442 void MacroAssembler::restore_globals_from_locals() { | |
2443 mov(L1,G1); | |
2444 mov(L2,G2); | |
2445 mov(L3,G3); | |
2446 mov(L4,G4); | |
2447 mov(L5,G5); | |
2448 mov(L6,G6); | |
2449 mov(L7,G7); | |
2450 } | |
2451 | |
2452 // Use for 64 bit operation. | |
2453 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) | |
2454 { | |
2455 // store ptr_reg as the new top value | |
2456 #ifdef _LP64 | |
2457 casx(top_ptr_reg, top_reg, ptr_reg); | |
2458 #else | |
2459 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm); | |
2460 #endif // _LP64 | |
2461 } | |
2462 | |
2463 // [RGV] This routine does not handle 64 bit operations. | |
2464 // use casx_under_lock() or casx directly!!! | |
2465 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) | |
2466 { | |
2467 // store ptr_reg as the new top value | |
2468 if (VM_Version::v9_instructions_work()) { | |
2469 cas(top_ptr_reg, top_reg, ptr_reg); | |
2470 } else { | |
2471 | |
2472 // If the register is neither an out nor a global, it is not visible | |
2473 // after the save. Allocate a register for it, save its | |
2474 // value in the register save area (the save may not flush | |
2475 // registers to the save area). | |
2476 | |
2477 Register top_ptr_reg_after_save; | |
2478 Register top_reg_after_save; | |
2479 Register ptr_reg_after_save; | |
2480 | |
2481 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) { | |
2482 top_ptr_reg_after_save = top_ptr_reg->after_save(); | |
2483 } else { | |
2484 Address reg_save_addr = top_ptr_reg->address_in_saved_window(); | |
2485 top_ptr_reg_after_save = L0; | |
2486 st(top_ptr_reg, reg_save_addr); | |
2487 } | |
2488 | |
2489 if (top_reg->is_out() || top_reg->is_global()) { | |
2490 top_reg_after_save = top_reg->after_save(); | |
2491 } else { | |
2492 Address reg_save_addr = top_reg->address_in_saved_window(); | |
2493 top_reg_after_save = L1; | |
2494 st(top_reg, reg_save_addr); | |
2495 } | |
2496 | |
2497 if (ptr_reg->is_out() || ptr_reg->is_global()) { | |
2498 ptr_reg_after_save = ptr_reg->after_save(); | |
2499 } else { | |
2500 Address reg_save_addr = ptr_reg->address_in_saved_window(); | |
2501 ptr_reg_after_save = L2; | |
2502 st(ptr_reg, reg_save_addr); | |
2503 } | |
2504 | |
2505 const Register& lock_reg = L3; | |
2506 const Register& lock_ptr_reg = L4; | |
2507 const Register& value_reg = L5; | |
2508 const Register& yield_reg = L6; | |
2509 const Register& yieldall_reg = L7; | |
2510 | |
2511 save_frame(); | |
2512 | |
2513 if (top_ptr_reg_after_save == L0) { | |
2514 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save); | |
2515 } | |
2516 | |
2517 if (top_reg_after_save == L1) { | |
2518 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save); | |
2519 } | |
2520 | |
2521 if (ptr_reg_after_save == L2) { | |
2522 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save); | |
2523 } | |
2524 | |
2525 Label retry_get_lock; | |
2526 Label not_same; | |
2527 Label dont_yield; | |
2528 | |
2529 assert(lock_addr, "lock_address should be non null for v8"); | |
2530 set((intptr_t)lock_addr, lock_ptr_reg); | |
2531 // Initialize yield counter | |
2532 mov(G0,yield_reg); | |
2533 mov(G0, yieldall_reg); | |
2534 set(StubRoutines::Sparc::locked, lock_reg); | |
2535 | |
2536 bind(retry_get_lock); | |
2537 cmp(yield_reg, V8AtomicOperationUnderLockSpinCount); | |
2538 br(Assembler::less, false, Assembler::pt, dont_yield); | |
2539 delayed()->nop(); | |
2540 | |
2541 if(use_call_vm) { | |
2542 Untested("Need to verify global reg consistency"); | |
2543 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg); | |
2544 } else { | |
2545 // Save the regs and make space for a C call | |
2546 save(SP, -96, SP); | |
2547 save_all_globals_into_locals(); | |
2548 call(CAST_FROM_FN_PTR(address,os::yield_all)); | |
2549 delayed()->mov(yieldall_reg, O0); | |
2550 restore_globals_from_locals(); | |
2551 restore(); | |
2552 } | |
2553 | |
2554 // reset the counter | |
2555 mov(G0,yield_reg); | |
2556 add(yieldall_reg, 1, yieldall_reg); | |
2557 | |
2558 bind(dont_yield); | |
2559 // try to get lock | |
2560 swap(lock_ptr_reg, 0, lock_reg); | |
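// Editorial sketch: swap atomically exchanges lock_reg (preloaded with the
// "locked" pattern) with the lock word; reading back "unlocked" below means
// this thread now owns the V8 software lock, otherwise we spin and retry.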
2561 | |
2562 // did we get the lock? | |
2563 cmp(lock_reg, StubRoutines::Sparc::unlocked); | |
2564 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock); | |
2565 delayed()->add(yield_reg,1,yield_reg); | |
2566 | |
2567 // yes, got lock. do we have the same top? | |
2568 ld(top_ptr_reg_after_save, 0, value_reg); | |
2569 cmp(value_reg, top_reg_after_save); | |
2570 br(Assembler::notEqual, false, Assembler::pn, not_same); | |
2571 delayed()->nop(); | |
2572 | |
2573 // yes, same top. | |
2574 st(ptr_reg_after_save, top_ptr_reg_after_save, 0); | |
2575 membar(Assembler::StoreStore); | |
2576 | |
2577 bind(not_same); | |
2578 mov(value_reg, ptr_reg_after_save); | |
2579 st(lock_reg, lock_ptr_reg, 0); // unlock | |
2580 | |
2581 restore(); | |
2582 } | |
2583 } | |
2584 | |
2585 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg, | |
2586 Label& done, Label* slow_case, | |
2587 BiasedLockingCounters* counters) { | |
2588 assert(UseBiasedLocking, "why call this otherwise?"); | |
2589 | |
2590 if (PrintBiasedLockingStatistics) { | |
2591 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); | |
2592 if (counters == NULL) | |
2593 counters = BiasedLocking::counters(); | |
2594 } | |
2595 | |
2596 Label cas_label; | |
2597 | |
2598 // Biased locking | |
2599 // See whether the lock is currently biased toward our thread and | |
2600 // whether the epoch is still valid | |
2601 // Note that the runtime guarantees sufficient alignment of JavaThread | |
2602 // pointers to allow age to be placed into low bits | |
2603 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); | |
2604 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); | |
2605 cmp(temp_reg, markOopDesc::biased_lock_pattern); | |
2606 brx(Assembler::notEqual, false, Assembler::pn, cas_label); | |
113 | 2607 delayed()->nop(); |
2608 |
2609 load_klass(obj_reg, temp_reg); |
0 | 2610 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); |
2611 or3(G2_thread, temp_reg, temp_reg); | |
2612 xor3(mark_reg, temp_reg, temp_reg); | |
2613 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); | |
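// Editorial sketch: temp_reg held prototype_header | thread; the xor with
// mark_reg leaves only the differing bits, and masking out the age bits
// makes the result zero exactly when the mark is already biased toward
// this thread in the current epoch.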
2614 if (counters != NULL) { | |
2615 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); | |
2616 // Reload mark_reg as we may need it later | |
2617 ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg); | |
2618 } | |
2619 brx(Assembler::equal, true, Assembler::pt, done); | |
2620 delayed()->nop(); | |
2621 | |
2622 Label try_revoke_bias; | |
2623 Label try_rebias; | |
2624 Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); | |
2625 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
2626 | |
2627 // At this point we know that the header has the bias pattern and | |
2628 // that we are not the bias owner in the current epoch. We need to | |
2629 // figure out more details about the state of the header in order to | |
2630 // know what operations can be legally performed on the object's | |
2631 // header. | |
2632 | |
2633 // If the low three bits in the xor result aren't clear, that means | |
2634 // the prototype header is no longer biased and we have to revoke | |
2635 // the bias on this object. | |
2636 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); | |
2637 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); | |
2638 | |
2639 // Biasing is still enabled for this data type. See whether the | |
2640 // epoch of the current bias is still valid, meaning that the epoch | |
2641 // bits of the mark word are equal to the epoch bits of the | |
2642 // prototype header. (Note that the prototype header's epoch bits | |
2643 // only change at a safepoint.) If not, attempt to rebias the object | |
2644 // toward the current thread. Note that we must be absolutely sure | |
2645 // that the current epoch is invalid in order to do this because | |
2646 // otherwise the manipulations it performs on the mark word are | |
2647 // illegal. | |
2648 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); | |
2649 brx(Assembler::notZero, false, Assembler::pn, try_rebias); | |
2650 | |
2651 // The epoch of the current bias is still valid but we know nothing | |
2652 // about the owner; it might be set or it might be clear. Try to | |
2653 // acquire the bias of the object using an atomic operation. If this | |
2654 // fails we will go in to the runtime to revoke the object's bias. | |
2655 // Note that we first construct the presumed unbiased header so we | |
2656 // don't accidentally blow away another thread's valid bias. | |
2657 delayed()->and3(mark_reg, | |
2658 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, | |
2659 mark_reg); | |
2660 or3(G2_thread, mark_reg, temp_reg); | |
2661 casx_under_lock(mark_addr.base(), mark_reg, temp_reg, | |
2662 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
2663 // If the biasing toward our thread failed, this means that | |
2664 // another thread succeeded in biasing it toward itself and we | |
2665 // need to revoke that bias. The revocation will occur in the | |
2666 // interpreter runtime in the slow case. | |
2667 cmp(mark_reg, temp_reg); | |
2668 if (counters != NULL) { | |
2669 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); | |
2670 } | |
2671 if (slow_case != NULL) { | |
2672 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); | |
2673 delayed()->nop(); | |
2674 } | |
2675 br(Assembler::always, false, Assembler::pt, done); | |
2676 delayed()->nop(); | |
2677 | |
2678 bind(try_rebias); | |
2679 // At this point we know the epoch has expired, meaning that the | |
2680 // current "bias owner", if any, is actually invalid. Under these | |
2681 // circumstances _only_, we are allowed to use the current header's | |
2682 // value as the comparison value when doing the cas to acquire the | |
2683 // bias in the current epoch. In other words, we allow transfer of | |
2684 // the bias from one thread to another directly in this situation. | |
2685 // | |
2686 // FIXME: due to a lack of registers we currently blow away the age | |
2687 // bits in this situation. Should attempt to preserve them. | |
113 | 2688 load_klass(obj_reg, temp_reg); |
0 | 2689 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); |
2690 or3(G2_thread, temp_reg, temp_reg); | |
2691 casx_under_lock(mark_addr.base(), mark_reg, temp_reg, | |
2692 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
2693 // If the biasing toward our thread failed, this means that | |
2694 // another thread succeeded in biasing it toward itself and we | |
2695 // need to revoke that bias. The revocation will occur in the | |
2696 // interpreter runtime in the slow case. | |
2697 cmp(mark_reg, temp_reg); | |
2698 if (counters != NULL) { | |
2699 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); | |
2700 } | |
2701 if (slow_case != NULL) { | |
2702 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); | |
2703 delayed()->nop(); | |
2704 } | |
2705 br(Assembler::always, false, Assembler::pt, done); | |
2706 delayed()->nop(); | |
2707 | |
2708 bind(try_revoke_bias); | |
2709 // The prototype mark in the klass doesn't have the bias bit set any | |
2710 // more, indicating that objects of this data type are not supposed | |
2711 // to be biased any more. We are going to try to reset the mark of | |
2712 // this object to the prototype value and fall through to the | |
2713 // CAS-based locking scheme. Note that if our CAS fails, it means | |
2714 // that another thread raced us for the privilege of revoking the | |
2715 // bias of this particular object, so it's okay to continue in the | |
2716 // normal locking code. | |
2717 // | |
2718 // FIXME: due to a lack of registers we currently blow away the age | |
2719 // bits in this situation. Should attempt to preserve them. | |
113 | 2720 load_klass(obj_reg, temp_reg); |
0 | 2721 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); |
2722 casx_under_lock(mark_addr.base(), mark_reg, temp_reg, | |
2723 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
2724 // Fall through to the normal CAS-based lock, because no matter what | |
2725 // the result of the above CAS, some thread must have succeeded in | |
2726 // removing the bias bit from the object's header. | |
2727 if (counters != NULL) { | |
2728 cmp(mark_reg, temp_reg); | |
2729 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); | |
2730 } | |
2731 | |
2732 bind(cas_label); | |
2733 } | |
2734 | |
2735 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, | |
2736 bool allow_delay_slot_filling) { | |
2737 // Check for biased locking unlock case, which is a no-op | |
2738 // Note: we do not have to check the thread ID for two reasons. | |
2739 // First, the interpreter checks for IllegalMonitorStateException at | |
2740 // a higher level. Second, if the bias was revoked while we held the | |
2741 // lock, the object could not be rebiased toward another thread, so | |
2742 // the bias bit would be clear. | |
2743 ld_ptr(mark_addr, temp_reg); | |
2744 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); | |
2745 cmp(temp_reg, markOopDesc::biased_lock_pattern); | |
2746 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); | |
2747 delayed(); | |
2748 if (!allow_delay_slot_filling) { | |
2749 nop(); | |
2750 } | |
2751 } | |
2752 | |
2753 | |
2754 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by | |
2755 // Solaris/SPARC's "as". Another apt name would be cas_ptr() | |
2756 | |
2757 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) { | |
2758 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ; | |
2759 } | |
2760 | |
2761 | |
2762 | |
2763 // compiler_lock_object() and compiler_unlock_object() are direct transliterations | |
2764 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. | |
2765 // The code could be tightened up considerably. | |
2766 // | |
2767 // box->dhw disposition - post-conditions at DONE_LABEL. | |
2768 // - Successful inflated lock: box->dhw != 0. | |
2769 // Any non-zero value suffices. | |
2770 // Consider G2_thread, rsp, boxReg, or unused_mark() | |
2771 // - Successful Stack-lock: box->dhw == mark. | |
2772 // box->dhw must contain the displaced mark word value | |
2773 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. | |
2774 // The slow-path fast_enter() and slow_enter() operators | |
2775 // are responsible for setting box->dhw = NonZero (typically ::unused_mark). | |
2776 // - Biased: box->dhw is undefined | |
2777 // | |
2778 // SPARC refworkload performance - specifically jetstream and scimark - is | |
2779 // extremely sensitive to the size of the code emitted by compiler_lock_object | |
2780 // and compiler_unlock_object. Critically, the key factor is code size, not path | |
2781 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the | |
2782 // effect). | |
2783 | |
2784 | |
2785 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch, | |
2786 BiasedLockingCounters* counters) { | |
2787 Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); | |
2788 | |
2789 verify_oop(Roop); | |
2790 Label done ; | |
2791 | |
2792 if (counters != NULL) { | |
2793 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); | |
2794 } | |
2795 | |
2796 if (EmitSync & 1) { | |
2797 mov (3, Rscratch) ; | |
2798 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2799 cmp (SP, G0) ; | |
2800 return ; | |
2801 } | |
2802 | |
2803 if (EmitSync & 2) { | |
2804 | |
2805 // Fetch object's markword | |
2806 ld_ptr(mark_addr, Rmark); | |
2807 | |
2808 if (UseBiasedLocking) { | |
2809 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); | |
2810 } | |
2811 | |
2812 // Save Rbox in Rscratch to be used for the cas operation | |
2813 mov(Rbox, Rscratch); | |
2814 | |
2815 // set Rmark to markOop | markOopDesc::unlocked_value | |
2816 or3(Rmark, markOopDesc::unlocked_value, Rmark); | |
2817 | |
2818 // Initialize the box. (Must happen before we update the object mark!) | |
2819 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2820 | |
2821 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop | |
2822 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
2823 casx_under_lock(mark_addr.base(), Rmark, Rscratch, | |
2824 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
2825 | |
2826 // if compare/exchange succeeded we found an unlocked object and we now have locked it | |
2827 // hence we are done | |
2828 cmp(Rmark, Rscratch); | |
2829 #ifdef _LP64 | |
2830 sub(Rscratch, STACK_BIAS, Rscratch); | |
2831 #endif | |
2832 brx(Assembler::equal, false, Assembler::pt, done); | |
2833 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot | |
2834 | |
2835 // we did not find an unlocked object so see if this is a recursive case | |
2836 // sub(Rscratch, SP, Rscratch); | |
2837 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); | |
2838 andcc(Rscratch, 0xfffff003, Rscratch); | |
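// Editorial sketch: after the sub above, Rscratch is the distance from SP
// to the fetched mark word; a recursive stack-lock leaves a small, 4-byte
// aligned offset into our own stack page, so masking with 0xfffff003
// yields zero (the recursion marker stored into the box) only in that case.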
2839 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2840 bind (done) ; | |
2841 return ; | |
2842 } | |
2843 | |
2844 Label Egress ; | |
2845 | |
2846 if (EmitSync & 256) { | |
2847 Label IsInflated ; | |
2848 | |
2849 ld_ptr (mark_addr, Rmark); // fetch obj->mark | |
2850 // Triage: biased, stack-locked, neutral, inflated | |
2851 if (UseBiasedLocking) { | |
2852 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); | |
2853 // Invariant: if control reaches this point in the emitted stream | |
2854 // then Rmark has not been modified. | |
2855 } | |
2856 | |
2857 // Store mark into displaced mark field in the on-stack basic-lock "box" | |
2858 // Critically, this must happen before the CAS | |
2859 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. | |
2860 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2861 andcc (Rmark, 2, G0) ; | |
2862 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ; | |
2863 delayed() -> | |
2864 | |
2865 // Try stack-lock acquisition. | |
2866 // Beware: the 1st instruction is in a delay slot | |
2867 mov (Rbox, Rscratch); | |
2868 or3 (Rmark, markOopDesc::unlocked_value, Rmark); | |
2869 assert (mark_addr.disp() == 0, "cas must take a zero displacement"); | |
2870 casn (mark_addr.base(), Rmark, Rscratch) ; | |
2871 cmp (Rmark, Rscratch); | |
2872 brx (Assembler::equal, false, Assembler::pt, done); | |
2873 delayed()->sub(Rscratch, SP, Rscratch); | |
2874 | |
2875 // Stack-lock attempt failed - check for recursive stack-lock. | |
2876 // See the comments below about how we might remove this case. | |
2877 #ifdef _LP64 | |
2878 sub (Rscratch, STACK_BIAS, Rscratch); | |
2879 #endif | |
2880 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); | |
2881 andcc (Rscratch, 0xfffff003, Rscratch); | |
2882 br (Assembler::always, false, Assembler::pt, done) ; | |
2883 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2884 | |
2885 bind (IsInflated) ; | |
2886 if (EmitSync & 64) { | |
2887 // If m->owner != null goto IsLocked | |
2888 // Pessimistic form: Test-and-CAS vs CAS | |
2889 // The optimistic form avoids RTS->RTO cache line upgrades. | |
2890 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; | |
2891 andcc (Rscratch, Rscratch, G0) ; | |
2892 brx (Assembler::notZero, false, Assembler::pn, done) ; | |
2893 delayed()->nop() ; | |
2894 // m->owner == null : it's unlocked. | |
2895 } | |
2896 | |
2897 // Try to CAS m->owner from null to Self | |
2898 // Invariant: if we acquire the lock then _recursions should be 0. | |
2899 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; | |
2900 mov (G2_thread, Rscratch) ; | |
2901 casn (Rmark, G0, Rscratch) ; | |
2902 cmp (Rscratch, G0) ; | |
2903 // Intentional fall-through into done | |
2904 } else { | |
2905 // Aggressively avoid the Store-before-CAS penalty | |
2906 // Defer the store into box->dhw until after the CAS | |
2907 Label IsInflated, Recursive ; | |
2908 | |
2909 // Anticipate CAS -- Avoid RTS->RTO upgrade | |
2910 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; | |
2911 | |
2912 ld_ptr (mark_addr, Rmark); // fetch obj->mark | |
2913 // Triage: biased, stack-locked, neutral, inflated | |
2914 | |
2915 if (UseBiasedLocking) { | |
2916 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); | |
2917 // Invariant: if control reaches this point in the emitted stream | |
2918 // then Rmark has not been modified. | |
2919 } | |
2920 andcc (Rmark, 2, G0) ; | |
2921 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ; | |
2922 delayed()-> // Beware - dangling delay-slot | |
2923 | |
2924 // Try stack-lock acquisition. | |
2925 // Transiently install BUSY (0) encoding in the mark word. | |
2926 // if the CAS of 0 into the mark was successful then we execute: | |
2927 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box | |
2928 // ST obj->mark = box -- overwrite transient 0 value | |
2929 // This presumes TSO, of course. | |
2930 | |
2931 mov (0, Rscratch) ; | |
2932 or3 (Rmark, markOopDesc::unlocked_value, Rmark); | |
2933 assert (mark_addr.disp() == 0, "cas must take a zero displacement"); | |
2934 casn (mark_addr.base(), Rmark, Rscratch) ; | |
2935 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; | |
2936 cmp (Rscratch, Rmark) ; | |
2937 brx (Assembler::notZero, false, Assembler::pn, Recursive) ; | |
2938 delayed() -> | |
2939 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2940 if (counters != NULL) { | |
2941 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); | |
2942 } | |
2943 br (Assembler::always, false, Assembler::pt, done); | |
2944 delayed() -> | |
2945 st_ptr (Rbox, mark_addr) ; | |
2946 | |
2947 bind (Recursive) ; | |
2948 // Stack-lock attempt failed - check for recursive stack-lock. | |
2949 // Tests show that we can remove the recursive case with no impact | |
2950 // on refworkload 0.83. If we need to reduce the size of the code | |
2951 // emitted by compiler_lock_object() the recursive case is a perfect |
2952 // candidate. |
2953 // | |
2954 // A more extreme idea is to always inflate on stack-lock recursion. | |
2955 // This lets us eliminate the recursive checks in compiler_lock_object | |
2956 // and compiler_unlock_object and the (box->dhw == 0) encoding. | |
2957 // A brief experiment - requiring changes to synchronizer.cpp and the |
2958 // interpreter - showed a performance *increase*. In the same experiment I eliminated |
2959 // the fast-path stack-lock code from the interpreter and always passed | |
2960 // control to the "slow" operators in synchronizer.cpp. | |
2961 | |
2962 // Rscratch contains the fetched obj->mark value from the failed CASN. |
2963 #ifdef _LP64 | |
2964 sub (Rscratch, STACK_BIAS, Rscratch); | |
2965 #endif | |
2966 sub(Rscratch, SP, Rscratch); | |
2967 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); | |
2968 andcc (Rscratch, 0xfffff003, Rscratch); | |
2969 if (counters != NULL) { | |
2970 // Accounting needs the Rscratch register | |
2971 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2972 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); | |
2973 br (Assembler::always, false, Assembler::pt, done) ; | |
2974 delayed()->nop() ; | |
2975 } else { | |
2976 br (Assembler::always, false, Assembler::pt, done) ; | |
2977 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
2978 } | |
2979 | |
2980 bind (IsInflated) ; | |
2981 if (EmitSync & 64) { | |
2982 // If m->owner != null goto IsLocked | |
2983 // Test-and-CAS vs CAS | |
2984 // Pessimistic form avoids futile (doomed) CAS attempts | |
2985 // The optimistic form avoids RTS->RTO cache line upgrades. | |
2986 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; | |
2987 andcc (Rscratch, Rscratch, G0) ; | |
2988 brx (Assembler::notZero, false, Assembler::pn, done) ; | |
2989 delayed()->nop() ; | |
2990 // m->owner == null : it's unlocked. | |
2991 } | |
2992 | |
2993 // Try to CAS m->owner from null to Self | |
2994 // Invariant: if we acquire the lock then _recursions should be 0. | |
2995 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; | |
2996 mov (G2_thread, Rscratch) ; | |
2997 casn (Rmark, G0, Rscratch) ; | |
2998 cmp (Rscratch, G0) ; | |
2999 // ST box->displaced_header = NonZero. | |
3000 // Any non-zero value suffices: | |
3001 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc. | |
3002 st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); | |
3003 // Intentional fall-through into done | |
3004 } | |
3005 | |
3006 bind (done) ; | |
3007 } | |
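// Editor's sketch of the inflated-monitor fast path emitted above, in plain
// C++. 'MonitorSketch' and 'self' are hypothetical stand-ins for ObjectMonitor
// and G2_thread; the generated code leaves the outcome in the condition codes
// rather than returning a bool. The pessimistic variant mirrors the
// (EmitSync & 64) form, which tests _owner before attempting the CAS.

#include <atomic>   // for the sketch only

struct MonitorSketch { std::atomic<void*> owner{nullptr}; };

static bool try_enter(MonitorSketch* m, void* self) {
  void* expected = nullptr;
  // CAS m->owner from null to self; if we acquire, _recursions is 0.
  return m->owner.compare_exchange_strong(expected, self);
}

static bool try_enter_pessimistic(MonitorSketch* m, void* self) {
  if (m->owner.load() != nullptr) return false;  // avoid a futile (doomed) CAS
  void* expected = nullptr;                      // at the cost of an RTS->RTO upgrade
  return m->owner.compare_exchange_strong(expected, self);
}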
3008 | |
3009 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) { | |
3010 Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); | |
3011 | |
3012 Label done ; | |
3013 | |
3014 if (EmitSync & 4) { | |
3015 cmp (SP, G0) ; | |
3016 return ; | |
3017 } | |
3018 | |
3019 if (EmitSync & 8) { | |
3020 if (UseBiasedLocking) { | |
3021 biased_locking_exit(mark_addr, Rscratch, done); | |
3022 } | |
3023 | |
3024 // Test first if it is a fast recursive unlock | |
3025 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark); | |
3026 cmp(Rmark, G0); | |
3027 brx(Assembler::equal, false, Assembler::pt, done); | |
3028 delayed()->nop(); | |
3029 | |
3030 // Check if it is still a lightweight lock; this is true if we see |
3031 // the stack address of the basicLock in the markOop of the object | |
3032 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
3033 casx_under_lock(mark_addr.base(), Rbox, Rmark, | |
3034 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
3035 br (Assembler::always, false, Assembler::pt, done); | |
3036 delayed()->cmp(Rbox, Rmark); | |
3037 bind (done) ; | |
3038 return ; | |
3039 } | |
3040 | |
3041 // Beware ... If the aggregate size of the code emitted by CLO and CUO |
3042 // is too large, performance rolls abruptly off a cliff. |
3043 // This could be related to inlining policies, code cache management, or | |
3044 // I$ effects. | |
3045 Label LStacked ; | |
3046 | |
3047 if (UseBiasedLocking) { | |
3048 // TODO: eliminate redundant LDs of obj->mark | |
3049 biased_locking_exit(mark_addr, Rscratch, done); | |
3050 } | |
3051 | |
3052 ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ; | |
3053 ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); | |
3054 andcc (Rscratch, Rscratch, G0); | |
3055 brx (Assembler::zero, false, Assembler::pn, done); | |
3056 delayed()-> nop() ; // consider: relocate fetch of mark, above, into this DS | |
3057 andcc (Rmark, 2, G0) ; | |
3058 brx (Assembler::zero, false, Assembler::pt, LStacked) ; | |
3059 delayed()-> nop() ; | |
3060 | |
3061 // It's inflated | |
3062 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before | |
3063 // the ST of 0 into _owner which releases the lock. This prevents loads | |
3064 // and stores within the critical section from reordering (floating) | |
3065 // past the store that releases the lock. But TSO is a strong memory model | |
3066 // and that particular flavor of barrier is a noop, so we can safely elide it. | |
3067 // Note that we use 1-0 locking by default for the inflated case. We | |
3068 // close the resultant (and rare) race by having contending threads in |
3069 // monitorenter periodically poll _owner. | |
3070 ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; | |
3071 ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ; | |
3072 xor3 (Rscratch, G2_thread, Rscratch) ; | |
3073 orcc (Rbox, Rscratch, Rbox) ; | |
3074 brx (Assembler::notZero, false, Assembler::pn, done) ; | |
3075 delayed()-> | |
3076 ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ; | |
3077 ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ; | |
3078 orcc (Rbox, Rscratch, G0) ; | |
3079 if (EmitSync & 65536) { | |
3080 Label LSucc ; | |
3081 brx (Assembler::notZero, false, Assembler::pn, LSucc) ; | |
3082 delayed()->nop() ; | |
3083 br (Assembler::always, false, Assembler::pt, done) ; | |
3084 delayed()-> | |
3085 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; | |
3086 | |
3087 bind (LSucc) ; | |
3088 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; | |
3089 if (os::is_MP()) { membar (StoreLoad) ; } | |
3090 ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ; | |
3091 andcc (Rscratch, Rscratch, G0) ; | |
3092 brx (Assembler::notZero, false, Assembler::pt, done) ; | |
3093 delayed()-> andcc (G0, G0, G0) ; | |
3094 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; | |
3095 mov (G2_thread, Rscratch) ; | |
3096 casn (Rmark, G0, Rscratch) ; | |
3097 cmp (Rscratch, G0) ; | |
3098 // invert icc.zf and goto done | |
3099 brx (Assembler::notZero, false, Assembler::pt, done) ; | |
3100 delayed() -> cmp (G0, G0) ; | |
3101 br (Assembler::always, false, Assembler::pt, done); | |
3102 delayed() -> cmp (G0, 1) ; | |
3103 } else { | |
3104 brx (Assembler::notZero, false, Assembler::pn, done) ; | |
3105 delayed()->nop() ; | |
3106 br (Assembler::always, false, Assembler::pt, done) ; | |
3107 delayed()-> | |
3108 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; | |
3109 } | |
3110 | |
3111 bind (LStacked) ; | |
3112 // Consider: we could replace the expensive CAS in the exit | |
3113 // path with a simple ST of the displaced mark value fetched from | |
3114 // the on-stack basiclock box. That admits a race where a thread T2 | |
3115 // in the slow lock path -- inflating with monitor M -- could race a | |
3116 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. | |
3117 // More precisely T1 in the stack-lock unlock path could "stomp" the | |
3118 // inflated mark value M installed by T2, resulting in an orphan | |
3119 // object monitor M and T2 becoming stranded. We can remedy that situation | |
3120 // by having T2 periodically poll the object's mark word using timed wait | |
3121 // operations. If T2 discovers that a stomp has occurred it vacates | |
3122 // the monitor M and wakes any other threads stranded on the now-orphan M. | |
3123 // In addition the monitor scavenger, which performs deflation, | |
3124 // would also need to check for orphan monitors and stranded threads. |
3125 // | |
3126 // Finally, inflation is also used when T2 needs to assign a hashCode | |
3127 // to O and O is stack-locked by T1. The "stomp" race could cause | |
3128 // an assigned hashCode value to be lost. We can avoid that condition | |
3129 // and provide the necessary hashCode stability invariants by ensuring | |
3130 // that hashCode generation is idempotent between copying GCs. | |
3131 // For example we could compute the hashCode of an object O as | |
3132 // O's heap address XOR some high quality RNG value that is refreshed | |
3133 // at GC-time. The monitor scavenger would install the hashCode | |
3134 // found in any orphan monitors. Again, the mechanism admits a | |
3135 // lost-update "stomp" WAW race but detects and recovers as needed. | |
3136 // | |
3137 // A prototype implementation showed excellent results, although | |
3138 // the scavenger and timeout code was rather involved. | |
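// Editor's sketch (as a comment, to keep the routine above intact) of the
// idempotent hashCode scheme just described. 'gc_epoch_seed' is the
// hypothetical high-quality RNG value refreshed at GC-time; because the hash
// is a pure function of (address, seed), two racing computations between
// copying GCs agree, so a lost-update "stomp" WAW race loses nothing:
//
//   static uint32_t idempotent_hash(const void* obj, uint64_t gc_epoch_seed) {
//     uint64_t h = reinterpret_cast<uintptr_t>(obj) ^ gc_epoch_seed;
//     h ^= h >> 33;                      // cheap mixing of the upper bits
//     return static_cast<uint32_t>(h);
//   }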
3139 | |
3140 casn (mark_addr.base(), Rbox, Rscratch) ; | |
3141 cmp (Rbox, Rscratch); | |
3142 // Intentional fall through into done ... | |
3143 | |
3144 bind (done) ; | |
3145 } | |
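// Editor's sketch of the 1-0 exit protocol used above for the inflated case,
// with hypothetical '_owner'/'_succ' cells standing in for the ObjectMonitor
// fields. Under TSO the release itself needs no barrier, but the StoreLoad
// fence is required so the store of null cannot pass the subsequent load of
// _succ (this is the LSucc path taken when cxq/EntryList is non-empty).

static void exit_1_0(std::atomic<void*>* owner, std::atomic<void*>* succ,
                     void* self) {
  owner->store(nullptr);                               // release the lock
  std::atomic_thread_fence(std::memory_order_seq_cst); // membar #StoreLoad
  if (succ->load() != nullptr) return;   // an heir exists; it will retry
  // No successor: re-take the lock if still free and hand off in the slow
  // path; if the CAS fails, another thread now owns the wakeup duty.
  void* expected = nullptr;
  if (owner->compare_exchange_strong(expected, self)) {
    // ... wake a thread from cxq/EntryList (slow path, elided) ...
  }
}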
3146 | |
3147 | |
3148 | |
3149 void MacroAssembler::print_CPU_state() { | |
3150 // %%%%% need to implement this | |
3151 } | |
3152 | |
3153 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { | |
3154 // %%%%% need to implement this | |
3155 } | |
3156 | |
3157 void MacroAssembler::push_IU_state() { | |
3158 // %%%%% need to implement this | |
3159 } | |
3160 | |
3161 | |
3162 void MacroAssembler::pop_IU_state() { | |
3163 // %%%%% need to implement this | |
3164 } | |
3165 | |
3166 | |
3167 void MacroAssembler::push_FPU_state() { | |
3168 // %%%%% need to implement this | |
3169 } | |
3170 | |
3171 | |
3172 void MacroAssembler::pop_FPU_state() { | |
3173 // %%%%% need to implement this | |
3174 } | |
3175 | |
3176 | |
3177 void MacroAssembler::push_CPU_state() { | |
3178 // %%%%% need to implement this | |
3179 } | |
3180 | |
3181 | |
3182 void MacroAssembler::pop_CPU_state() { | |
3183 // %%%%% need to implement this | |
3184 } | |
3185 | |
3186 | |
3187 | |
3188 void MacroAssembler::verify_tlab() { | |
3189 #ifdef ASSERT | |
3190 if (UseTLAB && VerifyOops) { | |
3191 Label next, next2, ok; | |
3192 Register t1 = L0; | |
3193 Register t2 = L1; | |
3194 Register t3 = L2; | |
3195 | |
3196 save_frame(0); | |
3197 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); | |
3198 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); | |
3199 or3(t1, t2, t3); | |
3200 cmp(t1, t2); | |
3201 br(Assembler::greaterEqual, false, Assembler::pn, next); | |
3202 delayed()->nop(); | |
3203 stop("assert(top >= start)"); | |
3204 should_not_reach_here(); | |
3205 | |
3206 bind(next); | |
3207 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); | |
3208 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); | |
3209 or3(t3, t2, t3); | |
3210 cmp(t1, t2); | |
3211 br(Assembler::lessEqual, false, Assembler::pn, next2); | |
3212 delayed()->nop(); | |
3213 stop("assert(top <= end)"); | |
3214 should_not_reach_here(); | |
3215 | |
3216 bind(next2); | |
3217 and3(t3, MinObjAlignmentInBytesMask, t3); | |
3218 cmp(t3, 0); | |
3219 br(Assembler::lessEqual, false, Assembler::pn, ok); | |
3220 delayed()->nop(); | |
3221 stop("assert(aligned)"); | |
3222 should_not_reach_here(); | |
3223 | |
3224 bind(ok); | |
3225 restore(); | |
3226 } | |
3227 #endif | |
3228 } | |
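// Editor's sketch of the invariants verify_tlab() asserts above, using
// hypothetical 'start/top/end' stand-ins for the JavaThread TLAB fields.

#include <cassert>
#include <cstdint>

static void check_tlab(uintptr_t start, uintptr_t top, uintptr_t end) {
  assert(top >= start);                                 // "assert(top >= start)"
  assert(top <= end);                                   // "assert(top <= end)"
  assert(((start | top | end) & MinObjAlignmentInBytesMask) == 0); // aligned
}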
3229 | |
3230 | |
3231 void MacroAssembler::eden_allocate( | |
3232 Register obj, // result: pointer to object after successful allocation | |
3233 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise | |
3234 int con_size_in_bytes, // object size in bytes if known at compile time | |
3235 Register t1, // temp register | |
3236 Register t2, // temp register | |
3237 Label& slow_case // continuation point if fast allocation fails | |
3238 ){ | |
3239 // make sure arguments make sense | |
3240 assert_different_registers(obj, var_size_in_bytes, t1, t2); | |
3241 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); | |
3242 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); | |
3243 | |
3244 // get eden boundaries | |
3245 // note: we need both top & top_addr! | |
3246 const Register top_addr = t1; | |
3247 const Register end = t2; | |
3248 | |
3249 CollectedHeap* ch = Universe::heap(); | |
3250 set((intx)ch->top_addr(), top_addr); | |
3251 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); | |
3252 ld_ptr(top_addr, delta, end); | |
3253 ld_ptr(top_addr, 0, obj); | |
3254 | |
3255 // try to allocate | |
3256 Label retry; | |
3257 bind(retry); | |
3258 #ifdef ASSERT | |
3259 // make sure eden top is properly aligned | |
3260 { | |
3261 Label L; | |
3262 btst(MinObjAlignmentInBytesMask, obj); | |
3263 br(Assembler::zero, false, Assembler::pt, L); | |
3264 delayed()->nop(); | |
3265 stop("eden top is not properly aligned"); | |
3266 bind(L); | |
3267 } | |
3268 #endif // ASSERT | |
3269 const Register free = end; | |
3270 sub(end, obj, free); // compute amount of free space | |
3271 if (var_size_in_bytes->is_valid()) { | |
3272 // size is unknown at compile time | |
3273 cmp(free, var_size_in_bytes); | |
3274 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case |
3275 delayed()->add(obj, var_size_in_bytes, end); | |
3276 } else { | |
3277 // size is known at compile time | |
3278 cmp(free, con_size_in_bytes); | |
3279 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case |
3280 delayed()->add(obj, con_size_in_bytes, end); | |
3281 } | |
3282 // Compare obj with the value at top_addr; if still equal, swap the value of | |
3283 // end with the value at top_addr. If not equal, read the value at top_addr | |
3284 // into end. | |
3285 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
3286 // if someone beat us on the allocation, try again, otherwise continue | |
3287 cmp(obj, end); | |
3288 brx(Assembler::notEqual, false, Assembler::pn, retry); | |
3289 delayed()->mov(end, obj); // nop if successful since obj == end |
3290 | |
3291 #ifdef ASSERT | |
3292 // make sure eden top is properly aligned | |
3293 { | |
3294 Label L; | |
3295 const Register top_addr = t1; | |
3296 | |
3297 set((intx)ch->top_addr(), top_addr); | |
3298 ld_ptr(top_addr, 0, top_addr); | |
3299 btst(MinObjAlignmentInBytesMask, top_addr); | |
3300 br(Assembler::zero, false, Assembler::pt, L); | |
3301 delayed()->nop(); | |
3302 stop("eden top is not properly aligned"); | |
3303 bind(L); | |
3304 } | |
3305 #endif // ASSERT | |
3306 } | |
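// Editor's sketch of the fast path above: a classic CAS-based bump-the-pointer
// loop over the shared eden. 'top' and 'end' stand in for the heap's
// top_addr()/end_addr() cells; compare_exchange plays the role of
// casx_under_lock, refreshing 'obj' with the current top on failure.

#include <atomic>
#include <cstdint>

static void* eden_allocate_sketch(std::atomic<uintptr_t>* top, uintptr_t end,
                                  size_t size_in_bytes) {
  uintptr_t obj = top->load();
  for (;;) {
    if (end - obj < size_in_bytes) return nullptr;   // not enough space: slow case
    uintptr_t new_top = obj + size_in_bytes;
    if (top->compare_exchange_weak(obj, new_top)) {  // someone may beat us...
      return reinterpret_cast<void*>(obj);           // ...otherwise we own [obj, new_top)
    }                                                // on failure obj == current top; retry
  }
}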
3307 | |
3308 | |
3309 void MacroAssembler::tlab_allocate( | |
3310 Register obj, // result: pointer to object after successful allocation | |
3311 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise | |
3312 int con_size_in_bytes, // object size in bytes if known at compile time | |
3313 Register t1, // temp register | |
3314 Label& slow_case // continuation point if fast allocation fails | |
3315 ){ | |
3316 // make sure arguments make sense | |
3317 assert_different_registers(obj, var_size_in_bytes, t1); | |
3318 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); | |
3319 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); | |
3320 | |
3321 const Register free = t1; | |
3322 | |
3323 verify_tlab(); | |
3324 | |
3325 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); | |
3326 | |
3327 // calculate amount of free space | |
3328 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); | |
3329 sub(free, obj, free); | |
3330 | |
3331 Label done; | |
3332 if (var_size_in_bytes == noreg) { | |
3333 cmp(free, con_size_in_bytes); | |
3334 } else { | |
3335 cmp(free, var_size_in_bytes); | |
3336 } | |
3337 br(Assembler::less, false, Assembler::pn, slow_case); | |
3338 // calculate the new top pointer | |
3339 if (var_size_in_bytes == noreg) { | |
3340 delayed()->add(obj, con_size_in_bytes, free); | |
3341 } else { | |
3342 delayed()->add(obj, var_size_in_bytes, free); | |
3343 } | |
3344 | |
3345 bind(done); | |
3346 | |
3347 #ifdef ASSERT | |
3348 // make sure new free pointer is properly aligned | |
3349 { | |
3350 Label L; | |
3351 btst(MinObjAlignmentInBytesMask, free); | |
3352 br(Assembler::zero, false, Assembler::pt, L); | |
3353 delayed()->nop(); | |
3354 stop("updated TLAB free is not properly aligned"); | |
3355 bind(L); | |
3356 } | |
3357 #endif // ASSERT | |
3358 | |
3359 // update the tlab top pointer | |
3360 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); | |
3361 verify_tlab(); | |
3362 } | |
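// Editor's sketch of the TLAB fast path above. The TLAB is private to the
// thread, so a plain load/store bump suffices -- no CAS, unlike eden.
// 'tlab_top'/'tlab_end' stand in for the JavaThread fields used above.

static void* tlab_allocate_sketch(uintptr_t* tlab_top, uintptr_t tlab_end,
                                  size_t size_in_bytes) {
  uintptr_t obj = *tlab_top;
  if (tlab_end - obj < size_in_bytes) return nullptr; // free < size: slow case
  *tlab_top = obj + size_in_bytes;                    // update the tlab top pointer
  return reinterpret_cast<void*>(obj);
}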
3363 | |
3364 | |
3365 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { | |
3366 Register top = O0; | |
3367 Register t1 = G1; | |
3368 Register t2 = G3; | |
3369 Register t3 = O1; | |
3370 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); | |
3371 Label do_refill, discard_tlab; | |
3372 | |
3373 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { | |
3374 // No allocation in the shared eden. | |
3375 br(Assembler::always, false, Assembler::pt, slow_case); | |
3376 delayed()->nop(); | |
3377 } | |
3378 | |
3379 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); | |
3380 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); | |
3381 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); | |
3382 | |
3383 // calculate amount of free space | |
3384 sub(t1, top, t1); | |
3385 srl_ptr(t1, LogHeapWordSize, t1); | |
3386 | |
3387 // Retain tlab and allocate object in shared space if | |
3388 // the amount free in the tlab is too large to discard. | |
3389 cmp(t1, t2); | |
3390 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab); | |
3391 | |
3392 // increment waste limit to prevent getting stuck on this slow path | |
3393 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2); | |
3394 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); | |
3395 if (TLABStats) { | |
3396 // increment number of slow_allocations | |
3397 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); | |
3398 add(t2, 1, t2); | |
3399 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); | |
3400 } | |
3401 br(Assembler::always, false, Assembler::pt, try_eden); | |
3402 delayed()->nop(); | |
3403 | |
3404 bind(discard_tlab); | |
3405 if (TLABStats) { | |
3406 // increment number of refills | |
3407 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); | |
3408 add(t2, 1, t2); | |
3409 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); | |
3410 // accumulate wastage | |
3411 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); | |
3412 add(t2, t1, t2); | |
3413 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); | |
3414 } | |
3415 | |
3416 // if tlab is currently allocated (top or end != null) then | |
3417 // fill [top, end + alignment_reserve) with array object | |
3418 br_null(top, false, Assembler::pn, do_refill); | |
3419 delayed()->nop(); | |
3420 | |
3421 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); | |
3422 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word | |
3423 // set klass to intArrayKlass | |
3424 sub(t1, typeArrayOopDesc::header_size(T_INT), t1); | |
3425 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); | |
3426 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); | |
3427 st(t1, top, arrayOopDesc::length_offset_in_bytes()); | |
167 | 3428 set((intptr_t)Universe::intArrayKlassObj_addr(), t2); |
3429 ld_ptr(t2, 0, t2); |
3430 // store klass last. concurrent gcs assume klass length is valid if |
3431 // klass field is not null. |
3432 store_klass(t2, top); |
0 | 3433 verify_oop(top); |
3434 | |
3435 // refill the tlab with an eden allocation | |
3436 bind(do_refill); | |
3437 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); | |
3438 sll_ptr(t1, LogHeapWordSize, t1); | |
3439 // add object_size ?? | |
3440 eden_allocate(top, t1, 0, t2, t3, slow_case); | |
3441 | |
3442 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); | |
3443 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); | |
3444 #ifdef ASSERT | |
3445 // check that tlab_size (t1) is still valid | |
3446 { | |
3447 Label ok; | |
3448 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); | |
3449 sll_ptr(t2, LogHeapWordSize, t2); | |
3450 cmp(t1, t2); | |
3451 br(Assembler::equal, false, Assembler::pt, ok); | |
3452 delayed()->nop(); | |
3453 stop("assert(t1 == tlab_size)"); | |
3454 should_not_reach_here(); | |
3455 | |
3456 bind(ok); | |
3457 } | |
3458 #endif // ASSERT | |
3459 add(top, t1, top); // t1 is tlab_size | |
3460 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); | |
3461 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); | |
3462 verify_tlab(); | |
3463 br(Assembler::always, false, Assembler::pt, retry); | |
3464 delayed()->nop(); | |
3465 } | |
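// Editor's sketch of the retain-vs-discard policy implemented above, with a
// hypothetical stand-in for the thread's refill_waste_limit. Free space is
// measured in heap words, matching the srl_ptr by LogHeapWordSize above.

static bool should_discard_tlab(size_t free_words, size_t* waste_limit,
                                size_t limit_increment) {
  if (free_words > *waste_limit) {
    // Too much free space to throw away: allocate this object directly in
    // the shared eden, keep the TLAB, and raise the limit so repeated trips
    // through this slow path do not get stuck here.
    *waste_limit += limit_increment;
    return false;
  }
  return true;   // discard: fill the remainder with a dummy int array, refill
}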
3466 | |
3467 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { | |
3468 switch (cond) { | |
3469 // Note some conditions are synonyms for others | |
3470 case Assembler::never: return Assembler::always; | |
3471 case Assembler::zero: return Assembler::notZero; | |
3472 case Assembler::lessEqual: return Assembler::greater; | |
3473 case Assembler::less: return Assembler::greaterEqual; | |
3474 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; | |
3475 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; | |
3476 case Assembler::negative: return Assembler::positive; | |
3477 case Assembler::overflowSet: return Assembler::overflowClear; | |
3478 case Assembler::always: return Assembler::never; | |
3479 case Assembler::notZero: return Assembler::zero; | |
3480 case Assembler::greater: return Assembler::lessEqual; | |
3481 case Assembler::greaterEqual: return Assembler::less; | |
3482 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; | |
3483 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; | |
3484 case Assembler::positive: return Assembler::negative; | |
3485 case Assembler::overflowClear: return Assembler::overflowSet; | |
3486 } | |
3487 | |
3488 ShouldNotReachHere(); return Assembler::overflowClear; | |
3489 } | |
3490 | |
3491 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, | |
3492 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { | |
3493 Condition negated_cond = negate_condition(cond); | |
3494 Label L; | |
3495 brx(negated_cond, false, Assembler::pt, L); | |
3496 delayed()->nop(); | |
3497 inc_counter(counter_ptr, Rtmp1, Rtmp2); | |
3498 bind(L); | |
3499 } | |
3500 | |
3501 void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) { | |
3502 Address counter_addr(Rtmp1, counter_ptr); | |
3503 load_contents(counter_addr, Rtmp2); | |
3504 inc(Rtmp2); | |
3505 store_contents(Rtmp2, counter_addr); | |
3506 } | |
3507 | |
3508 SkipIfEqual::SkipIfEqual( | |
3509 MacroAssembler* masm, Register temp, const bool* flag_addr, | |
3510 Assembler::Condition condition) { | |
3511 _masm = masm; | |
3512 Address flag(temp, (address)flag_addr, relocInfo::none); | |
3513 _masm->sethi(flag); | |
3514 _masm->ldub(flag, temp); | |
3515 _masm->tst(temp); | |
3516 _masm->br(condition, false, Assembler::pt, _label); | |
3517 _masm->delayed()->nop(); | |
3518 } | |
3519 | |
3520 SkipIfEqual::~SkipIfEqual() { | |
3521 _masm->bind(_label); | |
3522 } | |
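// Editor's sketch of the intended use of SkipIfEqual: the constructor emits a
// test of the flag byte and a branch over a scope of generated code, and the
// destructor binds the branch target, so guarded emission reads like a
// structured if. The flag and registers below are illustrative only.

static void emit_guarded_probe(MacroAssembler* masm, Register temp) {
  SkipIfEqual skip_if(masm, temp, &DTraceMethodProbes, Assembler::zero);
  // ... code emitted here is skipped at run time while the flag is zero ...
}   // ~SkipIfEqual binds the label here, closing the skipped region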
3523 | |
3524 | |
3525 // Writes to successive stack pages until the requested size is reached, |
3526 // to check for stack overflow + shadow pages. This clobbers tsp and scratch. |
3527 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, | |
3528 Register Rscratch) { | |
3529 // Copy the stack pointer into the temp stack pointer |
3530 mov(SP, Rtsp); | |
3531 | |
3532 // Bang stack for total size given plus stack shadow page size. | |
3533 // Bang one page at a time because a large size can overflow yellow and | |
3534 // red zones (the bang will fail but stack overflow handling can't tell that | |
3535 // it was a stack overflow bang vs a regular segv). | |
3536 int offset = os::vm_page_size(); | |
3537 Register Roffset = Rscratch; | |
3538 | |
3539 Label loop; | |
3540 bind(loop); | |
3541 set((-offset)+STACK_BIAS, Rscratch); | |
3542 st(G0, Rtsp, Rscratch); | |
3543 set(offset, Roffset); | |
3544 sub(Rsize, Roffset, Rsize); | |
3545 cmp(Rsize, G0); | |
3546 br(Assembler::greater, false, Assembler::pn, loop); | |
3547 delayed()->sub(Rtsp, Roffset, Rtsp); | |
3548 | |
3549 // Bang down shadow pages too. | |
3550 // The -1 because we already subtracted 1 page. | |
3551 for (int i = 0; i< StackShadowPages-1; i++) { | |
3552 set((-i*offset)+STACK_BIAS, Rscratch); | |
3553 st(G0, Rtsp, Rscratch); | |
3554 } | |
3555 } | |
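// Editor's sketch of the banging loop above: one store per page, walking down
// the stack, so a true overflow faults on a guard (yellow/red) page rather
// than far beyond it. 'page' stands in for os::vm_page_size().

#include <cstddef>

static void bang_stack_sketch(volatile char* sp, size_t total_bytes,
                              size_t page) {
  for (size_t off = page; off <= total_bytes; off += page) {
    sp[-static_cast<ptrdiff_t>(off)] = 0;   // touch one word in each page
  }
}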
113 | 3556 |
164 | 3557 void MacroAssembler::load_klass(Register src_oop, Register klass) { |
113 | 3558 // The number of bytes in this code is used by |
3559 // MachCallDynamicJavaNode::ret_addr_offset() |
3560 // if this changes, change that. |
3561 if (UseCompressedOops) { |
164 | 3562 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); |
3563 decode_heap_oop_not_null(klass); |
113 | 3564 } else { |
164 | 3565 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); |
113 | 3566 } |
3567 } |
3568 |
164 | 3569 void MacroAssembler::store_klass(Register klass, Register dst_oop) { |
113 | 3570 if (UseCompressedOops) { |
164 | 3571 assert(dst_oop != klass, "not enough registers"); |
3572 encode_heap_oop_not_null(klass); |
167 | 3573 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); |
113 | 3574 } else { |
164 | 3575 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); |
113 | 3576 } |
3577 } |
3578 |
167 | 3579 void MacroAssembler::store_klass_gap(Register s, Register d) { |
3580 if (UseCompressedOops) { |
3581 assert(s != d, "not enough registers"); |
3582 st(s, d, oopDesc::klass_gap_offset_in_bytes()); |
113 | 3583 } |
3584 } |
3585 |
113 | 3586 void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) { |
3587 if (UseCompressedOops) { |
3588 lduw(s, d, offset); |
3589 decode_heap_oop(d); |
3590 } else { |
3591 ld_ptr(s, d, offset); |
3592 } |
3593 } |
3594 |
3595 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) { |
3596 if (UseCompressedOops) { |
3597 lduw(s1, s2, d); |
3598 decode_heap_oop(d, d); |
3599 } else { |
3600 ld_ptr(s1, s2, d); |
3601 } |
3602 } |
3603 |
3604 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) { |
3605 if (UseCompressedOops) { |
3606 lduw(s1, simm13a, d); |
3607 decode_heap_oop(d, d); |
3608 } else { |
3609 ld_ptr(s1, simm13a, d); |
3610 } |
3611 } |
3612 |
113 | 3613 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) { |
3614 if (UseCompressedOops) { |
3615 assert(s1 != d && s2 != d, "not enough registers"); |
3616 encode_heap_oop(d); |
3617 st(d, s1, s2); |
3618 } else { |
3619 st_ptr(d, s1, s2); |
3620 } |
3621 } |
3622 |
3623 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) { |
3624 if (UseCompressedOops) { |
3625 assert(s1 != d, "not enough registers"); |
3626 encode_heap_oop(d); |
3627 st(d, s1, simm13a); |
3628 } else { |
3629 st_ptr(d, s1, simm13a); |
3630 } |
3631 } |
3632 |
3633 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) { |
3634 if (UseCompressedOops) { |
3635 assert(a.base() != d, "not enough registers"); |
3636 encode_heap_oop(d); |
3637 st(d, a, offset); |
3638 } else { |
3639 st_ptr(d, a, offset); |
3640 } |
3641 } |
3642 |
3643 |
113 | 3644 void MacroAssembler::encode_heap_oop(Register src, Register dst) { |
3645 assert (UseCompressedOops, "must be compressed"); |
178 | 3646 verify_oop(src); |
113 | 3647 Label done; |
3648 if (src == dst) { |
3649 // optimize for frequent case src == dst |
3650 bpr(rc_nz, true, Assembler::pt, src, done); |
3651 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken |
3652 bind(done); |
3653 srlx(src, LogMinObjAlignmentInBytes, dst); |
3654 } else { |
3655 bpr(rc_z, false, Assembler::pn, src, done); |
3656 delayed() -> mov(G0, dst); |
3657 // could be moved before the branch, and annul the delay slot, |
3658 // but may add some unneeded work decoding null |
3659 sub(src, G6_heapbase, dst); |
3660 srlx(dst, LogMinObjAlignmentInBytes, dst); |
3661 bind(done); |
3662 } |
3663 } |
3664 |
3665 |
3666 void MacroAssembler::encode_heap_oop_not_null(Register r) { |
3667 assert (UseCompressedOops, "must be compressed"); |
178 | 3668 verify_oop(r); |
113 | 3669 sub(r, G6_heapbase, r); |
3670 srlx(r, LogMinObjAlignmentInBytes, r); |
3671 } |
3672 |
124 | 3673 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { |
3674 assert (UseCompressedOops, "must be compressed"); |
178 | 3675 verify_oop(src); |
124 | 3676 sub(src, G6_heapbase, dst); |
3677 srlx(dst, LogMinObjAlignmentInBytes, dst); |
3678 } |
3679 |
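// Editor's sketch of the encode arithmetic above: a compressed oop is the
// offset from G6_heapbase shifted right by the object-alignment shift, so 32
// bits cover up to 32 GB of heap at 8-byte alignment. encode_heap_oop
// preserves null explicitly; the _not_null variants rely on the caller.

#include <cstdint>

static uint32_t encode_heap_oop_sketch(uintptr_t oop, uintptr_t heap_base,
                                       unsigned shift /* LogMinObjAlignmentInBytes */) {
  if (oop == 0) return 0;                        // keep null as null
  return static_cast<uint32_t>((oop - heap_base) >> shift);
}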
113 | 3680 // Same algorithm as oops.inline.hpp decode_heap_oop. |
3681 void MacroAssembler::decode_heap_oop(Register src, Register dst) { |
3682 assert (UseCompressedOops, "must be compressed"); |
3683 Label done; |
3684 sllx(src, LogMinObjAlignmentInBytes, dst); |
3685 bpr(rc_nz, true, Assembler::pt, dst, done); |
3686 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken |
3687 bind(done); |
178 | 3688 verify_oop(dst); |
113 | 3689 } |
3690 |
3691 void MacroAssembler::decode_heap_oop_not_null(Register r) { |
3692 // Do not add assert code to this unless you change vtableStubs_sparc.cpp |
3693 // pd_code_size_limit. |
178 | 3694 // Also do not verify_oop as this is called by verify_oop. |
113 | 3695 assert (UseCompressedOops, "must be compressed"); |
3696 sllx(r, LogMinObjAlignmentInBytes, r); |
3697 add(r, G6_heapbase, r); |
3698 } |
3699 |
124 | 3700 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { |
3701 // Do not add assert code to this unless you change vtableStubs_sparc.cpp |
3702 // pd_code_size_limit. |
178 | 3703 // Also do not verify_oop as this is called by verify_oop. |
124 | 3704 assert (UseCompressedOops, "must be compressed"); |
3705 sllx(src, LogMinObjAlignmentInBytes, dst); |
3706 add(dst, G6_heapbase, dst); |
3707 } |
3708 |
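// Editor's sketch of the matching decode: shift the narrow oop back up and
// add G6_heapbase. As in decode_heap_oop above, the shifted value is tested
// so null stays null; the _not_null variants omit the test and must never
// see a zero input.

static uintptr_t decode_heap_oop_sketch(uint32_t narrow, uintptr_t heap_base,
                                        unsigned shift) {
  if (narrow == 0) return 0;                     // keep null as null
  return (static_cast<uintptr_t>(narrow) << shift) + heap_base;
}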
113 | 3709 void MacroAssembler::reinit_heapbase() { |
3710 if (UseCompressedOops) { |
3711 // call indirectly to solve generation ordering problem |
3712 Address base(G6_heapbase, (address)Universe::heap_base_addr()); |
3713 load_ptr_contents(base, G6_heapbase); |
3714 } |
3715 } |