/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"

// Implementation of Address

Address::Address( addr_type t, int which ) {
  switch (t) {
    case extra_in_argument:
    case extra_out_argument:
      _base = t == extra_in_argument ? FP : SP;
      _hi = 0;
      // Warning: In LP64 mode, _disp will occupy more than 10 bits.
      // This is inconsistent with the other constructors, but opcodes
      // such as ld or ldx only access disp() to get their simm13 argument.
      _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

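// Return a symbolic name for this argument; argument numbers past the end of
// the table all map to the last ("A(n>9)"/"P(n>9)") entry.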
const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs) num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
    default:      s = "????"; break;
    case call_op: s = "call"; break;
    case branch_op:
      switch (inv_op2(inst)) {
        case bpr_op2: s = "bpr";  break;
        case fb_op2:  s = "fb";   break;
        case fbp_op2: s = "fbp";  break;
        case br_op2:  s = "br";   break;
        case bp_op2:  s = "bp";   break;
        case cb_op2:  s = "cb";   break;
        default:      s = "????"; break;
      }
  }
  ::tty->print("%s", s);
}


// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
    default: ShouldNotReachHere();
    case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
    case branch_op:
      switch (inv_op2(inst)) {
        case bpr_op2: m = wdisp16(word_aligned_ones, 0);    v = wdisp16(dest_pos, inst_pos);    break;
        case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
        case bp_op2:  m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
        case fb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
        case br_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
        case cb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
        default: ShouldNotReachHere();
      }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
    default: ShouldNotReachHere();
    case call_op: r = inv_wdisp(inst, pos, 30); break;
    case branch_op:
      switch (inv_op2(inst)) {
        case bpr_op2: r = inv_wdisp16(inst, pos);    break;
        case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
        case bp_op2:  r = inv_wdisp( inst, pos, 19); break;
        case fb_op2:  r = inv_wdisp( inst, pos, 22); break;
        case br_op2:  r = inv_wdisp( inst, pos, 22); break;
        case cb_op2:  r = inv_wdisp( inst, pos, 22); break;
        default: ShouldNotReachHere();
      }
  }
  return r;
}

int AbstractAssembler::code_fill_byte() {
  return 0x00; // illegal instruction 0x00000000
}

// Generate a bunch 'o stuff (including v9's)
#ifndef PRODUCT
void Assembler::test_v9() {
  add( G0, G1, G2 );
  add( G3, 0, G4 );

  addcc( G5, G6, G7 );
  addcc( I0, 1, I1 );
  addc( I2, I3, I4 );
  addc( I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never, true, pc() + 4, relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();

  fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal, true, pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never, false, lbl1 );
  delayed()->nop();

  bp( less, true, icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();


  casa( L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv( O4, O5, O7 );
  udiv( G0, (1 << 12) - 1, G1 );
  sdiv( G1, G2, G3 );
  sdiv( G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0, F1, F2 );
  fsub( FloatRegisterImpl::D, F34, F0, F62 );

  fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6, F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q, F24, F28, F32 );
  fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
  fdiv( FloatRegisterImpl::S, F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2( 3, 0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();


  ldf( FloatRegisterImpl::S, O0, O1, F15 );
  ldf( FloatRegisterImpl::D, O2, -1, F14 );


  ldfsr( O3, O4 );
  ldfsr( O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa( FloatRegisterImpl::Q, I3, -1, F36 );

  ldsb( I4, I5, I6 );
  ldsb( I7, -1, G0 );
  ldsh( G1, G3, G4 );
  ldsh( G5, -1, G6 );
  ldsw( G7, L0, L1 );
  ldsw( L2, -1, L3 );
  ldub( L4, L5, L6 );
  ldub( L7, -1, O0 );
  lduh( O1, O2, O3 );
  lduh( O4, -1, O5 );
  lduw( O6, O7, G0 );
  lduw( G1, -1, G2 );
  ldx( G3, G4, G5 );
  ldx( G6, -1, G7 );
  ldd( I0, I1, I2 );
  ldd( I3, -1, I4 );

  ldsba( I5, I6, 2, I7 );
  ldsba( L0, -1, L1 );
  ldsha( L2, L3, 3, L4 );
  ldsha( L5, -1, L6 );
  ldswa( L7, O0, (1 << 8) - 1, O1 );
  ldswa( O2, -1, O3 );
  lduba( O4, O5, 0, O6 );
  lduba( O7, -1, I0 );
  lduha( I1, I2, 1, I3 );
  lduha( I4, -1, I5 );
  lduwa( I6, I7, 2, L0 );
  lduwa( L1, -1, L2 );
  ldxa( L3, L4, 3, L5 );
  ldxa( L6, -1, L7 );
  ldda( G0, G1, 4, G2 );
  ldda( G3, -1, G4 );

  ldstub( G5, G6, G7 );
  ldstub( O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3( I0, L0, O0 );
  and3( G7, -1, O7 );
  andcc( L2, I2, G2 );
  andcc( L4, -1, G4 );
  andn( I5, I6, I7 );
  andn( I6, -1, I7 );
  andncc( I5, I6, I7 );
  andncc( I7, -1, I6 );
  or3( I5, I6, I7 );
  or3( I7, -1, I6 );
  orcc( I5, I6, I7 );
  orcc( I7, -1, I6 );
  orn( I5, I6, I7 );
  orn( I7, -1, I6 );
  orncc( I5, I6, I7 );
  orncc( I7, -1, I6 );
  xor3( I5, I6, I7 );
  xor3( I7, -1, I6 );
  xorcc( I5, I6, I7 );
  xorcc( I7, -1, I6 );
  xnor( I5, I6, I7 );
  xnor( I7, -1, I6 );
  xnorcc( I5, I6, I7 );
  xnorcc( I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear, false, icc, I6, L4 );
  movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1, L2 );

  mulx( I5, I6, I7 );
  mulx( I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul( I5, I6, I7 );
  umul( I7, -1, I6 );
  smul( I5, I6, I7 );
  smul( I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc( I5, I6, I7 );
  mulscc( I7, -1, I6 );

  nop();


  popc( G0, G1);
  popc( -1, G2);

  prefetch( L1, L2, severalReads );
  prefetch( L3, -1, oneRead );
  prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha( G2, -1, oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save( I5, I6, I7 );
  save( I7, -1, I6 );
  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll( I5, I6, I7 );
  sll( I7, 31, I6 );
  srl( I5, I6, I7 );
  srl( I7, 0, I6 );
  sra( I5, I6, I7 );
  sra( I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7, 0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();

  stf( FloatRegisterImpl::Q, F40, G0, I7 );
  stf( FloatRegisterImpl::S, F18, I3, -1 );

  stfsr( L1, L2 );
  stfsr( I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa( FloatRegisterImpl::Q, F44, G0, -1 );

  stb( L5, O2, I7 );
  stb( I7, I6, -1 );
  sth( L5, O2, I7 );
  sth( I7, I6, -1 );
  stw( L5, O2, I7 );
  stw( I7, I6, -1 );
  stx( L5, O2, I7 );
  stx( I7, I6, -1 );
  std( L5, O2, I7 );
  std( I7, I6, -1 );

  stba( L5, O2, I7, 8 );
  stba( I7, I6, -1 );
  stha( L5, O2, I7, 9 );
  stha( I7, I6, -1 );
  stwa( L5, O2, I7, 0 );
  stwa( I7, I6, -1 );
  stxa( L5, O2, I7, 11 );
  stxa( I7, I6, -1 );
  stda( L5, O2, I7, 12 );
  stda( I7, I6, -1 );

  sub( I5, I6, I7 );
  sub( I7, -1, I6 );
  subcc( I5, I6, I7 );
  subcc( I7, -1, I6 );
  subc( I5, I6, I7 );
  subc( I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa( G0, G1, 13, G2 );
  swapa( I7, -1, I6 );

  taddcc( I5, I6, I7 );
  taddcc( I7, -1, I6 );
  taddcctv( I5, I6, I7 );
  taddcctv( I7, -1, I6 );

  tsubcc( I5, I6, I7 );
  tsubcc( I7, -1, I6 );
  tsubcctv( I5, I6, I7 );
  tsubcctv( I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual, icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}
// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never, true, lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc( I0, I1, 31);
  ldc( I2, -1, 0);

  lddc( I4, I4, 30);
  lddc( I6, 0, 1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc( 31, L4, L5);
  stc( 30, L6, -(1 << 12) );

  stdc( 0, L7, G0);
  stdc( 1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif

// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

void MacroAssembler::retl( bool trace ) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl( O7, 2 * BytesPerInstWord, G0 );
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none); // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none); // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  sethi(a, /*ForceRelocatable=*/ true);
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none); // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a, d, offset);
#endif /* PRODUCT */
  } else {
    jmpl(a, d, offset);
  }
}

void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
  jumpl( a, G0, offset, file, line );
}


// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  add(inArg.address_in_frame(), d);
}

// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work()) flushw();
  else                                    flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  load_address(mem_serialize_page);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, s2, d);
  } else {
    smul (s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, simm13a, d);
  } else {
    smul (s1, simm13a, d);
  }
}

#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);       // to avoid clobbering O0
  mov(G1, L0);         // avoid clobbering G1
  mov(G5_method, L1);  // avoid clobbering G5
  mov(G3, L2);         // avoid clobbering G3 also
  mov(G4, L5);         // avoid clobbering G4
#ifdef ASSERT
  Address last_get_thread_addr(L3, (address)&last_get_thread);
  sethi(last_get_thread_addr);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, last_get_thread_addr);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

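// Under -XX:+VerifyThread, call into the runtime to check that G2_thread
// really holds the current thread, preserving the other G registers across
// the call.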
void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);        // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);        // avoid clobbering G3
    mov(G4, L4);        // avoid clobbering G4
    mov(G5_method, L5); // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);        // Restore G1
    // G2 restored below
    mov(L3, G3);        // restore G3
    mov(L4, G4);        // restore G4
    mov(L5, G5_method); // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);     // Move old high G1 bits high in G2
    sllx(G1, 0,G1);     // Clear current high G1 bits
    or3 (G1,G2,G1);     // Recover 64-bit G1
    sllx(L6,32,G2);     // Move old high G4 bits high in G2
    sllx(G4, 0,G4);     // Clear current high G4 bits
    or3 (G4,G2,G4);     // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) +
                in_bytes(JavaFrameAnchor::flags_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::last_Java_pc_offset()));

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that flags was zeroed on return to Java
  Label PcOk;
  save_frame(0); // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  tst(L0);
#ifdef _LP64
  brx(Assembler::zero, false, Assembler::pt, PcOk);
#else
  br(Assembler::zero, false, Assembler::pt, PcOk);
#endif // _LP64
  delayed() -> nop();
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed() -> restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed() -> nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#else
  st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


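// Shared helper for the call_VM entry points: records the last Java frame,
// makes the runtime call with the thread as the first argument, then restores
// thread state and optionally checks for pending exceptions and an oop result.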
void MacroAssembler::call_VM_base(
  Register oop_result,
  Register thread_cache,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0 , "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0); // pass thread as first argument
  else
    delayed()->nop();              // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
  ld_ptr(exception_addr, scratch_reg);
  br_null(scratch_reg, false, pt, L);
  delayed()->nop();
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one. In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
  // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::store_check(Register tmp, Register obj) {
  // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)

  /* $$$ This stuff needs to go into one of the BarrierSet generator
     functions. (The particular barrier sets will have to be friends of
     MacroAssembler, I guess.) */
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert( tmp != obj, "need separate temp reg");
  Address rs(tmp, (address)ct->byte_map_base);
  load_address(rs);
  stb(G0, rs.base(), obj);
}

void MacroAssembler::store_check(Register tmp, Register obj, Register offset) {
  store_check(tmp, obj);
}

// %%% Note: The following six instructions have been moved,
//           unchanged, from assembler_sparc.inline.hpp.
//           They will be refactored at a later date.

void MacroAssembler::sethi(intptr_t imm22a,
                           Register d,
                           bool ForceRelocatable,
                           RelocationHolder const& rspec) {
  Address adr( d, (address)imm22a, rspec );
  MacroAssembler::sethi( adr, ForceRelocatable );
}


void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
  // if addr of local, do not need to load it
  assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
# endif
  v9_dep();
  // ForceRelocatable = 1;
  save_pc = pc();
  if (a.hi32() == 0 && a.low32() >= 0) {
    Assembler::sethi(a.low32(), a.base(), a.rspec());
  }
  else if (a.hi32() == -1) {
    Assembler::sethi(~a.low32(), a.base(), a.rspec());
    xor3(a.base(), ~low10(~0), a.base());
  }
  else {
    Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22
    if ( a.hi32() & 0x3ff )                              // Any bits?
      or3( a.base(), a.hi32() & 0x3ff ,a.base() );       // High 32 bits are now in low 32
    if ( a.low32() & 0xFFFFFC00 ) {                      // done?
      if( (a.low32() >> 20) & 0xfff ) {                  // Any bits set?
        sllx(a.base(), 12, a.base());                    // Make room for next 12 bits
        or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
        shiftcnt = 0;                                    // We already shifted
      }
      else
        shiftcnt = 12;
      if( (a.low32() >> 10) & 0x3ff ) {
        sllx(a.base(), shiftcnt+10, a.base());           // Make room for last 10 bits
        or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(a.base(), shiftcnt+10 , a.base());            // Shift leaving disp field 0'd
    }
    else
      sllx( a.base(), 32, a.base() );
  }
  // Pad out the instruction sequence so it can be
  // patched later.
  if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
                            a.rtype() != relocInfo::runtime_call_type) ) {
    while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
      nop();
  }
#else
  Assembler::sethi(a.hi(), a.base(), a.rspec());
#endif

}

int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case) return 7;
  intptr_t iaddr = (intptr_t)a;
  int hi32 = (int)(iaddr >> 32);
  int lo32 = (int)(iaddr);
  int inst_count;
  if (hi32 == 0 && lo32 >= 0)
    inst_count = 1;
  else if (hi32 == -1)
    inst_count = 2;
  else {
    inst_count = 2;
    if ( hi32 & 0x3ff )
      inst_count++;
    if ( lo32 & 0xFFFFFC00 ) {
      if( (lo32 >> 20) & 0xfff ) inst_count += 2;
      if( (lo32 >> 10) & 0x3ff ) inst_count += 2;
    }
  }
  return BytesPerInstWord * inst_count;
#else
  return BytesPerInstWord;
#endif
}

int MacroAssembler::worst_case_size_of_set() {
  return size_of_sethi(NULL, true) + 1;
}

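// Load an arbitrary intptr_t constant into d, using the shortest encoding
// (simm13 or a single sethi) when no relocation is attached.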
void MacroAssembler::set(intptr_t value, Register d,
                         RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);

  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(val);
      return;
    }
  }
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
  sethi( val );
  if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
    add( d, value & 0x3ff, d, rspec);
  }
}

void MacroAssembler::setsw(int value, Register d,
                           RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);
  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d);
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi( val );
#ifndef _LP64
      if ( value < 0 ) {
        assert_not_delayed();
        sra (d, G0, d);
      }
#endif
      return;
    }
  }
  assert_not_delayed();
  sethi( val );
  add( d, value & 0x3ff, d, rspec);

  // (A negative value could be loaded in 2 insns with sethi/xor,
  // but it would take a more complex relocation.)
#ifndef _LP64
  if ( value < 0)
    sra(d, G0, d);
#endif
}

// %%% End of moved six set instructions.


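// Load a 64-bit constant into d; tmp is only needed when both halves of the
// value require their own sethi/or3 sequence.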
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);  // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d); // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 ( d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords = 0) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}


void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}


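// Print the saved integer, float, and double register values, compressing
// runs of identical floating-point values into a single line.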
1537 void RegistersForDebugging::print(outputStream* s) {
|
|
1538 int j;
|
|
1539 for ( j = 0; j < 8; ++j )
|
|
1540 if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
|
|
1541 else s->print_cr( "fp = 0x%.16lx", i[j]);
|
|
1542 s->cr();
|
|
1543
|
|
1544 for ( j = 0; j < 8; ++j )
|
|
1545 s->print_cr("l%d = 0x%.16lx", j, l[j]);
|
|
1546 s->cr();
|
|
1547
|
|
1548 for ( j = 0; j < 8; ++j )
|
|
1549 if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
|
|
1550 else s->print_cr( "sp = 0x%.16lx", o[j]);
|
|
1551 s->cr();
|
|
1552
|
|
1553 for ( j = 0; j < 8; ++j )
|
|
1554 s->print_cr("g%d = 0x%.16lx", j, g[j]);
|
|
1555 s->cr();
|
|
1556
|
|
1557 // print out floats with compression
|
|
1558 for (j = 0; j < 32; ) {
|
|
1559 jfloat val = f[j];
|
|
1560 int last = j;
|
|
1561 for ( ; last+1 < 32; ++last ) {
|
|
1562 char b1[1024], b2[1024];
|
|
1563 sprintf(b1, "%f", val);
|
|
1564 sprintf(b2, "%f", f[last+1]);
|
|
1565 if (strcmp(b1, b2))
|
|
1566 break;
|
|
1567 }
|
|
1568 s->print("f%d", j);
|
|
1569 if ( j != last ) s->print(" - f%d", last);
|
|
1570 s->print(" = %f", val);
|
|
1571 s->fill_to(25);
|
|
1572 s->print_cr(" (0x%x)", val);
|
|
1573 j = last + 1;
|
|
1574 }
|
|
1575 s->cr();
|
|
1576
|
|
1577 // and doubles (evens only)
|
|
1578 for (j = 0; j < 32; ) {
|
|
1579 jdouble val = d[j];
|
|
1580 int last = j;
|
|
1581 for ( ; last+1 < 32; ++last ) {
|
|
1582 char b1[1024], b2[1024];
|
|
1583 sprintf(b1, "%f", val);
|
|
1584 sprintf(b2, "%f", d[last+1]);
|
|
1585 if (strcmp(b1, b2))
|
|
1586 break;
|
|
1587 }
|
|
1588 s->print("d%d", 2 * j);
|
|
1589 if ( j != last ) s->print(" - d%d", last);
|
|
1590 s->print(" = %f", val);
|
|
1591 s->fill_to(30);
|
|
1592 s->print("(0x%x)", *(int*)&val);
|
|
1593 s->fill_to(42);
|
|
1594 s->print_cr("(0x%x)", *(1 + (int*)&val));
|
|
1595 j = last + 1;
|
|
1596 }
|
|
1597 s->cr();
|
|
1598 }
|
|
1599
|
|
1600 void RegistersForDebugging::save_registers(MacroAssembler* a) {
|
|
1601 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
|
|
1602 a->flush_windows();
|
|
1603 int i;
|
|
1604 for (i = 0; i < 8; ++i) {
|
|
1605 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
|
|
1606 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
|
|
1607 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
|
|
1608 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
|
|
1609 }
|
|
1610 for (i = 0; i < 32; ++i) {
|
|
1611 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
|
|
1612 }
|
|
1613 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
|
|
1614 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
|
|
1615 }
|
|
1616 }
|
|
1617
|
|
1618 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
|
|
1619 for (int i = 1; i < 8; ++i) {
|
|
1620 a->ld_ptr(r, g_offset(i), as_gRegister(i));
|
|
1621 }
|
|
1622 for (int j = 0; j < 32; ++j) {
|
|
1623 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
|
|
1624 }
|
|
1625 for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
|
|
1626 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
|
|
1627 }
|
|
1628 }
|
|
1629
|
|
1630
|
|
1631 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
|
|
1632 void MacroAssembler::push_fTOS() {
|
|
1633 // %%%%%% need to implement this
|
|
1634 }
|
|
1635
|
|
1636 // pops double TOS element from CPU stack and pushes on FPU stack
|
|
1637 void MacroAssembler::pop_fTOS() {
|
|
1638 // %%%%%% need to implement this
|
|
1639 }
|
|
1640
|
|
1641 void MacroAssembler::empty_FPU_stack() {
|
|
1642 // %%%%%% need to implement this
|
|
1643 }
|
|
1644
|
|
1645 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
|
|
1646 // plausibility check for oops
|
|
1647 if (!VerifyOops) return;
|
|
1648
|
|
1649 if (reg == G0) return; // always NULL, which is always an oop
|
|
1650
|
|
1651 char buffer[16];
|
|
1652 sprintf(buffer, "%d", line);
|
|
1653 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
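  // The "+ 1 + 4" covers the literal " (", ":", ")" characters of the
  // "%s (%s:%d)" format below plus the trailing NUL.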
|
|
1654 char * real_msg = new char[len];
|
|
1655 sprintf(real_msg, "%s (%s:%d)", msg, file, line);
|
|
1656
|
|
1657 // Call indirectly to solve generation ordering problem
|
|
1658 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
|
|
1659
|
|
1660 // Make some space on stack above the current register window.
|
|
1661 // Enough to hold 8 64-bit registers.
|
|
1662 add(SP,-8*8,SP);
|
|
1663
|
|
1664 // Save some 64-bit registers; a normal 'save' chops the heads off
|
|
1665 // of 64-bit longs in the 32-bit build.
|
|
1666 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
|
|
1667 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
|
|
1668 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
|
|
1669 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
|
|
1670
|
|
1671 set((intptr_t)real_msg, O1);
|
|
1672 // Load address to call to into O7
|
|
1673 load_ptr_contents(a, O7);
|
|
1674 // Register call to verify_oop_subroutine
|
|
1675 callr(O7, G0);
|
|
1676 delayed()->nop();
|
|
1677 // recover frame size
|
|
1678 add(SP, 8*8,SP);
|
|
1679 }
|
|
1680
|
|
1681 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
|
|
1682 // plausibility check for oops
|
|
1683 if (!VerifyOops) return;
|
|
1684
|
|
1685 char buffer[64];
|
|
1686 sprintf(buffer, "%d", line);
|
|
1687 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
|
|
1688 sprintf(buffer, " at SP+%d ", addr.disp());
|
|
1689 len += strlen(buffer);
|
|
1690 char * real_msg = new char[len];
|
|
1691 sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
|
|
1692
|
|
1693 // Call indirectly to solve generation ordering problem
|
|
1694 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
|
|
1695
|
|
1696 // Make some space on stack above the current register window.
|
|
1697 // Enough to hold 8 64-bit registers.
|
|
1698 add(SP,-8*8,SP);
|
|
1699
|
|
1700 // Save some 64-bit registers; a normal 'save' chops the heads off
|
|
1701 // of 64-bit longs in the 32-bit build.
|
|
1702 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
|
|
1703 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
|
|
1704 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
|
|
1705 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
|
|
1706
|
|
1707 set((intptr_t)real_msg, O1);
|
|
1708 // Load address to call to into O7
|
|
1709 load_ptr_contents(a, O7);
|
|
1710 // Register call to verify_oop_subroutine
|
|
1711 callr(O7, G0);
|
|
1712 delayed()->nop();
|
|
1713 // recover frame size
|
|
1714 add(SP, 8*8,SP);
|
|
1715 }
|
|
1716
|
|
1717 // side-door communication with signalHandler in os_solaris.cpp
|
|
1718 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
|
|
1719
|
|
1720 // This macro is expanded just once; it creates shared code. Contract:
|
|
1721 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
|
|
1722 // registers, including flags. May not use a register 'save', as this blows
|
|
1723 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
|
|
1724 // call.
|
|
1725 void MacroAssembler::verify_oop_subroutine() {
|
|
1726 assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
|
|
1727
|
|
1728 // Leaf call; no frame.
|
|
1729 Label succeed, fail, null_or_fail;
|
|
1730
|
|
1731 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
|
|
1732 // O0 is now the oop to be checked. O7 is the return address.
|
|
1733 Register O0_obj = O0;
|
|
1734
|
|
1735 // Save some more registers for temps.
|
|
1736 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
|
|
1737 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
|
|
1738 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
|
|
1739 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
|
|
1740
|
|
1741 // Save flags
|
|
1742 Register O5_save_flags = O5;
|
|
1743 rdccr( O5_save_flags );
|
|
1744
|
|
1745 { // count number of verifies
|
|
1746 Register O2_adr = O2;
|
|
1747 Register O3_accum = O3;
|
|
1748 Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
|
|
1749 sethi(count_addr);
|
|
1750 ld(count_addr, O3_accum);
|
|
1751 inc(O3_accum);
|
|
1752 st(O3_accum, count_addr);
|
|
1753 }
|
|
1754
|
|
1755 Register O2_mask = O2;
|
|
1756 Register O3_bits = O3;
|
|
1757 Register O4_temp = O4;
|
|
1758
|
|
1759 // mark lower end of faulting range
|
|
1760 assert(_verify_oop_implicit_branch[0] == NULL, "set once");
|
|
1761 _verify_oop_implicit_branch[0] = pc();
|
|
1762
|
|
1763 // We can't check the mark oop because it could be in the process of
|
|
1764 // locking or unlocking while this is running.
|
|
1765 set(Universe::verify_oop_mask (), O2_mask);
|
|
1766 set(Universe::verify_oop_bits (), O3_bits);
|
|
1767
|
|
1768 // assert((obj & oop_mask) == oop_bits);
|
|
1769 and3(O0_obj, O2_mask, O4_temp);
|
|
1770 cmp(O4_temp, O3_bits);
|
|
1771 brx(notEqual, false, pn, null_or_fail);
|
|
1772 delayed()->nop();
|
|
1773
|
|
1774 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
|
|
1775 // the null_or_fail case is useless; must test for null separately
|
|
1776 br_null(O0_obj, false, pn, succeed);
|
|
1777 delayed()->nop();
|
|
1778 }
|
|
1779
|
|
1780 // Check the klassOop of this object for being in the right area of memory.
|
|
1781 // Cannot do the load in the delay slot above in case O0 is null
|
|
1782 ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
|
|
1783 // assert((klass & klass_mask) == klass_bits);
|
|
1784 if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
|
|
1785 set(Universe::verify_klass_mask(), O2_mask);
|
|
1786 if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
|
|
1787 set(Universe::verify_klass_bits(), O3_bits);
|
|
1788 and3(O0_obj, O2_mask, O4_temp);
|
|
1789 cmp(O4_temp, O3_bits);
|
|
1790 brx(notEqual, false, pn, fail);
|
|
1791 // Check the klass's klass
|
|
1792 delayed()->ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
|
|
1793 and3(O0_obj, O2_mask, O4_temp);
|
|
1794 cmp(O4_temp, O3_bits);
|
|
1795 brx(notEqual, false, pn, fail);
|
|
1796 delayed()->wrccr( O5_save_flags ); // Restore CCR's
|
|
1797
|
|
1798 // mark upper end of faulting range
|
|
1799 _verify_oop_implicit_branch[1] = pc();
|
|
1800
|
|
1801 //-----------------------
|
|
1802 // all tests pass
|
|
1803 bind(succeed);
|
|
1804
|
|
1805 // Restore prior 64-bit registers
|
|
1806 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
|
|
1807 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
|
|
1808 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
|
|
1809 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
|
|
1810 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
|
|
1811 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
|
|
1812
|
|
1813 retl(); // Leaf return; restore prior O7 in delay slot
|
|
1814 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
|
|
1815
|
|
1816 //-----------------------
|
|
1817 bind(null_or_fail); // nulls are less common but OK
|
|
1818 br_null(O0_obj, false, pt, succeed);
|
|
1819 delayed()->wrccr( O5_save_flags ); // Restore CCR's
|
|
1820
|
|
1821 //-----------------------
|
|
1822 // report failure:
|
|
1823 bind(fail);
|
|
1824 _verify_oop_implicit_branch[2] = pc();
|
|
1825
|
|
1826 wrccr( O5_save_flags ); // Restore CCR's
|
|
1827
|
|
1828 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
|
|
1829
|
|
1830 // stop_subroutine expects message pointer in I1.
|
|
1831 mov(I1, O1);
|
|
1832
|
|
1833 // Restore prior 64-bit registers
|
|
1834 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
|
|
1835 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
|
|
1836 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
|
|
1837 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
|
|
1838 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
|
|
1839 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
|
|
1840
|
|
1841 // factor long stop-sequence into subroutine to save space
|
|
1842 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
|
|
1843
|
|
1844 // call indirectly to solve generation ordering problem
|
|
1845 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
|
|
1846 load_ptr_contents(a, O5);
|
|
1847 jmpl(O5, 0, O7);
|
|
1848 delayed()->nop();
|
|
1849 }
|
|
1850
|
|
1851
|
|
1852 void MacroAssembler::stop(const char* msg) {
|
|
1853 // save frame first to get O7 for return address
|
|
1854 // add one word to size in case struct is odd number of words long
|
|
1855 // It must be doubleword-aligned for storing doubles into it.
|
|
1856
|
|
1857 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
|
|
1858
|
|
1859 // stop_subroutine expects message pointer in I1.
|
|
1860 set((intptr_t)msg, O1);
|
|
1861
|
|
1862 // factor long stop-sequence into subroutine to save space
|
|
1863 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
|
|
1864
|
|
1865 // call indirectly to solve generation ordering problem
|
|
1866 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
|
|
1867 load_ptr_contents(a, O5);
|
|
1868 jmpl(O5, 0, O7);
|
|
1869 delayed()->nop();
|
|
1870
|
|
1871 breakpoint_trap(); // make stop actually stop rather than writing
|
|
1872 // unnoticeable results in the output files.
|
|
1873
|
|
1874 // restore(); done in callee to save space!
|
|
1875 }
|
|
1876
|
|
1877
|
|
1878 void MacroAssembler::warn(const char* msg) {
|
|
1879 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
|
|
1880 RegistersForDebugging::save_registers(this);
|
|
1881 mov(O0, L0);
|
|
1882 set((intptr_t)msg, O0);
|
|
1883 call( CAST_FROM_FN_PTR(address, warning) );
|
|
1884 delayed()->nop();
|
|
1885 // ret();
|
|
1886 // delayed()->restore();
|
|
1887 RegistersForDebugging::restore_registers(this, L0);
|
|
1888 restore();
|
|
1889 }
|
|
1890
|
|
1891
|
|
1892 void MacroAssembler::untested(const char* what) {
|
|
1893 // We must be able to turn interactive prompting off
|
|
1894 // in order to run automated test scripts on the VM
|
|
1895 // Use the flag ShowMessageBoxOnError
|
|
1896
|
|
1897 char* b = new char[1024];
|
|
1898 sprintf(b, "untested: %s", what);
|
|
1899
|
|
1900 if ( ShowMessageBoxOnError ) stop(b);
|
|
1901 else warn(b);
|
|
1902 }
|
|
1903
|
|
1904
|
|
1905 void MacroAssembler::stop_subroutine() {
|
|
1906 RegistersForDebugging::save_registers(this);
|
|
1907
|
|
1908 // for the sake of the debugger, stick a PC on the current frame
|
|
1909 // (this assumes that the caller has performed an extra "save")
|
|
1910 mov(I7, L7);
|
|
1911 add(O7, -7 * BytesPerInt, I7);
|
|
1912
|
|
1913 save_frame(); // one more save to free up another O7 register
|
|
1914 mov(I0, O1); // addr of reg save area
|
|
1915
|
|
1916 // We expect pointer to message in I1. Caller must set it up in O1
|
|
1917 mov(I1, O0); // get msg
|
|
1918 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
|
|
1919 delayed()->nop();
|
|
1920
|
|
1921 restore();
|
|
1922
|
|
1923 RegistersForDebugging::restore_registers(this, O0);
|
|
1924
|
|
1925 save_frame(0);
|
|
1926 call(CAST_FROM_FN_PTR(address,breakpoint));
|
|
1927 delayed()->nop();
|
|
1928 restore();
|
|
1929
|
|
1930 mov(L7, I7);
|
|
1931 retl();
|
|
1932 delayed()->restore(); // see stop above
|
|
1933 }
|
|
1934
|
|
1935
|
|
1936 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
|
|
1937 if ( ShowMessageBoxOnError ) {
|
|
1938 JavaThreadState saved_state = JavaThread::current()->thread_state();
|
|
1939 JavaThread::current()->set_thread_state(_thread_in_vm);
|
|
1940 {
|
|
1941 // In order to get locks to work, we need to fake an in_VM state
|
|
1942 ttyLocker ttyl;
|
|
1943 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
|
|
1944 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
|
|
1945 ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
|
|
1946 }
|
|
1947 if (os::message_box(msg, "Execution stopped, print registers?"))
|
|
1948 regs->print(::tty);
|
|
1949 }
|
|
1950 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
|
|
1951 }
|
|
1952 else
|
|
1953 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
|
|
1954 assert(false, "error");
|
|
1955 }
|
|
1956
|
|
1957
|
|
1958 #ifndef PRODUCT
|
|
1959 void MacroAssembler::test() {
|
|
1960 ResourceMark rm;
|
|
1961
|
|
1962 CodeBuffer cb("test", 10000, 10000);
|
|
1963 MacroAssembler* a = new MacroAssembler(&cb);
|
|
1964 VM_Version::allow_all();
|
|
1965 a->test_v9();
|
|
1966 a->test_v8_onlys();
|
|
1967 VM_Version::revert();
|
|
1968
|
|
1969 StubRoutines::Sparc::test_stop_entry()();
|
|
1970 }
|
|
1971 #endif
|
|
1972
|
|
1973
|
|
1974 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
|
|
1975 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
|
|
1976 Label no_extras;
|
|
1977 br( negative, true, pt, no_extras ); // if neg, clear reg
|
|
1978 delayed()->set( 0, Rresult); // annulled, so only if taken
|
|
1979 bind( no_extras );
|
|
1980 }
|
|
1981
|
|
1982
|
|
1983 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
|
|
1984 #ifdef _LP64
|
|
1985 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
|
|
1986 #else
|
|
1987 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
|
|
1988 #endif
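  // bclr(1, ...) clears bit 0 of the word count, keeping it even (the "+ 1"
  // in the 32-bit case rounds up rather than down), so the frame size below
  // stays a multiple of two words.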
|
|
1989 bclr(1, Rresult);
|
|
1990 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
|
|
1991 }
|
|
1992
|
|
1993
|
|
1994 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
|
|
1995 calc_frame_size(Rextra_words, Rresult);
|
|
1996 neg(Rresult);
|
|
1997 save(SP, Rresult, SP);
|
|
1998 }
|
|
1999
|
|
2000
|
|
2001 // ---------------------------------------------------------
|
|
2002 Assembler::RCondition cond2rcond(Assembler::Condition c) {
|
|
2003 switch (c) {
|
|
2004 /*case zero: */
|
|
2005 case Assembler::equal: return Assembler::rc_z;
|
|
2006 case Assembler::lessEqual: return Assembler::rc_lez;
|
|
2007 case Assembler::less: return Assembler::rc_lz;
|
|
2008 /*case notZero:*/
|
|
2009 case Assembler::notEqual: return Assembler::rc_nz;
|
|
2010 case Assembler::greater: return Assembler::rc_gz;
|
|
2011 case Assembler::greaterEqual: return Assembler::rc_gez;
|
|
2012 }
|
|
2013 ShouldNotReachHere();
|
|
2014 return Assembler::rc_z;
|
|
2015 }
|
|
2016
|
|
2017 // compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
|
|
2018 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
|
|
2019 tst(s1);
|
|
2020 br (c, a, p, L);
|
|
2021 }
|
|
2022
|
|
2023
|
|
2024 // Compares a pointer register with zero and branches on null.
|
|
2025 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
|
|
2026 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
|
|
2027 assert_not_delayed();
|
|
2028 #ifdef _LP64
|
|
2029 bpr( rc_z, a, p, s1, L );
|
|
2030 #else
|
|
2031 tst(s1);
|
|
2032 br ( zero, a, p, L );
|
|
2033 #endif
|
|
2034 }
|
|
2035
|
|
2036 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
|
|
2037 assert_not_delayed();
|
|
2038 #ifdef _LP64
|
|
2039 bpr( rc_nz, a, p, s1, L );
|
|
2040 #else
|
|
2041 tst(s1);
|
|
2042 br ( notZero, a, p, L );
|
|
2043 #endif
|
|
2044 }
|
|
2045
|
|
2046
|
|
2047 // instruction sequences factored across compiler & interpreter
|
|
2048
|
|
2049
|
|
2050 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
|
|
2051 Register Rb_hi, Register Rb_low,
|
|
2052 Register Rresult) {
|
|
2053
|
|
2054 Label check_low_parts, done;
|
|
2055
|
|
2056 cmp(Ra_hi, Rb_hi ); // compare hi parts
|
|
2057 br(equal, true, pt, check_low_parts);
|
|
2058 delayed()->cmp(Ra_low, Rb_low); // test low parts
|
|
2059
|
|
2060 // And, with an unsigned comparison, it does not matter if the numbers
|
|
2061 // are negative or not.
|
|
2062 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
|
|
2063 // The second one is bigger (as an unsigned value).
|
|
2064
|
|
2065 // Other notes: The first move in each triplet can be unconditional
|
|
2066 // (and therefore probably prefetchable).
|
|
2067 // And the equals case for the high part does not need testing,
|
|
2068 // since that triplet is reached only after finding the high halves differ.
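  // On every path Rresult ends up as -1, 0, or 1 (less-than, equal, greater-than).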
|
|
2069
|
|
2070 if (VM_Version::v9_instructions_work()) {
|
|
2071
|
|
2072 mov ( -1, Rresult);
|
|
2073 ba( false, done ); delayed()-> movcc(greater, false, icc, 1, Rresult);
|
|
2074 }
|
|
2075 else {
|
|
2076 br(less, true, pt, done); delayed()-> set(-1, Rresult);
|
|
2077 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
|
|
2078 }
|
|
2079
|
|
2080 bind( check_low_parts );
|
|
2081
|
|
2082 if (VM_Version::v9_instructions_work()) {
|
|
2083 mov( -1, Rresult);
|
|
2084 movcc(equal, false, icc, 0, Rresult);
|
|
2085 movcc(greaterUnsigned, false, icc, 1, Rresult);
|
|
2086 }
|
|
2087 else {
|
|
2088 set(-1, Rresult);
|
|
2089 br(equal, true, pt, done); delayed()->set( 0, Rresult);
|
|
2090 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
|
|
2091 }
|
|
2092 bind( done );
|
|
2093 }
|
|
2094
|
|
2095 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
|
|
2096 subcc( G0, Rlow, Rlow );
|
|
2097 subc( G0, Rhi, Rhi );
|
|
2098 }
|
|
2099
|
|
2100 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
|
|
2101 Register Rcount,
|
|
2102 Register Rout_high, Register Rout_low,
|
|
2103 Register Rtemp ) {
|
|
2104
|
|
2105
|
|
2106 Register Ralt_count = Rtemp;
|
|
2107 Register Rxfer_bits = Rtemp;
|
|
2108
|
|
2109 assert( Ralt_count != Rin_high
|
|
2110 && Ralt_count != Rin_low
|
|
2111 && Ralt_count != Rcount
|
|
2112 && Rxfer_bits != Rin_low
|
|
2113 && Rxfer_bits != Rin_high
|
|
2114 && Rxfer_bits != Rcount
|
|
2115 && Rxfer_bits != Rout_low
|
|
2116 && Rout_low != Rin_high,
|
|
2117 "register alias checks");
|
|
2118
|
|
2119 Label big_shift, done;
|
|
2120
|
|
2121 // This code can be optimized to use the 64 bit shifts in V9.
|
|
2122 // Here we use the 32 bit shifts.
|
|
2123
|
|
2124 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
|
|
2125 subcc(Rcount, 31, Ralt_count);
|
|
2126 br(greater, true, pn, big_shift);
|
|
2127 delayed()->
|
|
2128 dec(Ralt_count);
|
|
2129
|
|
2130 // shift < 32 bits, Ralt_count = Rcount-31
|
|
2131
|
|
2132 // We get the transfer bits by shifting the low register right by
|
|
2133 // 32-count. This is done by shifting right by 31-count and then by one
|
|
2134 // more to take care of the special (rare) case where count is zero
|
|
2135 // (shifting by 32 would not work).
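  // Example, count = 3: after the neg below Ralt_count = 31 - 3 = 28, so
  //   Rxfer_bits = Rin_low >>> 28 >>> 1  (= Rin_low >>> 29)
  //   Rout_high  = (Rin_high << 3) | Rxfer_bits
  //   Rout_low   =  Rin_low << 3
  // For count = 0 the two srl steps correctly produce 0 transfer bits; a single
  // srl by 32 would shift by 0, since 32-bit shift counts are taken mod 32.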
|
|
2136
|
|
2137 neg( Ralt_count );
|
|
2138
|
|
2139 // The order of the next two instructions is critical in the case where
|
|
2140 // Rin and Rout are the same and should not be reversed.
|
|
2141
|
|
2142 srl( Rin_low, Ralt_count, Rxfer_bits ); // shift right by 31-count
|
|
2143 if (Rcount != Rout_low) {
|
|
2144 sll( Rin_low, Rcount, Rout_low ); // low half
|
|
2145 }
|
|
2146 sll( Rin_high, Rcount, Rout_high );
|
|
2147 if (Rcount == Rout_low) {
|
|
2148 sll( Rin_low, Rcount, Rout_low ); // low half
|
|
2149 }
|
|
2150 srl( Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
|
|
2151 ba (false, done);
|
|
2152 delayed()->
|
|
2153 or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
|
|
2154
|
|
2155 // shift >= 32 bits, Ralt_count = Rcount-32
|
|
2156 bind(big_shift);
|
|
2157 sll( Rin_low, Ralt_count, Rout_high );
|
|
2158 clr( Rout_low );
|
|
2159
|
|
2160 bind(done);
|
|
2161 }
|
|
2162
|
|
2163
|
|
2164 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
|
|
2165 Register Rcount,
|
|
2166 Register Rout_high, Register Rout_low,
|
|
2167 Register Rtemp ) {
|
|
2168
|
|
2169 Register Ralt_count = Rtemp;
|
|
2170 Register Rxfer_bits = Rtemp;
|
|
2171
|
|
2172 assert( Ralt_count != Rin_high
|
|
2173 && Ralt_count != Rin_low
|
|
2174 && Ralt_count != Rcount
|
|
2175 && Rxfer_bits != Rin_low
|
|
2176 && Rxfer_bits != Rin_high
|
|
2177 && Rxfer_bits != Rcount
|
|
2178 && Rxfer_bits != Rout_high
|
|
2179 && Rout_high != Rin_low,
|
|
2180 "register alias checks");
|
|
2181
|
|
2182 Label big_shift, done;
|
|
2183
|
|
2184 // This code can be optimized to use the 64 bit shifts in V9.
|
|
2185 // Here we use the 32 bit shifts.
|
|
2186
|
|
2187 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
|
|
2188 subcc(Rcount, 31, Ralt_count);
|
|
2189 br(greater, true, pn, big_shift);
|
|
2190 delayed()->dec(Ralt_count);
|
|
2191
|
|
2192 // shift < 32 bits, Ralt_count = Rcount-31
|
|
2193
|
|
2194 // We get the transfer bits by shifting the high register left by
|
|
2195 // 32-count. This is done by shifting left by 31-count and then by one
|
|
2196 // more to take care of the special (rare) case where count is zero
|
|
2197 // (shifting by 32 would not work).
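  // Example, count = 3: Rxfer_bits = (Rin_high << 28) << 1, so
  //   Rout_low  = (Rin_low >>> 3) | Rxfer_bits
  //   Rout_high =  Rin_high >> 3   (arithmetic, keeps the sign)
  // On the big_shift path the high half is filled with sign bits.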
|
|
2198
|
|
2199 neg( Ralt_count );
|
|
2200 if (Rcount != Rout_low) {
|
|
2201 srl( Rin_low, Rcount, Rout_low );
|
|
2202 }
|
|
2203
|
|
2204 // The order of the next two instructions is critical in the case where
|
|
2205 // Rin and Rout are the same and should not be reversed.
|
|
2206
|
|
2207 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
|
|
2208 sra( Rin_high, Rcount, Rout_high ); // high half
|
|
2209 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
|
|
2210 if (Rcount == Rout_low) {
|
|
2211 srl( Rin_low, Rcount, Rout_low );
|
|
2212 }
|
|
2213 ba (false, done);
|
|
2214 delayed()->
|
|
2215 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
|
|
2216
|
|
2217 // shift >= 32 bits, Ralt_count = Rcount-32
|
|
2218 bind(big_shift);
|
|
2219
|
|
2220 sra( Rin_high, Ralt_count, Rout_low );
|
|
2221 sra( Rin_high, 31, Rout_high ); // sign into hi
|
|
2222
|
|
2223 bind( done );
|
|
2224 }
|
|
2225
|
|
2226
|
|
2227
|
|
2228 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
|
|
2229 Register Rcount,
|
|
2230 Register Rout_high, Register Rout_low,
|
|
2231 Register Rtemp ) {
|
|
2232
|
|
2233 Register Ralt_count = Rtemp;
|
|
2234 Register Rxfer_bits = Rtemp;
|
|
2235
|
|
2236 assert( Ralt_count != Rin_high
|
|
2237 && Ralt_count != Rin_low
|
|
2238 && Ralt_count != Rcount
|
|
2239 && Rxfer_bits != Rin_low
|
|
2240 && Rxfer_bits != Rin_high
|
|
2241 && Rxfer_bits != Rcount
|
|
2242 && Rxfer_bits != Rout_high
|
|
2243 && Rout_high != Rin_low,
|
|
2244 "register alias checks");
|
|
2245
|
|
2246 Label big_shift, done;
|
|
2247
|
|
2248 // This code can be optimized to use the 64 bit shifts in V9.
|
|
2249 // Here we use the 32 bit shifts.
|
|
2250
|
|
2251 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
|
|
2252 subcc(Rcount, 31, Ralt_count);
|
|
2253 br(greater, true, pn, big_shift);
|
|
2254 delayed()->dec(Ralt_count);
|
|
2255
|
|
2256 // shift < 32 bits, Ralt_count = Rcount-31
|
|
2257
|
|
2258 // We get the transfer bits by shifting the high register left by
|
|
2259 // 32-count. This is done by shifting left by 31-count and then by one
|
|
2260 // more to take care of the special (rare) case where count is zero
|
|
2261 // (shifting by 32 would not work).
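  // Same scheme as lshr above, except the high word is shifted with logical
  // srl, so vacated bits (and Rout_high on the big_shift path) are zero-filled
  // instead of sign-extended.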
|
|
2262
|
|
2263 neg( Ralt_count );
|
|
2264 if (Rcount != Rout_low) {
|
|
2265 srl( Rin_low, Rcount, Rout_low );
|
|
2266 }
|
|
2267
|
|
2268 // The order of the next two instructions is critical in the case where
|
|
2269 // Rin and Rout are the same and should not be reversed.
|
|
2270
|
|
2271 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
|
|
2272 srl( Rin_high, Rcount, Rout_high ); // high half
|
|
2273 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
|
|
2274 if (Rcount == Rout_low) {
|
|
2275 srl( Rin_low, Rcount, Rout_low );
|
|
2276 }
|
|
2277 ba (false, done);
|
|
2278 delayed()->
|
|
2279 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
|
|
2280
|
|
2281 // shift >= 32 bits, Ralt_count = Rcount-32
|
|
2282 bind(big_shift);
|
|
2283
|
|
2284 srl( Rin_high, Ralt_count, Rout_low );
|
|
2285 clr( Rout_high );
|
|
2286
|
|
2287 bind( done );
|
|
2288 }
|
|
2289
|
|
2290 #ifdef _LP64
|
|
2291 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
|
|
2292 cmp(Ra, Rb);
|
|
2293 mov( -1, Rresult);
|
|
2294 movcc(equal, false, xcc, 0, Rresult);
|
|
2295 movcc(greater, false, xcc, 1, Rresult);
|
|
2296 }
|
|
2297 #endif
|
|
2298
|
|
2299
|
|
2300 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
|
|
2301 FloatRegister Fa, FloatRegister Fb,
|
|
2302 Register Rresult) {
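  // Sets Rresult to -1, 0, or 1. unordered_result (-1 or +1) selects the
  // result when either operand is NaN (the fcmpl / fcmpg distinction).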
|
|
2303
|
|
2304 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
|
|
2305
|
|
2306 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
|
|
2307 Condition eq = f_equal;
|
|
2308 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
|
|
2309
|
|
2310 if (VM_Version::v9_instructions_work()) {
|
|
2311
|
|
2312 mov( -1, Rresult );
|
|
2313 movcc( eq, true, fcc0, 0, Rresult );
|
|
2314 movcc( gt, true, fcc0, 1, Rresult );
|
|
2315
|
|
2316 } else {
|
|
2317 Label done;
|
|
2318
|
|
2319 set( -1, Rresult );
|
|
2320 //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
|
|
2321 fb( eq, true, pn, done); delayed()->set( 0, Rresult );
|
|
2322 fb( gt, true, pn, done); delayed()->set( 1, Rresult );
|
|
2323
|
|
2324 bind (done);
|
|
2325 }
|
|
2326 }
|
|
2327
|
|
2328
|
|
2329 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
|
|
2330 {
|
|
2331 if (VM_Version::v9_instructions_work()) {
|
|
2332 Assembler::fneg(w, s, d);
|
|
2333 } else {
|
|
2334 if (w == FloatRegisterImpl::S) {
|
|
2335 Assembler::fneg(w, s, d);
|
|
2336 } else if (w == FloatRegisterImpl::D) {
|
|
2337 // number() does a sanity check on the alignment.
|
|
2338 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
|
|
2339 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
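    // For the V8 emulation below: the sign bit of the double lives in the
    // first (most significant) single-precision half, so it suffices to
    // fneg that half and fmov the remaining halves.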
|
|
2340
|
|
2341 Assembler::fneg(FloatRegisterImpl::S, s, d);
|
|
2342 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2343 } else {
|
|
2344 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
|
|
2345
|
|
2346 // number() does a sanity check on the alignment.
|
|
2347 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
|
|
2348 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
|
|
2349
|
|
2350 Assembler::fneg(FloatRegisterImpl::S, s, d);
|
|
2351 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2352 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
|
|
2353 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
|
|
2354 }
|
|
2355 }
|
|
2356 }
|
|
2357
|
|
2358 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
|
|
2359 {
|
|
2360 if (VM_Version::v9_instructions_work()) {
|
|
2361 Assembler::fmov(w, s, d);
|
|
2362 } else {
|
|
2363 if (w == FloatRegisterImpl::S) {
|
|
2364 Assembler::fmov(w, s, d);
|
|
2365 } else if (w == FloatRegisterImpl::D) {
|
|
2366 // number() does a sanity check on the alignment.
|
|
2367 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
|
|
2368 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
|
|
2369
|
|
2370 Assembler::fmov(FloatRegisterImpl::S, s, d);
|
|
2371 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2372 } else {
|
|
2373 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
|
|
2374
|
|
2375 // number() does a sanity check on the alignment.
|
|
2376 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
|
|
2377 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
|
|
2378
|
|
2379 Assembler::fmov(FloatRegisterImpl::S, s, d);
|
|
2380 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2381 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
|
|
2382 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
|
|
2383 }
|
|
2384 }
|
|
2385 }
|
|
2386
|
|
2387 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
|
|
2388 {
|
|
2389 if (VM_Version::v9_instructions_work()) {
|
|
2390 Assembler::fabs(w, s, d);
|
|
2391 } else {
|
|
2392 if (w == FloatRegisterImpl::S) {
|
|
2393 Assembler::fabs(w, s, d);
|
|
2394 } else if (w == FloatRegisterImpl::D) {
|
|
2395 // number() does a sanity check on the alignment.
|
|
2396 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
|
|
2397 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
|
|
2398
|
|
2399 Assembler::fabs(FloatRegisterImpl::S, s, d);
|
|
2400 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2401 } else {
|
|
2402 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
|
|
2403
|
|
2404 // number() does a sanity check on the alignment.
|
|
2405 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
|
|
2406 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
|
|
2407
|
|
2408 Assembler::fabs(FloatRegisterImpl::S, s, d);
|
|
2409 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
|
|
2410 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
|
|
2411 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
|
|
2412 }
|
|
2413 }
|
|
2414 }
|
|
2415
|
|
2416 void MacroAssembler::save_all_globals_into_locals() {
|
|
2417 mov(G1,L1);
|
|
2418 mov(G2,L2);
|
|
2419 mov(G3,L3);
|
|
2420 mov(G4,L4);
|
|
2421 mov(G5,L5);
|
|
2422 mov(G6,L6);
|
|
2423 mov(G7,L7);
|
|
2424 }
|
|
2425
|
|
2426 void MacroAssembler::restore_globals_from_locals() {
|
|
2427 mov(L1,G1);
|
|
2428 mov(L2,G2);
|
|
2429 mov(L3,G3);
|
|
2430 mov(L4,G4);
|
|
2431 mov(L5,G5);
|
|
2432 mov(L6,G6);
|
|
2433 mov(L7,G7);
|
|
2434 }
|
|
2435
|
|
2436 // Use for 64 bit operation.
|
|
2437 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
|
|
2438 {
|
|
2439 // store ptr_reg as the new top value
|
|
2440 #ifdef _LP64
|
|
2441 casx(top_ptr_reg, top_reg, ptr_reg);
|
|
2442 #else
|
|
2443 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
|
|
2444 #endif // _LP64
|
|
2445 }
|
|
2446
|
|
2447 // [RGV] This routine does not handle 64 bit operations.
|
|
2448 // use casx_under_lock() or casx directly!!!
|
|
2449 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
|
|
2450 {
|
|
2451 // store ptr_reg as the new top value
|
|
2452 if (VM_Version::v9_instructions_work()) {
|
|
2453 cas(top_ptr_reg, top_reg, ptr_reg);
|
|
2454 } else {
|
|
2455
|
|
2456 // If the register is neither an out nor a global, it is not visible
|
|
2457 // after the save. Allocate a register for it, save its
|
|
2458 // value in the register save area (the save may not flush
|
|
2459 // registers to the save area).
|
|
2460
|
|
2461 Register top_ptr_reg_after_save;
|
|
2462 Register top_reg_after_save;
|
|
2463 Register ptr_reg_after_save;
|
|
2464
|
|
2465 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
|
|
2466 top_ptr_reg_after_save = top_ptr_reg->after_save();
|
|
2467 } else {
|
|
2468 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
|
|
2469 top_ptr_reg_after_save = L0;
|
|
2470 st(top_ptr_reg, reg_save_addr);
|
|
2471 }
|
|
2472
|
|
2473 if (top_reg->is_out() || top_reg->is_global()) {
|
|
2474 top_reg_after_save = top_reg->after_save();
|
|
2475 } else {
|
|
2476 Address reg_save_addr = top_reg->address_in_saved_window();
|
|
2477 top_reg_after_save = L1;
|
|
2478 st(top_reg, reg_save_addr);
|
|
2479 }
|
|
2480
|
|
2481 if (ptr_reg->is_out() || ptr_reg->is_global()) {
|
|
2482 ptr_reg_after_save = ptr_reg->after_save();
|
|
2483 } else {
|
|
2484 Address reg_save_addr = ptr_reg->address_in_saved_window();
|
|
2485 ptr_reg_after_save = L2;
|
|
2486 st(ptr_reg, reg_save_addr);
|
|
2487 }
|
|
2488
|
|
2489 const Register& lock_reg = L3;
|
|
2490 const Register& lock_ptr_reg = L4;
|
|
2491 const Register& value_reg = L5;
|
|
2492 const Register& yield_reg = L6;
|
|
2493 const Register& yieldall_reg = L7;
|
|
2494
|
|
2495 save_frame();
|
|
2496
|
|
2497 if (top_ptr_reg_after_save == L0) {
|
|
2498 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
|
|
2499 }
|
|
2500
|
|
2501 if (top_reg_after_save == L1) {
|
|
2502 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
|
|
2503 }
|
|
2504
|
|
2505 if (ptr_reg_after_save == L2) {
|
|
2506 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
|
|
2507 }
|
|
2508
|
|
2509 Label retry_get_lock;
|
|
2510 Label not_same;
|
|
2511 Label dont_yield;
|
|
2512
|
|
2513 assert(lock_addr, "lock_address should be non null for v8");
|
|
2514 set((intptr_t)lock_addr, lock_ptr_reg);
|
|
2515 // Initialize yield counter
|
|
2516 mov(G0,yield_reg);
|
|
2517 mov(G0, yieldall_reg);
|
|
2518 set(StubRoutines::Sparc::locked, lock_reg);
|
|
2519
|
|
2520 bind(retry_get_lock);
|
|
2521 cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
|
|
2522 br(Assembler::less, false, Assembler::pt, dont_yield);
|
|
2523 delayed()->nop();
|
|
2524
|
|
2525 if(use_call_vm) {
|
|
2526 Untested("Need to verify global reg consistency");
|
|
2527 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
|
|
2528 } else {
|
|
2529 // Save the regs and make space for a C call
|
|
2530 save(SP, -96, SP);
|
|
2531 save_all_globals_into_locals();
|
|
2532 call(CAST_FROM_FN_PTR(address,os::yield_all));
|
|
2533 delayed()->mov(yieldall_reg, O0);
|
|
2534 restore_globals_from_locals();
|
|
2535 restore();
|
|
2536 }
|
|
2537
|
|
2538 // reset the counter
|
|
2539 mov(G0,yield_reg);
|
|
2540 add(yieldall_reg, 1, yieldall_reg);
|
|
2541
|
|
2542 bind(dont_yield);
|
|
2543 // try to get lock
|
|
2544 swap(lock_ptr_reg, 0, lock_reg);
|
|
2545
|
|
2546 // did we get the lock?
|
|
2547 cmp(lock_reg, StubRoutines::Sparc::unlocked);
|
|
2548 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
|
|
2549 delayed()->add(yield_reg,1,yield_reg);
|
|
2550
|
|
2551 // yes, got lock. do we have the same top?
|
|
2552 ld(top_ptr_reg_after_save, 0, value_reg);
|
|
2553 cmp(value_reg, top_reg_after_save);
|
|
2554 br(Assembler::notEqual, false, Assembler::pn, not_same);
|
|
2555 delayed()->nop();
|
|
2556
|
|
2557 // yes, same top.
|
|
2558 st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
|
|
2559 membar(Assembler::StoreStore);
|
|
2560
|
|
2561 bind(not_same);
|
|
2562 mov(value_reg, ptr_reg_after_save);
|
|
2563 st(lock_reg, lock_ptr_reg, 0); // unlock
|
|
2564
|
|
2565 restore();
|
|
2566 }
|
|
2567 }
|
|
2568
|
|
2569 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
|
|
2570 Label& done, Label* slow_case,
|
|
2571 BiasedLockingCounters* counters) {
|
|
2572 assert(UseBiasedLocking, "why call this otherwise?");
|
|
2573
|
|
2574 if (PrintBiasedLockingStatistics) {
|
|
2575 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
|
|
2576 if (counters == NULL)
|
|
2577 counters = BiasedLocking::counters();
|
|
2578 }
|
|
2579
|
|
2580 Label cas_label;
|
|
2581
|
|
2582 // Biased locking
|
|
2583 // See whether the lock is currently biased toward our thread and
|
|
2584 // whether the epoch is still valid
|
|
2585 // Note that the runtime guarantees sufficient alignment of JavaThread
|
|
2586 // pointers to allow age to be placed into low bits
|
|
2587 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
|
|
2588 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
|
|
2589 cmp(temp_reg, markOopDesc::biased_lock_pattern);
|
|
2590 brx(Assembler::notEqual, false, Assembler::pn, cas_label);
|
|
2591
|
|
2592 delayed()->ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
|
|
2593 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
|
|
2594 or3(G2_thread, temp_reg, temp_reg);
|
|
2595 xor3(mark_reg, temp_reg, temp_reg);
|
|
2596 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
|
|
2597 if (counters != NULL) {
|
|
2598 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
|
|
2599 // Reload mark_reg as we may need it later
|
|
2600 ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
|
|
2601 }
|
|
2602 brx(Assembler::equal, true, Assembler::pt, done);
|
|
2603 delayed()->nop();
|
|
2604
|
|
2605 Label try_revoke_bias;
|
|
2606 Label try_rebias;
|
|
2607 Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
|
|
2608 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
|
|
2609
|
|
2610 // At this point we know that the header has the bias pattern and
|
|
2611 // that we are not the bias owner in the current epoch. We need to
|
|
2612 // figure out more details about the state of the header in order to
|
|
2613 // know what operations can be legally performed on the object's
|
|
2614 // header.
|
|
2615
|
|
2616 // If the low three bits in the xor result aren't clear, that means
|
|
2617 // the prototype header is no longer biased and we have to revoke
|
|
2618 // the bias on this object.
|
|
2619 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
|
|
2620 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
|
|
2621
|
|
2622 // Biasing is still enabled for this data type. See whether the
|
|
2623 // epoch of the current bias is still valid, meaning that the epoch
|
|
2624 // bits of the mark word are equal to the epoch bits of the
|
|
2625 // prototype header. (Note that the prototype header's epoch bits
|
|
2626 // only change at a safepoint.) If not, attempt to rebias the object
|
|
2627 // toward the current thread. Note that we must be absolutely sure
|
|
2628 // that the current epoch is invalid in order to do this because
|
|
2629 // otherwise the manipulations it performs on the mark word are
|
|
2630 // illegal.
|
|
2631 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
|
|
2632 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
|
|
2633
|
|
2634 // The epoch of the current bias is still valid but we know nothing
|
|
2635 // about the owner; it might be set or it might be clear. Try to
|
|
2636 // acquire the bias of the object using an atomic operation. If this
|
|
2637 // fails we will go in to the runtime to revoke the object's bias.
|
|
2638 // Note that we first construct the presumed unbiased header so we
|
|
2639 // don't accidentally blow away another thread's valid bias.
|
|
2640 delayed()->and3(mark_reg,
|
|
2641 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
|
|
2642 mark_reg);
|
|
2643 or3(G2_thread, mark_reg, temp_reg);
|
|
2644 casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
|
|
2645 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
|
|
2646 // If the biasing toward our thread failed, this means that
|
|
2647 // another thread succeeded in biasing it toward itself and we
|
|
2648 // need to revoke that bias. The revocation will occur in the
|
|
2649 // interpreter runtime in the slow case.
|
|
2650 cmp(mark_reg, temp_reg);
|
|
2651 if (counters != NULL) {
|
|
2652 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
|
|
2653 }
|
|
2654 if (slow_case != NULL) {
|
|
2655 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
|
|
2656 delayed()->nop();
|
|
2657 }
|
|
2658 br(Assembler::always, false, Assembler::pt, done);
|
|
2659 delayed()->nop();
|
|
2660
|
|
2661 bind(try_rebias);
|
|
2662 // At this point we know the epoch has expired, meaning that the
|
|
2663 // current "bias owner", if any, is actually invalid. Under these
|
|
2664 // circumstances _only_, we are allowed to use the current header's
|
|
2665 // value as the comparison value when doing the cas to acquire the
|
|
2666 // bias in the current epoch. In other words, we allow transfer of
|
|
2667 // the bias from one thread to another directly in this situation.
|
|
2668 //
|
|
2669 // FIXME: due to a lack of registers we currently blow away the age
|
|
2670 // bits in this situation. Should attempt to preserve them.
|
|
2671 ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
|
|
2672 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
|
|
2673 or3(G2_thread, temp_reg, temp_reg);
|
|
2674 casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
|
|
2675 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
|
|
2676 // If the biasing toward our thread failed, this means that
|
|
2677 // another thread succeeded in biasing it toward itself and we
|
|
2678 // need to revoke that bias. The revocation will occur in the
|
|
2679 // interpreter runtime in the slow case.
|
|
2680 cmp(mark_reg, temp_reg);
|
|
2681 if (counters != NULL) {
|
|
2682 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
|
|
2683 }
|
|
2684 if (slow_case != NULL) {
|
|
2685 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
|
|
2686 delayed()->nop();
|
|
2687 }
|
|
2688 br(Assembler::always, false, Assembler::pt, done);
|
|
2689 delayed()->nop();
|
|
2690
|
|
2691 bind(try_revoke_bias);
|
|
2692 // The prototype mark in the klass doesn't have the bias bit set any
|
|
2693 // more, indicating that objects of this data type are not supposed
|
|
2694 // to be biased any more. We are going to try to reset the mark of
|
|
2695 // this object to the prototype value and fall through to the
|
|
2696 // CAS-based locking scheme. Note that if our CAS fails, it means
|
|
2697 // that another thread raced us for the privilege of revoking the
|
|
2698 // bias of this particular object, so it's okay to continue in the
|
|
2699 // normal locking code.
|
|
2700 //
|
|
2701 // FIXME: due to a lack of registers we currently blow away the age
|
|
2702 // bits in this situation. Should attempt to preserve them.
|
|
2703 ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
|
|
2704 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
|
|
2705 casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
|
|
2706 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
|
|
2707 // Fall through to the normal CAS-based lock, because no matter what
|
|
2708 // the result of the above CAS, some thread must have succeeded in
|
|
2709 // removing the bias bit from the object's header.
|
|
2710 if (counters != NULL) {
|
|
2711 cmp(mark_reg, temp_reg);
|
|
2712 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
|
|
2713 }
|
|
2714
|
|
2715 bind(cas_label);
|
|
2716 }
|
|
2717
|
|
2718 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
|
|
2719 bool allow_delay_slot_filling) {
|
|
2720 // Check for biased locking unlock case, which is a no-op
|
|
2721 // Note: we do not have to check the thread ID for two reasons.
|
|
2722 // First, the interpreter checks for IllegalMonitorStateException at
|
|
2723 // a higher level. Second, if the bias was revoked while we held the
|
|
2724 // lock, the object could not be rebiased toward another thread, so
|
|
2725 // the bias bit would be clear.
|
|
2726 ld_ptr(mark_addr, temp_reg);
|
|
2727 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
|
|
2728 cmp(temp_reg, markOopDesc::biased_lock_pattern);
|
|
2729 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
|
|
2730 delayed();
|
|
2731 if (!allow_delay_slot_filling) {
|
|
2732 nop();
|
|
2733 }
|
|
2734 }
|
|
2735
|
|
2736
|
|
2737 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
|
|
2738 // Solaris/SPARC's "as". Another apt name would be cas_ptr()
|
|
2739
|
|
2740 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
|
|
2741 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
|
|
2742 }
|
|
2743
|
|
2744
|
|
2745
|
|
2746 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
|
|
2747 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
|
|
2748 // The code could be tightened up considerably.
|
|
2749 //
|
|
2750 // box->dhw disposition - post-conditions at DONE_LABEL.
|
|
2751 // - Successful inflated lock: box->dhw != 0.
|
|
2752 // Any non-zero value suffices.
|
|
2753 // Consider G2_thread, rsp, boxReg, or unused_mark()
|
|
2754 // - Successful Stack-lock: box->dhw == mark.
|
|
2755 // box->dhw must contain the displaced mark word value
|
|
2756 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
|
|
2757 // The slow-path fast_enter() and slow_enter() operators
|
|
2758 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
|
|
2759 // - Biased: box->dhw is undefined
|
|
2760 //
|
|
2761 // SPARC refworkload performance - specifically jetstream and scimark - is
|
|
2762 // extremely sensitive to the size of the code emitted by compiler_lock_object
|
|
2763 // and compiler_unlock_object. Critically, the key factor is code size, not path
|
|
2764 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
|
|
2765 // effect).
|
|
2766
|
|
2767
|
|
2768 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
|
|
2769 BiasedLockingCounters* counters) {
|
|
2770 Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
|
|
2771
|
|
2772 verify_oop(Roop);
|
|
2773 Label done ;
|
|
2774
|
|
2775 if (counters != NULL) {
|
|
2776 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
|
|
2777 }
|
|
2778
|
|
2779 if (EmitSync & 1) {
|
|
2780 mov (3, Rscratch) ;
|
|
2781 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2782 cmp (SP, G0) ;
|
|
2783 return ;
|
|
2784 }
|
|
2785
|
|
2786 if (EmitSync & 2) {
|
|
2787
|
|
2788 // Fetch object's markword
|
|
2789 ld_ptr(mark_addr, Rmark);
|
|
2790
|
|
2791 if (UseBiasedLocking) {
|
|
2792 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
|
|
2793 }
|
|
2794
|
|
2795 // Save Rbox in Rscratch to be used for the cas operation
|
|
2796 mov(Rbox, Rscratch);
|
|
2797
|
|
2798 // set Rmark to markOop | markOopDesc::unlocked_value
|
|
2799 or3(Rmark, markOopDesc::unlocked_value, Rmark);
|
|
2800
|
|
2801 // Initialize the box. (Must happen before we update the object mark!)
|
|
2802 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2803
|
|
2804 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
|
|
2805 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
|
|
2806 casx_under_lock(mark_addr.base(), Rmark, Rscratch,
|
|
2807 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
|
|
2808
|
|
2809 // if compare/exchange succeeded we found an unlocked object and we now have locked it
|
|
2810 // hence we are done
|
|
2811 cmp(Rmark, Rscratch);
|
|
2812 #ifdef _LP64
|
|
2813 sub(Rscratch, STACK_BIAS, Rscratch);
|
|
2814 #endif
|
|
2815 brx(Assembler::equal, false, Assembler::pt, done);
|
|
2816 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
|
|
2817
|
|
2818 // we did not find an unlocked object so see if this is a recursive case
|
|
2819 // sub(Rscratch, SP, Rscratch);
|
|
2820 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
|
|
2821 andcc(Rscratch, 0xfffff003, Rscratch);
|
|
2822 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2823 bind (done) ;
|
|
2824 return ;
|
|
2825 }
|
|
2826
|
|
2827 Label Egress ;
|
|
2828
|
|
2829 if (EmitSync & 256) {
|
|
2830 Label IsInflated ;
|
|
2831
|
|
2832 ld_ptr (mark_addr, Rmark); // fetch obj->mark
|
|
2833 // Triage: biased, stack-locked, neutral, inflated
|
|
2834 if (UseBiasedLocking) {
|
|
2835 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
|
|
2836 // Invariant: if control reaches this point in the emitted stream
|
|
2837 // then Rmark has not been modified.
|
|
2838 }
|
|
2839
|
|
2840 // Store mark into displaced mark field in the on-stack basic-lock "box"
|
|
2841 // Critically, this must happen before the CAS
|
|
2842 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
|
|
2843 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2844 andcc (Rmark, 2, G0) ;
|
|
2845 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
|
|
2846 delayed() ->
|
|
2847
|
|
2848 // Try stack-lock acquisition.
|
|
2849 // Beware: the 1st instruction is in a delay slot
|
|
2850 mov (Rbox, Rscratch);
|
|
2851 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
|
|
2852 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
|
|
2853 casn (mark_addr.base(), Rmark, Rscratch) ;
|
|
2854 cmp (Rmark, Rscratch);
|
|
2855 brx (Assembler::equal, false, Assembler::pt, done);
|
|
2856 delayed()->sub(Rscratch, SP, Rscratch);
|
|
2857
|
|
2858 // Stack-lock attempt failed - check for recursive stack-lock.
|
|
2859 // See the comments below about how we might remove this case.
|
|
2860 #ifdef _LP64
|
|
2861 sub (Rscratch, STACK_BIAS, Rscratch);
|
|
2862 #endif
|
|
2863 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
|
|
2864 andcc (Rscratch, 0xfffff003, Rscratch);
|
|
2865 br (Assembler::always, false, Assembler::pt, done) ;
|
|
2866 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2867
|
|
2868 bind (IsInflated) ;
|
|
2869 if (EmitSync & 64) {
|
|
2870 // If m->owner != null goto IsLocked
|
|
2871 // Pessimistic form: Test-and-CAS vs CAS
|
|
2872 // The optimistic form avoids RTS->RTO cache line upgrades.
|
|
2873 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
|
|
2874 andcc (Rscratch, Rscratch, G0) ;
|
|
2875 brx (Assembler::notZero, false, Assembler::pn, done) ;
|
|
2876 delayed()->nop() ;
|
|
2877 // m->owner == null : it's unlocked.
|
|
2878 }
|
|
2879
|
|
2880 // Try to CAS m->owner from null to Self
|
|
2881 // Invariant: if we acquire the lock then _recursions should be 0.
|
|
2882 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
|
|
2883 mov (G2_thread, Rscratch) ;
|
|
2884 casn (Rmark, G0, Rscratch) ;
|
|
2885 cmp (Rscratch, G0) ;
|
|
2886 // Intentional fall-through into done
|
|
2887 } else {
|
|
2888 // Aggressively avoid the Store-before-CAS penalty
|
|
2889 // Defer the store into box->dhw until after the CAS
|
|
2890 Label IsInflated, Recursive ;
|
|
2891
|
|
2892 // Anticipate CAS -- Avoid RTS->RTO upgrade
|
|
2893 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
|
|
2894
|
|
2895 ld_ptr (mark_addr, Rmark); // fetch obj->mark
|
|
2896 // Triage: biased, stack-locked, neutral, inflated
|
|
2897
|
|
2898 if (UseBiasedLocking) {
|
|
2899 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
|
|
2900 // Invariant: if control reaches this point in the emitted stream
|
|
2901 // then Rmark has not been modified.
|
|
2902 }
|
|
2903 andcc (Rmark, 2, G0) ;
|
|
2904 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
|
|
2905 delayed()-> // Beware - dangling delay-slot
|
|
2906
|
|
2907 // Try stack-lock acquisition.
|
|
2908 // Transiently install BUSY (0) encoding in the mark word.
|
|
2909 // if the CAS of 0 into the mark was successful then we execute:
|
|
2910 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
|
|
2911 // ST obj->mark = box -- overwrite transient 0 value
|
|
2912 // This presumes TSO, of course.
|
|
2913
|
|
2914 mov (0, Rscratch) ;
|
|
2915 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
|
|
2916 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
|
|
2917 casn (mark_addr.base(), Rmark, Rscratch) ;
|
|
2918 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
|
|
2919 cmp (Rscratch, Rmark) ;
|
|
2920 brx (Assembler::notZero, false, Assembler::pn, Recursive) ;
|
|
2921 delayed() ->
|
|
2922 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2923 if (counters != NULL) {
|
|
2924 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
|
|
2925 }
|
|
2926 br (Assembler::always, false, Assembler::pt, done);
|
|
2927 delayed() ->
|
|
2928 st_ptr (Rbox, mark_addr) ;
|
|
2929
|
|
2930 bind (Recursive) ;
|
|
2931 // Stack-lock attempt failed - check for recursive stack-lock.
|
|
2932 // Tests show that we can remove the recursive case with no impact
|
|
2933 // on refworkload 0.83. If we need to reduce the size of the code
|
|
2934 // emitted by compiler_lock_object(), the recursive case is a perfect
|
|
2935 // candidate.
|
|
2936 //
|
|
2937 // A more extreme idea is to always inflate on stack-lock recursion.
|
|
2938 // This lets us eliminate the recursive checks in compiler_lock_object
|
|
2939 // and compiler_unlock_object and the (box->dhw == 0) encoding.
|
|
2940 // A brief experiment - requiring changes to synchronizer.cpp and the interpreter -
|
|
2941 // showed a performance *increase*. In the same experiment I eliminated
|
|
2942 // the fast-path stack-lock code from the interpreter and always passed
|
|
2943 // control to the "slow" operators in synchronizer.cpp.
|
|
2944
|
|
2945 // RScratch contains the fetched obj->mark value from the failed CASN.
|
|
2946 #ifdef _LP64
|
|
2947 sub (Rscratch, STACK_BIAS, Rscratch);
|
|
2948 #endif
|
|
2949 sub(Rscratch, SP, Rscratch);
|
|
2950 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
|
|
2951 andcc (Rscratch, 0xfffff003, Rscratch);
|
|
2952 if (counters != NULL) {
|
|
2953 // Accounting needs the Rscratch register
|
|
2954 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2955 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
|
|
2956 br (Assembler::always, false, Assembler::pt, done) ;
|
|
2957 delayed()->nop() ;
|
|
2958 } else {
|
|
2959 br (Assembler::always, false, Assembler::pt, done) ;
|
|
2960 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2961 }
|
|
2962
|
|
2963 bind (IsInflated) ;
|
|
2964 if (EmitSync & 64) {
|
|
2965 // If m->owner != null goto IsLocked
|
|
2966 // Test-and-CAS vs CAS
|
|
2967 // Pessimistic form avoids futile (doomed) CAS attempts
|
|
2968 // The optimistic form avoids RTS->RTO cache line upgrades.
|
|
2969 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
|
|
2970 andcc (Rscratch, Rscratch, G0) ;
|
|
2971 brx (Assembler::notZero, false, Assembler::pn, done) ;
|
|
2972 delayed()->nop() ;
|
|
2973 // m->owner == null : it's unlocked.
|
|
2974 }
|
|
2975
|
|
2976 // Try to CAS m->owner from null to Self
|
|
2977 // Invariant: if we acquire the lock then _recursions should be 0.
|
|
2978 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
|
|
2979 mov (G2_thread, Rscratch) ;
|
|
2980 casn (Rmark, G0, Rscratch) ;
|
|
2981 cmp (Rscratch, G0) ;
|
|
2982 // ST box->displaced_header = NonZero.
|
|
2983 // Any non-zero value suffices:
|
|
2984 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
|
|
2985 st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
|
|
2986 // Intentional fall-through into done
|
|
2987 }
|
|
2988
|
|
2989 bind (done) ;
|
|
2990 }
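
// Sketch (comments only, not emitted code) of the stack-lock fast path that
// the block above emits; names and the CAS notation are illustrative:
//
//   mark = obj->mark | unlocked_value;            // expected neutral mark
//   if (CAS(&obj->mark, mark, 0 /* BUSY */) == mark) {
//     box->dhw  = mark;                           // save displaced mark in the box
//     obj->mark = box;                            // publish the stack-lock (TSO store)
//     // fast path succeeded -> done
//   } else {
//     // CAS failed: either a recursive stack-lock by this thread (the fetched
//     // value points into our own stack page range, tested against SP above),
//     // or the mark is inflated/contended, in which case we CAS _owner.
//   }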

void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) {
  Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());

  Label done;

  if (EmitSync & 4) {
    cmp(SP, G0);
    return;
  }

  if (EmitSync & 8) {
    if (UseBiasedLocking) {
      biased_locking_exit(mark_addr, Rscratch, done);
    }

    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    cmp(Rmark, G0);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->nop();

    // Check if it is still a lightweight lock; this is true if we see
    // the stack address of the basicLock in the markOop of the object
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), Rbox, Rmark,
                    (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(Rbox, Rmark);
    bind(done);
    return;
  }

  // Beware ... If the aggregate size of the code emitted by CLO and CUO is
  // too large, performance rolls abruptly off a cliff.
  // This could be related to inlining policies, code cache management, or
  // I$ effects.
  Label LStacked;

  if (UseBiasedLocking) {
    // TODO: eliminate redundant LDs of obj->mark
    biased_locking_exit(mark_addr, Rscratch, done);
  }

  ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  andcc(Rscratch, Rscratch, G0);
  brx(Assembler::zero, false, Assembler::pn, done);
  delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
  andcc(Rmark, 2, G0);
  brx(Assembler::zero, false, Assembler::pt, LStacked);
  delayed()->nop();

  // It's inflated
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into _owner which releases the lock.  This prevents loads
  // and stores within the critical section from reordering (floating)
  // past the store that releases the lock.  But TSO is a strong memory model
  // and that particular flavor of barrier is a noop, so we can safely elide it.
  // Note that we use 1-0 locking by default for the inflated case.  We
  // close the resultant (and rare) race by having contended threads in
  // monitorenter periodically poll _owner.
  ld_ptr(Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch);
  ld_ptr(Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox);
  xor3(Rscratch, G2_thread, Rscratch);
  orcc(Rbox, Rscratch, Rbox);
  brx(Assembler::notZero, false, Assembler::pn, done);
  delayed()->ld_ptr(Address(Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch);
  ld_ptr(Address(Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox);
  orcc(Rbox, Rscratch, G0);
  if (EmitSync & 65536) {
    Label LSucc;
    brx(Assembler::notZero, false, Assembler::pn, LSucc);
    delayed()->nop();
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->st_ptr(G0, Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2));

    bind(LSucc);
    st_ptr(G0, Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2));
    if (os::is_MP()) { membar(StoreLoad); }
    ld_ptr(Address(Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
    mov(G2_thread, Rscratch);
    casn(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  } else {
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->st_ptr(G0, Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2));
  }

  bind(LStacked);
  // Consider: we could replace the expensive CAS in the exit
  // path with a simple ST of the displaced mark value fetched from
  // the on-stack basiclock box.  That admits a race where a thread T2
  // in the slow lock path -- inflating with monitor M -- could race a
  // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  // More precisely T1 in the stack-lock unlock path could "stomp" the
  // inflated mark value M installed by T2, resulting in an orphan
  // object monitor M and T2 becoming stranded.  We can remedy that situation
  // by having T2 periodically poll the object's mark word using timed wait
  // operations.  If T2 discovers that a stomp has occurred it vacates
  // the monitor M and wakes any other threads stranded on the now-orphan M.
  // In addition the monitor scavenger, which performs deflation,
  // would also need to check for orphan monitors and stranded threads.
  //
  // Finally, inflation is also used when T2 needs to assign a hashCode
  // to O and O is stack-locked by T1.  The "stomp" race could cause
  // an assigned hashCode value to be lost.  We can avoid that condition
  // and provide the necessary hashCode stability invariants by ensuring
  // that hashCode generation is idempotent between copying GCs.
  // For example we could compute the hashCode of an object O as
  // O's heap address XOR some high quality RNG value that is refreshed
  // at GC-time.  The monitor scavenger would install the hashCode
  // found in any orphan monitors.  Again, the mechanism admits a
  // lost-update "stomp" WAW race but detects and recovers as needed.
  //
  // A prototype implementation showed excellent results, although
  // the scavenger and timeout code was rather involved.

  casn(mark_addr.base(), Rbox, Rscratch);
  cmp(Rbox, Rscratch);
  // Intentional fall through into done ...

  bind(done);
}
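
// Sketch (comments only) of the unlock fast path emitted above; field names
// are illustrative:
//
//   dhw = box->displaced_header;
//   if (dhw == 0) goto done;                 // recursive stack-lock: nothing to undo
//   if ((obj->mark & 2) == 0) {              // stack-locked (LStacked)
//     CAS(&obj->mark, box, dhw);             // restore the displaced mark word
//     goto done;
//   }
//   // Inflated: 1-0 exit.  If this thread owns the monitor and _recursions
//   // is 0, release by simply storing 0 into _owner (no CAS); contending
//   // threads in monitorenter periodically poll _owner to close the rare
//   // race.  Under TSO the "release" barrier before that store is a no-op.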

void MacroAssembler::print_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  // %%%%% need to implement this
}

void MacroAssembler::push_IU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_IU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::push_FPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_FPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::push_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, next2, ok;
    Register t1 = L0;
    Register t2 = L1;
    Register t3 = L2;

    save_frame(0);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    or3(t1, t2, t3);
    cmp(t1, t2);
    br(Assembler::greaterEqual, false, Assembler::pn, next);
    delayed()->nop();
    stop("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
    or3(t3, t2, t3);
    cmp(t1, t2);
    br(Assembler::lessEqual, false, Assembler::pn, next2);
    delayed()->nop();
    stop("assert(top <= end)");
    should_not_reach_here();

    bind(next2);
    and3(t3, MinObjAlignmentInBytesMask, t3);
    cmp(t3, 0);
    br(Assembler::lessEqual, false, Assembler::pn, ok);
    delayed()->nop();
    stop("assert(aligned)");
    should_not_reach_here();

    bind(ok);
    restore();
  }
#endif
}
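
// verify_tlab() above amounts to the following debug-only checks (pseudocode);
// t3 accumulates start | top | end so a single mask test covers the alignment
// of all three pointers:
//
//   assert(tlab_start <= tlab_top);
//   assert(tlab_top   <= tlab_end);
//   assert(((tlab_start | tlab_top | tlab_end) & MinObjAlignmentInBytesMask) == 0);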

void MacroAssembler::eden_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if known at compile time
  Register t1,                       // temp register
  Register t2,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
) {
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1, t2);
  assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  // get eden boundaries
  // note: we need both top & top_addr!
  const Register top_addr = t1;
  const Register end      = t2;

  CollectedHeap* ch = Universe::heap();
  set((intx)ch->top_addr(), top_addr);
  intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
  ld_ptr(top_addr, delta, end);
  ld_ptr(top_addr, 0, obj);

  // try to allocate
  Label retry;
  bind(retry);
#ifdef ASSERT
  // make sure eden top is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, obj);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    stop("eden top is not properly aligned");
    bind(L);
  }
#endif // ASSERT
  const Register free = end;
  sub(end, obj, free);                              // compute amount of free space
  if (var_size_in_bytes->is_valid()) {
    // size is unknown at compile time
    cmp(free, var_size_in_bytes);
    br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
    delayed()->add(obj, var_size_in_bytes, end);
  } else {
    // size is known at compile time
    cmp(free, con_size_in_bytes);
    br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
    delayed()->add(obj, con_size_in_bytes, end);
  }
  // Compare obj with the value at top_addr; if still equal, swap the value of
  // end with the value at top_addr.  If not equal, read the value at top_addr
  // into end.
  casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  // if someone beat us on the allocation, try again, otherwise continue
  cmp(obj, end);
  brx(Assembler::notEqual, false, Assembler::pn, retry);
  delayed()->mov(end, obj);                         // nop if successful since obj == end

#ifdef ASSERT
  // make sure eden top is properly aligned
  {
    Label L;
    const Register top_addr = t1;

    set((intx)ch->top_addr(), top_addr);
    ld_ptr(top_addr, 0, top_addr);
    btst(MinObjAlignmentInBytesMask, top_addr);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    stop("eden top is not properly aligned");
    bind(L);
  }
#endif // ASSERT
}
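
// Rough shape of the shared-eden fast path above, as C-like pseudocode
// (illustrative only; registers are reused and the atomic exchange is done
// via casx_under_lock):
//
//   obj = *eden_top;
//   for (;;) {
//     if (eden_end - obj < size) goto slow_case;     // unsigned compare
//     new_top = obj + size;
//     old = CAS(eden_top, obj, new_top);
//     if (old == obj) break;                         // we own [obj, new_top)
//     obj = old;                                     // lost the race - retry
//   }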

void MacroAssembler::tlab_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if known at compile time
  Register t1,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
) {
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1);
  assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  const Register free = t1;

  verify_tlab();

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);

  // calculate amount of free space
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
  sub(free, obj, free);

  Label done;
  if (var_size_in_bytes == noreg) {
    cmp(free, con_size_in_bytes);
  } else {
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    stop("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}
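
// The TLAB fast path above is conventional thread-local bump allocation
// (pseudocode; no atomics are needed because the TLAB is private to the
// current thread):
//
//   obj = thread->tlab_top;
//   if (thread->tlab_end - obj < size) goto slow_case;
//   thread->tlab_top = obj + size;
//   // obj now points at the freshly allocated block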

void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = O0;
  Register t1  = G1;
  Register t2  = G3;
  Register t3  = O1;
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    br(Assembler::always, false, Assembler::pt, slow_case);
    delayed()->nop();
  }

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);

  // calculate amount of free space
  sub(t1, top, t1);
  srl_ptr(t1, LogHeapWordSize, t1);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmp(t1, t2);
  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);

  // increment waste limit to prevent getting stuck on this slow path
  delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  br(Assembler::always, false, Assembler::pt, try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null(top, false, Assembler::pn, do_refill);
  delayed()->nop();

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  st_ptr(t2, top, oopDesc::klass_offset_in_bytes());
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  verify_oop(top);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp(t1, t2);
    br(Assembler::equal, false, Assembler::pt, ok);
    delayed()->nop();
    stop("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
  verify_tlab();
  br(Assembler::always, false, Assembler::pt, retry);
  delayed()->nop();
}
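
// Refill policy implemented above when a TLAB allocation misses (pseudocode;
// free space is measured in heap words):
//
//   free = tlab_end - tlab_top;
//   if (free > refill_waste_limit) {
//     // Too much space left to throw away: keep the TLAB, bump the waste
//     // limit so we stop retrying this path, and allocate the object
//     // directly in the shared eden (try_eden).
//   } else {
//     // Discard the TLAB: fill its unused tail with a filler int[] so the
//     // heap stays parseable, then refill the TLAB from eden (do_refill).
//   }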

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
  Address counter_addr(Rtmp1, counter_ptr);
  load_contents(counter_addr, Rtmp2);
  inc(Rtmp2);
  store_contents(Rtmp2, counter_addr);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  Address flag(temp, (address)flag_addr, relocInfo::none);
  _masm->sethi(flag);
  _masm->ldub(flag, temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
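
// SkipIfEqual is a scoped helper: the constructor emits a load of *flag_addr,
// a tst, and a branch over whatever is emitted inside the scope; the
// destructor binds the branch target.  A hypothetical use (the flag name is
// illustrative only):
//
//   {
//     SkipIfEqual skip(masm, Rtemp, &SomeBoolFlag, Assembler::zero);
//     // ... code emitted here executes at runtime only when SomeBoolFlag
//     //     is true; otherwise the branch above skips it ...
//   }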

// Writes to successive stack pages, until the given size is reached, to check
// for stack overflow + shadow pages.  This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Use stack pointer in temp stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  for (int i = 0; i < StackShadowPages-1; i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}
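
// Equivalent pseudocode for the bang sequence above: starting at SP, touch
// one word in each successively lower page until Rsize bytes are covered,
// then touch the shadow pages below that.
//
//   tsp = SP;
//   do {
//     *(tsp - page_size + STACK_BIAS) = 0;     // bang one page
//     size -= page_size;
//     tsp  -= page_size;                       // (done in the branch delay slot)
//   } while (size > 0);
//   for (i = 0; i < StackShadowPages - 1; i++)
//     *(tsp - i * page_size + STACK_BIAS) = 0;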