annotate src/cpu/sparc/vm/interp_masm_sparc.cpp @ 1367:9e321dcfa5b7
6940726: Use BIS instruction for allocation prefetch on Sparc
Summary: Use BIS instruction for allocation prefetch on Sparc
Reviewed-by: twisti
author: kvn
date: Wed, 07 Apr 2010 12:39:27 -0700
parents: 3cf667df43ef
children: c640000b7cc1
/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_interp_masm_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
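
// Worked example (hypothetical sizes, untagged 32-bit stack): with
// locals_size = 5 words and args_size = 3 words, delta = 2 words; round_to
// keeps it at 2 (already even) and the shift yields 8 bytes of extra local
// space.  If locals_size <= args_size, the branch above clamps delta to zero.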

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch.  The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                         // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  set(tbl, G3_scratch);                                  // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);      // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch.  The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp(IdispatchAddress, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}



void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub(Lbcp, 0, Lbyte_code);  // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread.
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry.
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                    break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                         break;
  case btos:                                       // fall through
  case ctos:                                       // fall through
  case stos:                                       // fall through
  case itos: ld(val_addr, Otos_l1);                        break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f);  break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d);  break;
  case vtos: /* nothing to do */                           break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state.
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    tst(thr_state);
    br(zero, false, pt, L);  // if (thread->jvmti_thread_state() == NULL) exit;
    delayed()->nop();

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
    br(Assembler::notEqual, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry.
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp(G3_scratch, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}
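
// Dispatch sketch (informal): if Lbyte_code holds 0x60 (the iadd opcode), the
// sll above scales it by wordSize and the ld_ptr fetches table[0x60], i.e. the
// iadd handler generated for this TosState; the jmp enters it while the delay
// slot advances Lbcp to the next bytecode.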


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
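// Layout sketch (assuming a tagged 32-bit stack): a long then occupies two
// (tag, value) element pairs, one 32-bit half per slot, so the dup*/swap
// bytecodes can copy slots without knowing whether they hold two ints or
// one long.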

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize());
#endif
}

#ifdef ASSERT
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
                                                 Register r,
                                                 Register scratch) {
  if (TaggedStackInterpreter) {
    Label ok, long_ok;
    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
    if (t == frame::TagCategory2) {
      cmp(r, G0);
      brx(Assembler::equal, false, Assembler::pt, long_ok);
      delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
      stop("stack long/double tag value bad");
      bind(long_ok);
      cmp(r, G0);
    } else if (t == frame::TagValue) {
      cmp(r, G0);
    } else {
      assert_different_registers(r, scratch);
      mov(t, scratch);
      cmp(r, scratch);
    }
    brx(Assembler::equal, false, Assembler::pt, ok);
    delayed()->nop();
    // Also compare if the stack value is zero, then the tag might
    // not have been set coming from deopt.
    ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
    cmp(r, G0);
    brx(Assembler::equal, false, Assembler::pt, ok);
    delayed()->nop();
    stop("Stack tag value is bad");
    bind(ok);
  }
}
#endif // ASSERT

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  // Uses destination register r for scratch
  debug_only(verify_stack_tag(frame::TagValue, r));
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize());
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  // Uses destination register r for scratch
  debug_only(verify_stack_tag(frame::TagReference, r, scratch));
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize());
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  // Uses destination register r for scratch
  debug_only(verify_stack_tag(frame::TagCategory2, r));
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize());
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  debug_only(verify_stack_tag(frame::TagValue, scratch));
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize());
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  debug_only(verify_stack_tag(frame::TagCategory2, scratch));
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize());
  debug_only(verify_esp(Lesp));
}


// (Note use register first, then decrement so dec can be done during store stall)
void InterpreterMacroAssembler::tag_stack(Register r) {
  if (TaggedStackInterpreter) {
    st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
  }
}

void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
  if (TaggedStackInterpreter) {
    assert(frame::TagValue == 0, "TagValue must be zero");
    if (t == frame::TagValue) {
      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
    } else if (t == frame::TagCategory2) {
      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
      // Tag next slot down too
      st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
    } else {
      assert_different_registers(r, O3);
      mov(t, O3);
      st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
    }
  }
}
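
// With +TaggedStackInterpreter each expression-stack element is a (tag, value)
// pair: the stores above fill the tag slot at Lesp + tag_offset_in_bytes(),
// while the push_* helpers below store the value slot and only then decrement
// Lesp (hence the "use register first, then decrement" note above).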

void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  tag_stack(frame::TagValue, r);
  st(r, Lesp, Interpreter::value_offset_in_bytes());
  dec(Lesp, Interpreter::stackElementSize());
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  tag_stack(frame::TagReference, r);
  st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
  dec(Lesp, Interpreter::stackElementSize());
}

void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
  assert_not_delayed();
  tag_stack(tag);
  st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
  dec(Lesp, Interpreter::stackElementSize());
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word
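// (So, informally, the long 0x1122334455667788 keeps 0x11223344 in Otos_l1
// and 0x55667788 in Otos_l2 on 32-bit; under _LP64 the whole value is in
// Otos_l.)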

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  tag_stack(frame::TagCategory2, r);
  // Longs are stored in memory-correct order, even if unaligned,
  // and may be separated by stack tags.
  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize());
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  tag_stack(frame::TagValue, Otos_i);
  stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
  dec(Lesp, Interpreter::stackElementSize());
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  tag_stack(frame::TagCategory2, Otos_i);
  // Doubles are stored in memory-correct order, even if unaligned,
  // and may be separated by stack tags.
  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize());
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();          break;
  case btos: push_i();            break;
  case ctos:
  case stos: push_i();            break;
  case itos: push_i();            break;
  case ltos: push_l();            break;
  case ftos: push_f();            break;
  case dtos: push_d();            break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();           break;
  case btos: pop_i();             break;
  case ctos:
  case stos: pop_i();             break;
  case itos: pop_i();             break;
  case ltos: pop_l();             break;
  case ftos: pop_f();             break;
  case dtos: pop_d();             break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
                                                 Register tag) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
  if (TaggedStackInterpreter) {
    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
  }
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
                                                  Register tag) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
  if (TaggedStackInterpreter) {
    st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
  }
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {

  sll(param_count, Interpreter::logStackElementSize(), param_count);
  if (TaggedStackInterpreter) {
    add(param_count, Interpreter::value_offset_in_bytes(), param_count);  // get obj address
  }
  ld_ptr(Lesp, param_count, recv);  // gets receiver Oop
}
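
// Receiver sketch (informal): for a call with param_count == 2, the sll turns
// the element count into a byte offset, so the receiver Oop is read two stack
// elements above Lesp, just beneath the pushed arguments.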

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub(Lmonitors, wordSize, Lesp);

  // Reset SP by subtracting more space from Lesp.
  Label done;
  verify_oop(Lmethod);
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
  if (TaggedStackInterpreter) sll(Gframe_size, 1, Gframe_size);  // max_stack * 2 for TAGS
  add(Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size);

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll(Gframe_size, LogBytesPerWord, Gframe_size);
  sub(Lesp, Gframe_size, Gframe_size);
  and3(Gframe_size, -(2 * wordSize), Gframe_size);  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size);
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp(Rtemp, FP);
  brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp(Rsp, Rtemp);
  brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
  delayed()->nop();

  br(Assembler::always, false, Assembler::pn, OK);
  delayed()->nop();

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call.  Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available.

  ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    tst(scratch);
    br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
    delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need methodOop in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull(target, false, Assembler::pt, ok);
    delayed()->nop();
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
  default: ShouldNotReachHere();

  case   Signed: ldsb(Lbcp, bcp_offset, Rdst); break;  // high byte
  case Unsigned: ldub(Lbcp, bcp_offset, Rdst); break;  // high byte
  }
  ldub(Lbcp, bcp_offset + 1, Rtmp);  // low byte
  sll(Rdst, BitsPerByte, Rdst);
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case      set_CC: orcc(Rdst, Rtmp, Rdst); break;
  case dont_set_CC: or3( Rdst, Rtmp, Rdst); break;
  }
}
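
// Byte-assembly example (informal): with bytecode bytes 0xAB 0xCD at
// bcp_offset, the Unsigned variant computes (0xAB << 8) | 0xCD = 0xABCD,
// while the Signed variant sign-extends the high byte first, giving
// 0xFFFFABCD in a 32-bit register.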


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add(Lbcp, bcp_offset, Rtmp);
  andcc(Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case      set_CC: break;
  case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst);

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}
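
// Unaligned-path example (informal): for big-endian bytes b0 b1 b2 b3 the
// sequence above assembles Rdst = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
// when Lbcp + bcp_offset happens to be 4-byte aligned, the single load in
// the branch delay slot does the same job in one instruction.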


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e., if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp(ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}
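
// Usage sketch (informal): for an int[] access the caller would pass
// index_shift == 2 (log2 of the 4-byte element size), so res ends up as
// array + (index << 2); the array element base offset inside the arrayOop is
// folded into the actual load or store by the caller.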


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized);  // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state);  // save tos
  ld(access_flags, G3_scratch);  // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  tstbool(G1_scratch);
  br(Assembler::notZero, false, pn, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method.  However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull(G3_scratch, false, pt, unlock);
  delayed()->nop();

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba(false, unlocked);
    delayed()->nop();
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp(Rmptr, Rlimit);
      brx(Assembler::greaterEqualUnsigned, false, pn, L);
      delayed()->nop();
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(false, entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);  // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling.  Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba(false, restart);
      delayed()->nop();
    }

    bind(loop);
    cmp(Rtemp, G0);  // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);  // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp(Rmptr, Rlimit);
      brx(Assembler::greaterEqualUnsigned, false, pn, L);
      delayed()->nop();
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                        // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);  // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_oop(Lmethod);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break;   // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos: // fall through
  case ctos:
  case stos: // fall through
  case atos: // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos: // fall through
  case dtos: // fall through
  case vtos: /* nothing to do */ break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell if we're returning to
    // interpreted or compiled code, so just be safe and use G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
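    // e.g. (informal) Otos_l1 = 0x00000001 and Otos_l2 = 0xFFFFFFFF
    //      pack into G1 = 0x00000001FFFFFFFF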
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking;
// it must be initialized with the object to lock.
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp(mark_reg, temp_reg);
    brx(Assembler::equal, true, Assembler::pt, done);
    delayed()->nop();

    // We did not see an unlocked object so try the fast recursive case.

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer.
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.  F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
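    //
    // Worked check (informal): if the displaced header address lies, say,
    // 0x100 bytes above the unbiased SP and the two low lock bits of the mark
    // are 00, then temp_reg & 0xFFFFF003 == 0 and the lock is treated as a
    // stack lock already owned by this frame, i.e. a recursive enter.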
    andcc(temp_reg, 0xFFFFF003, G0);

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object.  Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock.
// Throws IllegalMonitorStateException if object is not locked by current thread.
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case.
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a lightweight lock; if so we just unlock
    // the object and we are done.

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}
1344 | |
1345 #ifndef CC_INTERP | |
1346 | |
1347 // Get the method data pointer from the methodOop and set the | |
1348 // specified register to its value. | |
1349 | |
1350 void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) { | |
1351 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1352 Label get_continue; | |
1353 | |
1354 ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr); | |
1355 test_method_data_pointer(get_continue); | |
1356 add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr); | |
1357 if (Roff != noreg) | |
1358 // Roff contains a method data index ("mdi"). It defaults to zero. | |
1359 add(ImethodDataPtr, Roff, ImethodDataPtr); | |
1360 bind(get_continue); | |
1361 } | |
1362 | |
1363 // Set the method data pointer for the current bcp. | |
1364 | |
1365 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { | |
1366 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1367 Label zero_continue; | |
1368 | |
1369 // Test MDO to avoid the call if it is NULL. | |
727 | 1370 ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr); |
0 | 1371 test_method_data_pointer(zero_continue); |
1372 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); | |
1373 set_method_data_pointer_offset(O0); | |
1374 bind(zero_continue); | |
1375 } | |
1376 | |
1377 // Test ImethodDataPtr. If it is null, continue at the specified label | |
1378 | |
1379 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) { | |
1380 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1381 #ifdef _LP64 | |
1382 bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue); | |
1383 #else | |
1384 tst(ImethodDataPtr); | |
1385 br(Assembler::zero, false, Assembler::pn, zero_continue); | |
1386 #endif | |
1387 delayed()->nop(); | |
1388 } | |
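// Note (illustrative): on 64-bit, bpr branches on the register value |
// directly; on 32-bit, tst/br goes through the integer condition codes. |
// Either way control reaches zero_continue when ImethodDataPtr is NULL. |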
1389 | |
1390 void InterpreterMacroAssembler::verify_method_data_pointer() { | |
1391 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1392 #ifdef ASSERT | |
1393 Label verify_continue; | |
1394 test_method_data_pointer(verify_continue); | |
1395 | |
1396 // If the mdp is valid, it will point to a DataLayout header which is | |
1397 // consistent with the bcp. The converse is highly probable also. | |
1398 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); | |
727 | 1399 ld_ptr(Lmethod, methodOopDesc::const_offset(), O5); |
0 | 1400 add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch); |
1401 add(G3_scratch, O5, G3_scratch); | |
1402 cmp(Lbcp, G3_scratch); | |
1403 brx(Assembler::equal, false, Assembler::pt, verify_continue); | |
1404 | |
1405 Register temp_reg = O5; | |
1406 delayed()->mov(ImethodDataPtr, temp_reg); | |
1407 // %%% should use call_VM_leaf here? | |
1408 //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); | |
1409 save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); | |
727 | 1410 Address d_save(FP, -sizeof(jdouble) + STACK_BIAS); |
0 | 1411 stf(FloatRegisterImpl::D, Ftos_d, d_save); |
1412 mov(temp_reg->after_save(), O2); | |
1413 save_thread(L7_thread_cache); | |
1414 call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none); | |
1415 delayed()->nop(); | |
1416 restore_thread(L7_thread_cache); | |
1417 ldf(FloatRegisterImpl::D, d_save, Ftos_d); | |
1418 restore(); | |
1419 bind(verify_continue); | |
1420 #endif // ASSERT | |
1421 } | |
1422 | |
1423 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count, | |
1424 Register cur_bcp, | |
1425 Register Rtmp, | |
1426 Label &profile_continue) { | |
1427 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1428 // Control will flow to "profile_continue" if the counter is less than the | |
1429 // limit or if we call profile_method() | |
1430 | |
1431 Label done; | |
1432 | |
1433 // if no method data exists, and the counter is high enough, make one | |
1434 #ifdef _LP64 | |
1435 bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done); | |
1436 #else | |
1437 tst(ImethodDataPtr); | |
1438 br(Assembler::notZero, false, Assembler::pn, done); | |
1439 #endif | |
1440 | |
1441 // Test to see if we should create a method data oop | |
727 | 1442 AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit); |
0 | 1443 #ifdef _LP64 |
1444 delayed()->nop(); | |
727 | 1445 sethi(profile_limit, Rtmp); |
0 | 1446 #else |
727 | 1447 delayed()->sethi(profile_limit, Rtmp); |
0 | 1448 #endif |
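// Why the split above (illustrative): on LP64 the sethi of a 64-bit |
// AddressLiteral expands to a multi-instruction sequence and cannot occupy |
// the single branch delay slot, so a nop fills the slot instead; on 32-bit |
// a lone sethi fits in the delay slot. |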
727 | 1449 ld(Rtmp, profile_limit.low10(), Rtmp); |
0 | 1450 cmp(invocation_count, Rtmp); |
1451 br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); | |
1452 delayed()->nop(); | |
1453 | |
1454 // Build it now. | |
1455 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp); | |
1456 set_method_data_pointer_offset(O0); | |
1457 ba(false, profile_continue); | |
1458 delayed()->nop(); | |
1459 bind(done); | |
1460 } | |
1461 | |
1462 // Store a value at some constant offset from the method data pointer. | |
1463 | |
1464 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) { | |
1465 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1466 st_ptr(value, ImethodDataPtr, constant); | |
1467 } | |
1468 | |
1469 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter, | |
1470 Register bumped_count, | |
1471 bool decrement) { | |
1472 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1473 | |
1474 // Load the counter. | |
1475 ld_ptr(counter, bumped_count); | |
1476 | |
1477 if (decrement) { | |
1478 // Decrement the register. Set condition codes. | |
1479 subcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1480 | |
1481 // If the decrement causes the counter to overflow, stay negative | |
1482 Label L; | |
1483 brx(Assembler::negative, true, Assembler::pn, L); | |
1484 | |
1485 // Store the decremented counter, if it is still negative. | |
1486 delayed()->st_ptr(bumped_count, counter); | |
1487 bind(L); | |
1488 } else { | |
1489 // Increment the register. Set carry flag. | |
1490 addcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1491 | |
1492 // If the increment causes the counter to overflow, pull back by 1. | |
1493 assert(DataLayout::counter_increment == 1, "subc works"); | |
1494 subc(bumped_count, G0, bumped_count); | |
1495 | |
1496 // Store the incremented counter. | |
1497 st_ptr(bumped_count, counter); | |
1498 } | |
1499 } | |
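// Worked example of the saturating increment (illustrative), with the |
// counter already at all ones: |
//   addcc: 0xFF..FF + 1 = 0, carry set |
//   subc:  0 - 0 - carry = 0xFF..FF   -- the counter sticks at its maximum |
// When no overflow occurs, carry is clear and subc leaves the sum intact. |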
1500 | |
1501 // Increment the value at some constant offset from the method data pointer. | |
1502 | |
1503 void InterpreterMacroAssembler::increment_mdp_data_at(int constant, | |
1504 Register bumped_count, | |
1505 bool decrement) { | |
1506 // Locate the counter at a fixed offset from the mdp: | |
727 | 1507 Address counter(ImethodDataPtr, constant); |
0 | 1508 increment_mdp_data_at(counter, bumped_count, decrement); |
1509 } | |
1510 | |
1511 // Increment the value at some non-fixed (reg + constant) offset from | |
1512 // the method data pointer. | |
1513 | |
1514 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg, | |
1515 int constant, | |
1516 Register bumped_count, | |
1517 Register scratch2, | |
1518 bool decrement) { | |
1519 // Add the constant to reg to get the offset. | |
1520 add(ImethodDataPtr, reg, scratch2); | |
727 | 1521 Address counter(scratch2, constant); |
0 | 1522 increment_mdp_data_at(counter, bumped_count, decrement); |
1523 } | |
1524 | |
1525 // Set a flag value at the current method data pointer position. | |
1526 // Updates a single byte of the header, to avoid races with other header bits. | |
1527 | |
1528 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant, | |
1529 Register scratch) { | |
1530 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1531 // Load the data header | |
1532 ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch); | |
1533 | |
1534 // Set the flag | |
1535 or3(scratch, flag_constant, scratch); | |
1536 | |
1537 // Store the modified header. | |
1538 stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset())); | |
1539 } | |
1540 | |
1541 // Test the location at some offset from the method data pointer. | |
1542 // If it is not equal to value, branch to the not_equal_continue Label. | |
1543 // Set condition codes to match the nullness of the loaded value. | |
1544 | |
1545 void InterpreterMacroAssembler::test_mdp_data_at(int offset, | |
1546 Register value, | |
1547 Label& not_equal_continue, | |
1548 Register scratch) { | |
1549 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1550 ld_ptr(ImethodDataPtr, offset, scratch); | |
1551 cmp(value, scratch); | |
1552 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue); | |
1553 delayed()->tst(scratch); | |
1554 } | |
1555 | |
1556 // Update the method data pointer by the displacement located at some fixed | |
1557 // offset from the method data pointer. | |
1558 | |
1559 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp, | |
1560 Register scratch) { | |
1561 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1562 ld_ptr(ImethodDataPtr, offset_of_disp, scratch); | |
1563 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1564 } | |
1565 | |
1566 // Update the method data pointer by the displacement located at the | |
1567 // offset (reg + offset_of_disp). | |
1568 | |
1569 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg, | |
1570 int offset_of_disp, | |
1571 Register scratch) { | |
1572 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1573 add(reg, offset_of_disp, scratch); | |
1574 ld_ptr(ImethodDataPtr, scratch, scratch); | |
1575 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1576 } | |
1577 | |
1578 // Update the method data pointer by a simple constant displacement. | |
1579 | |
1580 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) { | |
1581 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1582 add(ImethodDataPtr, constant, ImethodDataPtr); | |
1583 } | |
1584 | |
1585 // Update the method data pointer for a _ret bytecode whose target | |
1586 // was not among our cached targets. | |
1587 | |
1588 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state, | |
1589 Register return_bci) { | |
1590 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1591 push(state); | |
1592 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile | |
1593 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); | |
1594 ld_ptr(l_tmp, return_bci); | |
1595 pop(state); | |
1596 } | |
1597 | |
1598 // Count a taken branch in the bytecodes. | |
1599 | |
1600 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) { | |
1601 if (ProfileInterpreter) { | |
1602 Label profile_continue; | |
1603 | |
1604 // If no method data exists, go to profile_continue. | |
1605 test_method_data_pointer(profile_continue); | |
1606 | |
1607 // We are taking a branch. Increment the taken count. | |
1608 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count); | |
1609 | |
1610 // The method data pointer needs to be updated to reflect the new target. | |
1611 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch); | |
1612 bind (profile_continue); | |
1613 } | |
1614 } | |
1615 | |
1616 | |
1617 // Count a not-taken branch in the bytecodes. | |
1618 | |
1619 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) { | |
1620 if (ProfileInterpreter) { | |
1621 Label profile_continue; | |
1622 | |
1623 // If no method data exists, go to profile_continue. | |
1624 test_method_data_pointer(profile_continue); | |
1625 | |
1626 // We are not taking the branch. Increment the not taken count. |
1627 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch); | |
1628 | |
1629 // The method data pointer needs to be updated to correspond to the | |
1630 // next bytecode. | |
1631 update_mdp_by_constant(in_bytes(BranchData::branch_data_size())); | |
1632 bind (profile_continue); | |
1633 } | |
1634 } | |
1635 | |
1636 | |
1637 // Count a non-virtual call in the bytecodes. | |
1638 | |
1639 void InterpreterMacroAssembler::profile_call(Register scratch) { | |
1640 if (ProfileInterpreter) { | |
1641 Label profile_continue; | |
1642 | |
1643 // If no method data exists, go to profile_continue. | |
1644 test_method_data_pointer(profile_continue); | |
1645 | |
1646 // We are making a call. Increment the count. | |
1647 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1648 | |
1649 // The method data pointer needs to be updated to reflect the new target. | |
1650 update_mdp_by_constant(in_bytes(CounterData::counter_data_size())); | |
1651 bind (profile_continue); | |
1652 } | |
1653 } | |
1654 | |
1655 | |
1656 // Count a final call in the bytecodes. | |
1657 | |
1658 void InterpreterMacroAssembler::profile_final_call(Register scratch) { | |
1659 if (ProfileInterpreter) { | |
1660 Label profile_continue; | |
1661 | |
1662 // If no method data exists, go to profile_continue. | |
1663 test_method_data_pointer(profile_continue); | |
1664 | |
1665 // We are making a call. Increment the count. | |
1666 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1667 | |
1668 // The method data pointer needs to be updated to reflect the new target. | |
1669 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1670 bind (profile_continue); | |
1671 } | |
1672 } | |
1673 | |
1674 | |
1675 // Count a virtual call in the bytecodes. | |
1676 | |
1677 void InterpreterMacroAssembler::profile_virtual_call(Register receiver, | |
1678 Register scratch) { | |
1679 if (ProfileInterpreter) { | |
1680 Label profile_continue; | |
1681 | |
1682 // If no method data exists, go to profile_continue. | |
1683 test_method_data_pointer(profile_continue); | |
1684 | |
1685 // Record the receiver type. | |
1206 | 1686 record_klass_in_profile(receiver, scratch, true); |
0 | 1687 |
1688 // The method data pointer needs to be updated to reflect the new target. | |
1689 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1690 bind (profile_continue); | |
1691 } | |
1692 } | |
1693 | |
1694 void InterpreterMacroAssembler::record_klass_in_profile_helper( | |
1695 Register receiver, Register scratch, | |
1206 | 1696 int start_row, Label& done, bool is_virtual_call) { |
1697 if (TypeProfileWidth == 0) { |
1698 if (is_virtual_call) { |
1699 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
1700 } |
967 | 1701 return; |
1206 | 1702 } |
967 | 1703 |
0 | 1704 int last_row = VirtualCallData::row_limit() - 1; |
1705 assert(start_row <= last_row, "must be work left to do"); | |
1706 // Test this row for both the receiver and for null. | |
1707 // Take any of three different outcomes: | |
1708 // 1. found receiver => increment count and goto done | |
1709 // 2. found null => keep looking for case 1, maybe allocate this cell | |
1710 // 3. found something else => keep looking for cases 1 and 2 | |
1711 // Case 3 is handled by a recursive call. | |
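// Row layout sketch (illustrative): each row is an inline |
// (receiver klass, count) pair in the MDO, |
//   receiver(0), receiver_count(0), receiver(1), receiver_count(1), ... |
// with row_limit() == TypeProfileWidth rows available per call site. |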
1712 for (int row = start_row; row <= last_row; row++) { | |
1713 Label next_test; | |
1714 bool test_for_null_also = (row == start_row); | |
1715 | |
1716 // See if the receiver is receiver[n]. | |
1717 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); | |
1718 test_mdp_data_at(recvr_offset, receiver, next_test, scratch); | |
1206 | 1719 // delayed()->tst(scratch); |
0 | 1720 |
1721 // The receiver is receiver[n]. Increment count[n]. | |
1722 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); | |
1723 increment_mdp_data_at(count_offset, scratch); | |
1724 ba(false, done); | |
1725 delayed()->nop(); | |
1726 bind(next_test); | |
1727 | |
1728 if (test_for_null_also) { | |
1206 | 1729 Label found_null; |
0 | 1730 // Failed the equality check on receiver[n]... Test for null. |
1731 if (start_row == last_row) { | |
1732 // The only thing left to do is handle the null case. | |
1206 | 1733 if (is_virtual_call) { |
1734 brx(Assembler::zero, false, Assembler::pn, found_null); |
1735 delayed()->nop(); |
1736 // Receiver did not match any saved receiver and there is no empty row for it. |
1251 | 1737 // Increment total counter to indicate polymorphic case. |
1206 | 1738 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
1739 ba(false, done); |
1740 delayed()->nop(); |
1741 bind(found_null); |
1742 } else { |
1743 brx(Assembler::notZero, false, Assembler::pt, done); |
1744 delayed()->nop(); |
1745 } |
0 | 1746 break; |
1747 } | |
1748 // Since null is rare, make it the branch-taken case. |
1749 brx(Assembler::zero, false, Assembler::pn, found_null); | |
1750 delayed()->nop(); | |
1751 | |
1752 // Put all the "Case 3" tests here. | |
1206 | 1753 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call); |
0 | 1754 |
1755 // Found a null. Keep searching for a matching receiver, | |
1756 // but remember that this is an empty (unused) slot. | |
1757 bind(found_null); | |
1758 } | |
1759 } | |
1760 | |
1761 // In the fall-through case, we found no matching receiver, but we | |
1762 // observed that receiver[start_row] is NULL. |
1763 | |
1764 // Fill in the receiver field and increment the count. | |
1765 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); | |
1766 set_mdp_data_at(recvr_offset, receiver); | |
1767 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); | |
1768 mov(DataLayout::counter_increment, scratch); | |
1769 set_mdp_data_at(count_offset, scratch); | |
1206 | 1770 if (start_row > 0) { |
1771 ba(false, done); |
1772 delayed()->nop(); |
1773 } |
0 | 1774 } |
1775 | |
1776 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, | |
1206 | 1777 Register scratch, bool is_virtual_call) { |
0 | 1778 assert(ProfileInterpreter, "must be profiling"); |
1779 Label done; | |
1780 | |
1206 | 1781 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call); |
0 | 1782 |
1783 bind (done); | |
1784 } | |
1785 | |
1786 | |
1787 // Count a ret in the bytecodes. | |
1788 | |
1789 void InterpreterMacroAssembler::profile_ret(TosState state, | |
1790 Register return_bci, | |
1791 Register scratch) { | |
1792 if (ProfileInterpreter) { | |
1793 Label profile_continue; | |
1794 uint row; | |
1795 | |
1796 // If no method data exists, go to profile_continue. | |
1797 test_method_data_pointer(profile_continue); | |
1798 | |
1799 // Update the total ret count. | |
1800 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1801 | |
1802 for (row = 0; row < RetData::row_limit(); row++) { | |
1803 Label next_test; | |
1804 | |
1805 // See if return_bci is equal to bci[n]: | |
1806 test_mdp_data_at(in_bytes(RetData::bci_offset(row)), | |
1807 return_bci, next_test, scratch); | |
1808 | |
1809 // return_bci is equal to bci[n]. Increment the count. | |
1810 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch); | |
1811 | |
1812 // The method data pointer needs to be updated to reflect the new target. | |
1813 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch); | |
1814 ba(false, profile_continue); | |
1815 delayed()->nop(); | |
1816 bind(next_test); | |
1817 } | |
1818 | |
1819 update_mdp_for_ret(state, return_bci); | |
1820 | |
1821 bind (profile_continue); | |
1822 } | |
1823 } | |
1824 | |
1825 // Profile an unexpected null in the bytecodes. | |
1826 void InterpreterMacroAssembler::profile_null_seen(Register scratch) { | |
1827 if (ProfileInterpreter) { | |
1828 Label profile_continue; | |
1829 | |
1830 // If no method data exists, go to profile_continue. | |
1831 test_method_data_pointer(profile_continue); | |
1832 | |
1833 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch); | |
1834 | |
1835 // The method data pointer needs to be updated. | |
1836 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1837 if (TypeProfileCasts) { | |
1838 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1839 } | |
1840 update_mdp_by_constant(mdp_delta); | |
1841 | |
1842 bind (profile_continue); | |
1843 } | |
1844 } | |
1845 | |
1846 void InterpreterMacroAssembler::profile_typecheck(Register klass, | |
1847 Register scratch) { | |
1848 if (ProfileInterpreter) { | |
1849 Label profile_continue; | |
1850 | |
1851 // If no method data exists, go to profile_continue. | |
1852 test_method_data_pointer(profile_continue); | |
1853 | |
1854 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1855 if (TypeProfileCasts) { | |
1856 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1857 | |
1858 // Record the object type. | |
1206 | 1859 record_klass_in_profile(klass, scratch, false); |
0 | 1860 } |
1861 | |
1862 // The method data pointer needs to be updated. | |
1863 update_mdp_by_constant(mdp_delta); | |
1864 | |
1865 bind (profile_continue); | |
1866 } | |
1867 } | |
1868 | |
1869 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) { | |
1870 if (ProfileInterpreter && TypeProfileCasts) { | |
1871 Label profile_continue; | |
1872 | |
1873 // If no method data exists, go to profile_continue. | |
1874 test_method_data_pointer(profile_continue); | |
1875 | |
1876 int count_offset = in_bytes(CounterData::count_offset()); | |
1877 // Back up the address, since we have already bumped the mdp. | |
1878 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); | |
1879 | |
1880 // *Decrement* the counter. We expect to see zero or small negatives. | |
1881 increment_mdp_data_at(count_offset, scratch, true); | |
1882 | |
1883 bind (profile_continue); | |
1884 } | |
1885 } | |
1886 | |
1887 // Count the default case of a switch construct. | |
1888 | |
1889 void InterpreterMacroAssembler::profile_switch_default(Register scratch) { | |
1890 if (ProfileInterpreter) { | |
1891 Label profile_continue; | |
1892 | |
1893 // If no method data exists, go to profile_continue. | |
1894 test_method_data_pointer(profile_continue); | |
1895 | |
1896 // Update the default case count | |
1897 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()), | |
1898 scratch); | |
1899 | |
1900 // The method data pointer needs to be updated. | |
1901 update_mdp_by_offset( | |
1902 in_bytes(MultiBranchData::default_displacement_offset()), | |
1903 scratch); | |
1904 | |
1905 bind (profile_continue); | |
1906 } | |
1907 } | |
1908 | |
1909 // Count the index'th case of a switch construct. | |
1910 | |
1911 void InterpreterMacroAssembler::profile_switch_case(Register index, | |
1912 Register scratch, | |
1913 Register scratch2, | |
1914 Register scratch3) { | |
1915 if (ProfileInterpreter) { | |
1916 Label profile_continue; | |
1917 | |
1918 // If no method data exists, go to profile_continue. | |
1919 test_method_data_pointer(profile_continue); | |
1920 | |
1921 // Build the base: (index * per_case_size()) + case_array_offset() |
1922 set(in_bytes(MultiBranchData::per_case_size()), scratch); | |
1923 smul(index, scratch, scratch); | |
1924 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch); | |
1925 | |
1926 // Update the case count | |
1927 increment_mdp_data_at(scratch, | |
1928 in_bytes(MultiBranchData::relative_count_offset()), | |
1929 scratch2, | |
1930 scratch3); | |
1931 | |
1932 // The method data pointer needs to be updated. | |
1933 update_mdp_by_offset(scratch, | |
1934 in_bytes(MultiBranchData::relative_displacement_offset()), | |
1935 scratch2); | |
1936 | |
1937 bind (profile_continue); | |
1938 } | |
1939 } | |
1940 | |
1941 // add an InterpMonitorElem to the stack (see frame_sparc.hpp) |
1942 | |
1943 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty, | |
1944 Register Rtemp, | |
1945 Register Rtemp2 ) { | |
1946 | |
1947 Register Rlimit = Lmonitors; | |
1948 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
1949 assert( (delta & LongAlignmentMask) == 0, | |
1950 "sizeof BasicObjectLock must be even number of doublewords"); | |
1951 | |
1952 sub( SP, delta, SP); | |
1953 sub( Lesp, delta, Lesp); | |
1954 sub( Lmonitors, delta, Lmonitors); | |
1955 | |
1956 if (!stack_is_empty) { | |
1957 | |
1958 // must copy stack contents down | |
1959 | |
1960 Label start_copying, next; | |
1961 | |
1962 // untested("monitor stack expansion"); | |
1963 compute_stack_base(Rtemp); | |
1964 ba( false, start_copying ); | |
1965 delayed()->cmp( Rtemp, Rlimit); // done? duplicated below | |
1966 | |
1967 // note: must copy from low memory upwards | |
1968 // On entry to loop, | |
1969 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS) | |
1970 // Loop mutates Rtemp | |
1971 | |
1972 bind( next); | |
1973 | |
1974 st_ptr(Rtemp2, Rtemp, 0); | |
1975 inc(Rtemp, wordSize); | |
1976 cmp(Rtemp, Rlimit); // are we done? (duplicated above) | |
1977 | |
1978 bind( start_copying ); | |
1979 | |
1980 brx( notEqual, true, pn, next ); | |
1981 delayed()->ld_ptr( Rtemp, delta, Rtemp2 ); | |
1982 | |
1983 // done copying stack | |
1984 } | |
1985 } | |
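// Equivalent C sketch of the copy loop above (illustrative): |
//   for (char* p = new_stack_base; p != Lmonitors; p += wordSize) |
//     *(intptr_t*)p = *(intptr_t*)(p + delta); |
// Every live stack word moves down by delta bytes, walking from low |
// addresses upwards, opening a monitor-sized gap for the new entry. |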
1986 | |
1987 // Locals | |
1988 #ifdef ASSERT | |
1989 void InterpreterMacroAssembler::verify_local_tag(frame::Tag t, | |
1990 Register base, | |
1991 Register scratch, | |
1992 int n) { | |
1993 if (TaggedStackInterpreter) { | |
1994 Label ok, long_ok; | |
1995 // Use dst for scratch | |
1996 assert_different_registers(base, scratch); | |
1997 ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch); | |
1998 if (t == frame::TagCategory2) { | |
1999 cmp(scratch, G0); | |
2000 brx(Assembler::equal, false, Assembler::pt, long_ok); | |
2001 delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch); | |
2002 stop("local long/double tag value bad"); | |
2003 bind(long_ok); | |
2004 // compare second half tag | |
2005 cmp(scratch, G0); | |
2006 } else if (t == frame::TagValue) { | |
2007 cmp(scratch, G0); | |
2008 } else { | |
2009 assert_different_registers(O3, base, scratch); | |
2010 mov(t, O3); | |
2011 cmp(scratch, O3); | |
2012 } | |
2013 brx(Assembler::equal, false, Assembler::pt, ok); | |
2014 delayed()->nop(); | |
2015 // Also accept a zero local value, since the tag might |
2016 // not have been set coming from deopt. |
2017 ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch); | |
2018 cmp(scratch, G0); | |
2019 brx(Assembler::equal, false, Assembler::pt, ok); | |
2020 delayed()->nop(); | |
2021 stop("Local tag value is bad"); | |
2022 bind(ok); | |
2023 } | |
2024 } | |
2025 #endif // ASSERT | |
2026 | |
2027 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) { | |
2028 assert_not_delayed(); | |
2029 sll(index, Interpreter::logStackElementSize(), index); | |
2030 sub(Llocals, index, index); | |
2031 debug_only(verify_local_tag(frame::TagReference, index, dst)); | |
2032 ld_ptr(index, Interpreter::value_offset_in_bytes(), dst); | |
2033 // Note: index must hold the effective address--the iinc template uses it | |
2034 } | |
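// Address sketch (illustrative): locals grow down from Llocals, so after |
// the sll/sub above, 'index' holds the effective address |
//   Llocals - (i << logStackElementSize) |
// of local slot i. |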
2035 | |
2036 // Just like access_local_ptr but the tag is a returnAddress | |
2037 void InterpreterMacroAssembler::access_local_returnAddress(Register index, | |
2038 Register dst ) { | |
2039 assert_not_delayed(); | |
2040 sll(index, Interpreter::logStackElementSize(), index); | |
2041 sub(Llocals, index, index); | |
2042 debug_only(verify_local_tag(frame::TagValue, index, dst)); | |
2043 ld_ptr(index, Interpreter::value_offset_in_bytes(), dst); | |
2044 } | |
2045 | |
2046 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) { | |
2047 assert_not_delayed(); | |
2048 sll(index, Interpreter::logStackElementSize(), index); | |
2049 sub(Llocals, index, index); | |
2050 debug_only(verify_local_tag(frame::TagValue, index, dst)); | |
2051 ld(index, Interpreter::value_offset_in_bytes(), dst); | |
2052 // Note: index must hold the effective address--the iinc template uses it | |
2053 } | |
2054 | |
2055 | |
2056 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) { | |
2057 assert_not_delayed(); | |
2058 sll(index, Interpreter::logStackElementSize(), index); | |
2059 sub(Llocals, index, index); | |
2060 debug_only(verify_local_tag(frame::TagCategory2, index, dst)); | |
2061 // First half stored at index n+1 (which grows down from Llocals[n]) | |
2062 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst); | |
2063 } | |
2064 | |
2065 | |
2066 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) { | |
2067 assert_not_delayed(); | |
2068 sll(index, Interpreter::logStackElementSize(), index); | |
2069 sub(Llocals, index, index); | |
2070 debug_only(verify_local_tag(frame::TagValue, index, G1_scratch)); | |
2071 ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst); | |
2072 } | |
2073 | |
2074 | |
2075 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) { | |
2076 assert_not_delayed(); | |
2077 sll(index, Interpreter::logStackElementSize(), index); | |
2078 sub(Llocals, index, index); | |
2079 debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch)); | |
2080 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst); | |
2081 } | |
2082 | |
2083 | |
2084 #ifdef ASSERT | |
2085 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) { | |
2086 Label L; | |
2087 | |
2088 assert(Rindex != Rscratch, "Registers cannot be same"); | |
2089 assert(Rindex != Rscratch1, "Registers cannot be same"); | |
2090 assert(Rlimit != Rscratch, "Registers cannot be same"); | |
2091 assert(Rlimit != Rscratch1, "Registers cannot be same"); | |
2092 assert(Rscratch1 != Rscratch, "Registers cannot be same"); | |
2093 | |
2094 // untested("reg area corruption"); | |
2095 add(Rindex, offset, Rscratch); | |
2096 add(Rlimit, 64 + STACK_BIAS, Rscratch1); | |
2097 cmp(Rscratch, Rscratch1); | |
2098 brx(Assembler::greaterEqualUnsigned, false, pn, L); | |
2099 delayed()->nop(); | |
2100 stop("regsave area is being clobbered"); | |
2101 bind(L); | |
2102 } | |
2103 #endif // ASSERT | |
2104 | |
2105 void InterpreterMacroAssembler::tag_local(frame::Tag t, | |
2106 Register base, | |
2107 Register src, | |
2108 int n) { | |
2109 if (TaggedStackInterpreter) { | |
2110 // have to store zero because local slots can be reused (rats!) | |
2111 if (t == frame::TagValue) { | |
2112 st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n)); | |
2113 } else if (t == frame::TagCategory2) { | |
2114 st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n)); | |
2115 st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1)); | |
2116 } else { | |
2117 // assert that we don't stomp the value in 'src' | |
2118 // O3 is arbitrary because it's not used. | |
2119 assert_different_registers(src, base, O3); | |
2120 mov( t, O3); | |
2121 st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n)); | |
2122 } | |
2123 } | |
2124 } | |
2125 | |
2126 | |
2127 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) { | |
2128 assert_not_delayed(); | |
2129 sll(index, Interpreter::logStackElementSize(), index); | |
2130 sub(Llocals, index, index); | |
2131 debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);) | |
2132 tag_local(frame::TagValue, index, src); | |
2133 st(src, index, Interpreter::value_offset_in_bytes()); | |
2134 } | |
2135 | |
2136 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src, | |
2137 Register tag ) { | |
2138 assert_not_delayed(); | |
2139 sll(index, Interpreter::logStackElementSize(), index); | |
2140 sub(Llocals, index, index); | |
2141 #ifdef ASSERT | |
2142 check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch); | |
2143 #endif | |
2144 st_ptr(src, index, Interpreter::value_offset_in_bytes()); | |
2145 // Store tag register directly | |
2146 if (TaggedStackInterpreter) { | |
2147 st_ptr(tag, index, Interpreter::tag_offset_in_bytes()); | |
2148 } | |
2149 } | |
2150 | |
2151 | |
2152 | |
2153 void InterpreterMacroAssembler::store_local_ptr( int n, Register src, | |
2154 Register tag ) { | |
2155 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n)); | |
2156 if (TaggedStackInterpreter) { | |
2157 st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n)); | |
2158 } | |
2159 } | |
2160 | |
2161 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) { | |
2162 assert_not_delayed(); | |
2163 sll(index, Interpreter::logStackElementSize(), index); | |
2164 sub(Llocals, index, index); | |
2165 #ifdef ASSERT | |
2166 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); | |
2167 #endif | |
2168 tag_local(frame::TagCategory2, index, src); | |
2169 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1 | |
2170 } | |
2171 | |
2172 | |
2173 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) { | |
2174 assert_not_delayed(); | |
2175 sll(index, Interpreter::logStackElementSize(), index); | |
2176 sub(Llocals, index, index); | |
2177 #ifdef ASSERT | |
2178 check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch); | |
2179 #endif | |
2180 tag_local(frame::TagValue, index, G1_scratch); | |
2181 stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes()); | |
2182 } | |
2183 | |
2184 | |
2185 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) { | |
2186 assert_not_delayed(); | |
2187 sll(index, Interpreter::logStackElementSize(), index); | |
2188 sub(Llocals, index, index); | |
2189 #ifdef ASSERT | |
2190 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); | |
2191 #endif | |
2192 tag_local(frame::TagCategory2, index, G1_scratch); | |
2193 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1)); | |
2194 } | |
2195 | |
2196 | |
2197 int InterpreterMacroAssembler::top_most_monitor_byte_offset() { | |
2198 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
2199 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong); | |
2200 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS; | |
2201 } | |
2202 | |
2203 | |
2204 Address InterpreterMacroAssembler::top_most_monitor() { | |
727 | 2205 return Address(FP, top_most_monitor_byte_offset()); |
0 | 2206 } |
2207 | |
2208 | |
2209 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { | |
2210 add( Lesp, wordSize, Rdest ); | |
2211 } | |
2212 | |
2213 #endif /* CC_INTERP */ | |
2214 | |
2215 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { | |
2216 assert(UseCompiler, "incrementing must be useful"); | |
2217 #ifdef CC_INTERP | |
727 | 2218 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + |
2219 InvocationCounter::counter_offset()); | |
2220 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + | |
2221 InvocationCounter::counter_offset()); | |
0 | 2222 #else |
727 | 2223 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + |
2224 InvocationCounter::counter_offset()); | |
2225 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + | |
2226 InvocationCounter::counter_offset()); | |
0 | 2227 #endif /* CC_INTERP */ |
2228 int delta = InvocationCounter::count_increment; | |
2229 | |
2230 // Load each counter in a register | |
2231 ld( inv_counter, Rtmp ); | |
2232 ld( be_counter, Rtmp2 ); | |
2233 | |
2234 assert( is_simm13( delta ), " delta too large."); | |
2235 | |
2236 // Add the delta to the invocation counter and store the result | |
2237 add( Rtmp, delta, Rtmp ); | |
2238 | |
2239 // Mask the backedge counter | |
2240 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2241 | |
2242 // Store value | |
2243 st( Rtmp, inv_counter); | |
2244 | |
2245 // Add invocation counter + backedge counter | |
2246 add( Rtmp, Rtmp2, Rtmp); | |
2247 | |
2248 // Note that this macro must leave the backedge_count + invocation_count in Rtmp! | |
2249 } | |
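// Net effect (illustrative sketch): Rtmp leaves holding |
//   (invocation_counter + delta) + (backedge_counter & count_mask_value) |
// the combined event count that callers test against the compilation |
// thresholds. |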
2250 | |
2251 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { | |
2252 assert(UseCompiler, "incrementing must be useful"); | |
2253 #ifdef CC_INTERP | |
727 | 2254 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + |
2255 InvocationCounter::counter_offset()); | |
2256 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + | |
2257 InvocationCounter::counter_offset()); | |
0 | 2258 #else |
727 | 2259 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + |
2260 InvocationCounter::counter_offset()); | |
2261 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + | |
2262 InvocationCounter::counter_offset()); | |
0 | 2263 #endif /* CC_INTERP */ |
2264 int delta = InvocationCounter::count_increment; | |
2265 // Load each counter in a register | |
2266 ld( be_counter, Rtmp ); | |
2267 ld( inv_counter, Rtmp2 ); | |
2268 | |
2269 // Add the delta to the backedge counter | |
2270 add( Rtmp, delta, Rtmp ); | |
2271 | |
2272 // Mask the invocation counter, add to backedge counter | |
2273 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2274 | |
2275 // and store the result to memory | |
2276 st( Rtmp, be_counter ); | |
2277 | |
2278 // Add backedge + invocation counter | |
2279 add( Rtmp, Rtmp2, Rtmp ); | |
2280 | |
2281 // Note that this macro must leave backedge_count + invocation_count in Rtmp! | |
2282 } | |
2283 | |
2284 #ifndef CC_INTERP | |
2285 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count, | |
2286 Register branch_bcp, | |
2287 Register Rtmp ) { | |
2288 Label did_not_overflow; | |
2289 Label overflow_with_error; | |
2290 assert_different_registers(backedge_count, Rtmp, branch_bcp); | |
2291 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); | |
2292 | |
727 | 2293 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); |
0 | 2294 load_contents(limit, Rtmp); |
2295 cmp(backedge_count, Rtmp); | |
2296 br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow); | |
2297 delayed()->nop(); | |
2298 | |
2299 // When ProfileInterpreter is on, the backedge_count comes from the | |
2300 // methodDataOop, whose value does not get reset on the call to |
2301 // frequency_counter_overflow(). To avoid excessive calls to the overflow | |
2302 // routine while the method is being compiled, add a second test to make sure | |
2303 // the overflow function is called only once every overflow_frequency. | |
2304 if (ProfileInterpreter) { | |
2305 const int overflow_frequency = 1024; | |
2306 andcc(backedge_count, overflow_frequency-1, Rtmp); | |
2307 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow); | |
2308 delayed()->nop(); | |
2309 } | |
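// Example (illustrative): with overflow_frequency == 1024 the andcc computes |
// backedge_count & 1023, so once the limit is crossed the overflow routine |
// is entered only when the count is a multiple of 1024. |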
2310 | |
2311 // overflow in loop, pass branch bytecode | |
2312 set(6,Rtmp); | |
2313 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp); | |
2314 | |
2315 // Was an OSR adapter generated? | |
2316 // O0 = osr nmethod | |
2317 tst(O0); | |
2318 brx(Assembler::zero, false, Assembler::pn, overflow_with_error); | |
2319 delayed()->nop(); | |
2320 | |
2321 // Has the nmethod been invalidated already? | |
2322 ld(O0, nmethod::entry_bci_offset(), O2); | |
2323 cmp(O2, InvalidOSREntryBci); | |
2324 br(Assembler::equal, false, Assembler::pn, overflow_with_error); | |
2325 delayed()->nop(); | |
2326 | |
2327 // migrate the interpreter frame off of the stack | |
2328 | |
2329 mov(G2_thread, L7); | |
2330 // save nmethod | |
2331 mov(O0, L6); | |
2332 set_last_Java_frame(SP, noreg); | |
2333 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); | |
2334 reset_last_Java_frame(); | |
2335 mov(L7, G2_thread); | |
2336 | |
2337 // move OSR nmethod to I1 | |
2338 mov(L6, I1); | |
2339 | |
2340 // OSR buffer to I0 | |
2341 mov(O0, I0); | |
2342 | |
2343 // remove the interpreter frame | |
2344 restore(I5_savedSP, 0, SP); | |
2345 | |
2346 // Jump to the osr code. | |
2347 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); | |
2348 jmp(O2, G0); | |
2349 delayed()->nop(); | |
2350 | |
2351 bind(overflow_with_error); | |
2352 | |
2353 bind(did_not_overflow); | |
2354 } | |
2355 | |
2356 | |
2357 | |
2358 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) { | |
2359 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); } | |
2360 } | |
2361 | |
2362 | |
2363 // local helper function for the verify_oop_or_return_address macro | |
2364 static bool verify_return_address(methodOopDesc* m, int bci) { | |
2365 #ifndef PRODUCT | |
2366 address pc = (address)(m->constMethod()) | |
2367 + in_bytes(constMethodOopDesc::codes_offset()) + bci; | |
2368 // assume it is a valid return address if it is inside m and is preceded by a jsr | |
2369 if (!m->contains(pc)) return false; | |
2370 address jsr_pc; | |
2371 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr); | |
2372 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true; | |
2373 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w); | |
2374 if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true; | |
2375 #endif // PRODUCT | |
2376 return false; | |
2377 } | |
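// Example (illustrative): a valid return address just past a 3-byte _jsr is |
// accepted because pc - Bytecodes::length_for(_jsr) lands on the _jsr opcode |
// inside the method's bytecodes; the 5-byte _jsr_w case is checked the same |
// way. |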
2378 | |
2379 | |
2380 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) { | |
2381 if (!VerifyOops) return; | |
2382 // the VM documentation for the astore[_wide] bytecode allows | |
2383 // the TOS to be not only an oop but also a return address | |
2384 Label test; | |
2385 Label skip; | |
2386 // See if it is an address (in the current method): | |
2387 | |
2388 mov(reg, Rtmp); | |
2389 const int log2_bytecode_size_limit = 16; | |
2390 srl(Rtmp, log2_bytecode_size_limit, Rtmp); | |
2391 br_notnull( Rtmp, false, pt, test ); | |
2392 delayed()->nop(); | |
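// Rationale (illustrative): a return address here is a bytecode offset below |
// 2^16, while any real oop is a full pointer, so a non-zero value after the |
// shift goes straight to the oop check at 'test'. |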
2393 | |
2394 // %%% should use call_VM_leaf here? | |
2395 save_frame_and_mov(0, Lmethod, O0, reg, O1); | |
2396 save_thread(L7_thread_cache); | |
2397 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none); | |
2398 delayed()->nop(); | |
2399 restore_thread(L7_thread_cache); | |
2400 br_notnull( O0, false, pt, skip ); | |
2401 delayed()->restore(); | |
2402 | |
2403 // Perform a more elaborate out-of-line call | |
2404 // Not an address; verify it: | |
2405 bind(test); | |
2406 verify_oop(reg); | |
2407 bind(skip); | |
2408 } | |
2409 | |
2410 | |
2411 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { | |
2412 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth); | |
2413 } | |
2414 #endif /* CC_INTERP */ | |
2415 | |
2416 // Inline assembly for: | |
2417 // | |
2418 // if (thread is in interp_only_mode) { | |
2419 // InterpreterRuntime::post_method_entry(); | |
2420 // } | |
2421 // if (DTraceMethodProbes) { | |
605 | 2422 // SharedRuntime::dtrace_method_entry(method, receiver); |
0 | 2423 // } |
610 | 2424 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2425 // SharedRuntime::rc_trace_method_entry(method, receiver); |
0 | 2426 // } |
2427 | |
2428 void InterpreterMacroAssembler::notify_method_entry() { | |
2429 | |
2430 // C++ interpreter only uses this for native methods. | |
2431 | |
2432 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2433 // entry/exit events are sent for that thread to track stack | |
2434 // depth. If it is possible to enter interp_only_mode we add | |
2435 // the code to check if the event should be sent. | |
2436 if (JvmtiExport::can_post_interpreter_events()) { | |
2437 Label L; | |
2438 Register temp_reg = O5; | |
727 | 2439 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2440 ld(interp_only, temp_reg); |
2441 tst(temp_reg); | |
2442 br(zero, false, pt, L); | |
2443 delayed()->nop(); | |
2444 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); | |
2445 bind(L); | |
2446 } | |
2447 | |
2448 { | |
2449 Register temp_reg = O5; | |
2450 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2451 call_VM_leaf(noreg, | |
2452 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), | |
2453 G2_thread, Lmethod); | |
2454 } | |
610 | 2455 |
2456 // RedefineClasses() tracing support for obsolete method entry |
2457 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2458 call_VM_leaf(noreg, |
2459 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), |
2460 G2_thread, Lmethod); |
2461 } |
0 | 2462 } |
2463 | |
2464 | |
2465 // Inline assembly for: | |
2466 // | |
2467 // if (thread is in interp_only_mode) { | |
2468 // // save result | |
2469 // InterpreterRuntime::post_method_exit(); | |
2470 // // restore result | |
2471 // } | |
2472 // if (DTraceMethodProbes) { | |
2473 // SharedRuntime::dtrace_method_exit(thread, method); | |
2474 // } | |
2475 // | |
2476 // Native methods have their result stored in d_tmp and l_tmp | |
2477 // Java methods have their result stored in the expression stack | |
2478 | |
2479 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, | |
2480 TosState state, | |
2481 NotifyMethodExitMode mode) { | |
2482 // C++ interpreter only uses this for native methods. | |
2483 | |
2484 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2485 // entry/exit events are sent for that thread to track stack | |
2486 // depth. If it is possible to enter interp_only_mode we add | |
2487 // the code to check if the event should be sent. | |
2488 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { | |
2489 Label L; | |
2490 Register temp_reg = O5; | |
727 | 2491 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2492 ld(interp_only, temp_reg); |
2493 tst(temp_reg); | |
2494 br(zero, false, pt, L); | |
2495 delayed()->nop(); | |
2496 | |
2497 // Note: frame::interpreter_frame_result has a dependency on how the | |
2498 // method result is saved across the call to post_method_exit. For | |
2499 // native methods it assumes the result registers are saved to | |
2500 // l_scratch and d_scratch. If this changes then the interpreter_frame_result | |
2501 // implementation will need to be updated too. | |
2502 | |
2503 save_return_value(state, is_native_method); | |
2504 call_VM(noreg, | |
2505 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); | |
2506 restore_return_value(state, is_native_method); | |
2507 bind(L); | |
2508 } | |
2509 | |
2510 { | |
2511 Register temp_reg = O5; | |
2512 // Dtrace notification | |
2513 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2514 save_return_value(state, is_native_method); | |
2515 call_VM_leaf( | |
2516 noreg, | |
2517 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), | |
2518 G2_thread, Lmethod); | |
2519 restore_return_value(state, is_native_method); | |
2520 } | |
2521 } | |
2522 | |
2523 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { | |
2524 #ifdef CC_INTERP | |
2525 // result potentially in O0/O1: save it across calls | |
2526 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); | |
2527 #ifdef _LP64 | |
2528 stx(O0, STATE(_native_lresult)); | |
2529 #else | |
2530 std(O0, STATE(_native_lresult)); | |
2531 #endif | |
2532 #else // CC_INTERP | |
2533 if (is_native_call) { | |
2534 stf(FloatRegisterImpl::D, F0, d_tmp); | |
2535 #ifdef _LP64 | |
2536 stx(O0, l_tmp); | |
2537 #else | |
2538 std(O0, l_tmp); | |
2539 #endif | |
2540 } else { | |
2541 push(state); | |
2542 } | |
2543 #endif // CC_INTERP | |
2544 } | |
2545 | |
2546 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { | |
2547 #ifdef CC_INTERP | |
2548 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); | |
2549 #ifdef _LP64 | |
2550 ldx(STATE(_native_lresult), O0); | |
2551 #else | |
2552 ldd(STATE(_native_lresult), O0); | |
2553 #endif | |
2554 #else // CC_INTERP | |
2555 if (is_native_call) { | |
2556 ldf(FloatRegisterImpl::D, d_tmp, F0); | |
2557 #ifdef _LP64 | |
2558 ldx(l_tmp, O0); | |
2559 #else | |
2560 ldd(l_tmp, O0); | |
2561 #endif | |
2562 } else { | |
2563 pop(state); | |
2564 } | |
2565 #endif // CC_INTERP | |
2566 } |