annotate src/cpu/sparc/vm/interp_masm_sparc.cpp @ 20495:58925d1f325e

8057722: G1: Code root hashtable updated incorrectly when evacuation failed
Reviewed-by: brutisso, jwilhelm

author:   mgerdin
date:     Mon, 08 Sep 2014 17:47:43 +0200
parents:  ce9fd31ffd14
children: 52b4284cb496
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH
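// Note: FAST_DISPATCH is defined above only to be immediately #undef'd, so
// the IdispatchTables fast path guarded by #ifdef FAST_DISPATCH below is
// compiled out; dispatch always materializes the table address explicitly.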

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

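// Note: on 64-bit SPARC (the V9 ABI) %sp and %fp are biased by STACK_BIAS
// (2047) bytes, so frame-relative addresses like those below must add the
// bias back in.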
const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta); // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
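  // (annulled branch: the delayed mov(G0, delta) above executes only when the
  //  branch is taken, i.e. a negative delta is clamped to zero)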
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                         // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  set(tbl, G3_scratch);                                  // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);      // get entry addr
#endif
}
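
// Note: the template interpreter keeps one dispatch table per TosState; each
// table holds one code address per bytecode, so the bytecode value scaled by
// wordSize is the byte offset of its entry in the active table.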


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();        // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();     // restore bcp
  //restore_locals();  // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                       // fall through
  case ctos:                                       // fall through
  case stos:                                       // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).

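// Note on the convention used below: Lesp points at the first free slot of
// the expression stack, which grows toward lower addresses; pushes store at
// Lesp and then decrement it, while pops read the top element from
// Lesp + Interpreter::expr_offset_in_bytes(0) and then increment Lesp.
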
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();            break;
  case btos: push_i();              break;
  case ctos:
  case stos: push_i();              break;
  case itos: push_i();              break;
  case ltos: push_l();              break;
  case ftos: push_f();              break;
  case dtos: push_d();              break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();            break;
  case btos: pop_i();              break;
  case ctos:
  case stos: pop_i();              break;
  case itos: pop_i();              break;
  case ltos: pop_l();              break;
  case ftos: pop_f();              break;
  case dtos: pop_d();              break;
  case vtos: /* nothing to do */   break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
  lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false,false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed:  ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned:  ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC:  orcc( Rdst, Rtmp, Rdst );  break;
   case dont_set_CC:  or3(  Rdst, Rtmp, Rdst );  break;
  }
}
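
// Note: bytecode operands are big-endian in the class file and Lbcp+bcp_offset
// need not be halfword aligned, so the two bytes are fetched individually and
// combined as (high << 8) | low instead of using a halfword load.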


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}
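
// Note: the andcc above tests (Lbcp + bcp_offset) & 3; when the address is
// word aligned, the annulled delay-slot load fetches all four bytes at once
// and execution skips to 'aligned', otherwise the four bytes are assembled
// one at a time in big-endian operand order.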

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}
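
// Note: for invokedynamic the four-byte operand holds the bitwise complement
// of the constant pool cache index, so xor3(index, -1, index) above (~index)
// recovers the plain index, matching ConstantPool::decode_invokedynamic_index().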


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if could use
// a single conditional branch (i.e. if span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}
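
// Note: in index_check_without_pop the null check's annulled delay slot loads
// the array length, so tmp already holds the length for the bounds compare
// once the pointer test passes; a failing check falls through into the jump
// emitted by throw_if_not_2.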


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
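  // after_save() maps an out register to the corresponding in register
  // (O0 -> I0, O1 -> I1). Moving the result into the in registers keeps it
  // live across the final restore, where it reappears in the caller's out
  // registers.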
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos: // fall through
  case ctos:
  case stos: // fall through
  case atos: // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos: // fall through
  case dtos: // fall through
  case vtos: /* nothing to do */ break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1. We can't tell whether we're returning
    // to interpreted or compiled code, so to be safe use both G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
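// For reference, a BasicObjectLock in the frame's monitor area is a
// (BasicLock, oop) pair (see runtime/basicLock.hpp): the BasicLock's single
// word holds the displaced mark word, and the oop slot holds the locked
// object.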
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), mark_reg, temp_reg);

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorStateException if object is not locked by current thread
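// The fast paths below mirror lock_object: a NULL displaced header means a
// recursive stack lock (nothing to restore), a successful CAS of the displaced
// header back into the mark word undoes a lightweight lock, and anything else
// means the lock was inflated, so we fall back to the runtime.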
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

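// The counter updates below saturate rather than wrap: on increment, addcc
// sets the carry flag if the counter overflows, and subc(bumped_count, G0,
// bumped_count) then subtracts that carry, pinning the counter at its
// maximum instead of wrapping to zero.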
void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register. Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register. Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking the branch. Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);


    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}

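// Helper for the receiver-type cache below. A ReceiverTypeData/VirtualCallData
// entry holds row_limit() (receiver, count) pairs, sized by TypeProfileWidth;
// the compile-time recursion on start_row fully unrolls the search, emitting
// one test chain per row.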
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register scratch,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);

  bind (done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind (profile_continue);
  }
}

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind (profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
                    in_bytes(MultiBranchData::default_displacement_offset()),
                    scratch);

    bind (profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                     in_bytes(MultiBranchData::relative_displacement_offset()),
                     scratch2);

    bind (profile_continue);
  }
}

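// profile_obj_type records an observed type in a single MDO cell. The cell
// encodes a Klass* OR'd with status bits (TypeEntries::null_seen,
// TypeEntries::type_unknown), so after the xor3 below only the status bits
// plus any klass mismatch remain set, which is what the btst checks rely on.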
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  Label not_null, do_nothing, do_update;

  assert_different_registers(obj, mdo_addr.base(), tmp);

  verify_oop(obj);

  ld_ptr(mdo_addr, tmp);

  br_notnull_short(obj, pt, not_null);
  or3(tmp, TypeEntries::null_seen, tmp);
  ba_short(do_update);

  bind(not_null);
  load_klass(obj, obj);

  xor3(obj, tmp, obj);
  btst(TypeEntries::type_klass_mask, obj);
  // klass seen before, nothing to do. The unknown bit may have been
  // set already but no need to check.
  brx(zero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_unknown, obj);
  // already unknown. Nothing to do anymore.
  brx(notZero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_mask, tmp);
  brx(zero, true, pt, do_update);
  // first time here. Set profile type.
  delayed()->or3(tmp, obj, tmp);

  // different than before. Cannot keep accurate profile.
  or3(tmp, TypeEntries::type_unknown, tmp);

  bind(do_update);
  // update profile
  st_ptr(tmp, mdo_addr);

  bind(do_nothing);
}

void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(ImethodDataPtr, off_to_args, ImethodDataPtr);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
        }
        ld_ptr(Address(callee, Method::const_offset()), tmp1);
        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there's an extra slot at
        // the top of the stack. So the offset is n - o from Lesp.
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
        sub(tmp1, tmp2, tmp1);

        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up
        sll(tmp1, Interpreter::logStackElementSize, tmp1);
        ld_ptr(Lesp, tmp1, tmp1);

        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp1, mdo_arg_addr, tmp2);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        add(ImethodDataPtr, to_add, ImethodDataPtr);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
        add(ImethodDataPtr, tmp1, ImethodDataPtr);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldub(Lbcp, 0, tmp1);
      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
      cmp(tmp1, Bytecodes::_invokehandle);
      br(equal, false, pn, do_profile);
      delayed()->ldub(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
    mov(ret, tmp1);
    profile_obj_type(tmp1, mdo_ret_addr, tmp2);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1

    // Pointer to the parameter area in the MDO
    Register mdp = tmp1;
    add(ImethodDataPtr, tmp1, mdp);

    // offset of the current profile entry to update
    Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
    // entry_offset in bytes
    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);

    Label loop;
    bind(loop);

    // load offset on the stack from the slot for this parameter
    ld_ptr(mdp, entry_offset, tmp3);
    sll(tmp3, Interpreter::logStackElementSize, tmp3);
    neg(tmp3);
    // read the parameter from the local area
    ld_ptr(Llocals, tmp3, tmp3);

    // make entry_offset now point to the type field for this parameter
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    add(entry_offset, type_base - off_base, entry_offset);

    // profile the parameter
    Address arg_type(mdp, entry_offset);
    profile_obj_type(tmp3, arg_type, tmp4);

    // go to next parameter
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);

    bind(profile_continue);
  }
}

// add an InterpMonitorElem to stack (see frame_sparc.hpp)
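// The monitor area sits above the expression stack in the interpreter frame,
// so growing it by one BasicObjectLock means sliding SP, Lesp and Lmonitors
// down by delta and copying any live expression-stack words down with them,
// which the loop below does.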

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit); // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}

// Locals
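// Locals are addressed downwards from Llocals: local i lives at
// Llocals - i * stackElementSize, so each accessor scales the index register
// by the stack element size and subtracts it from Llocals.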
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note:  index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note:  index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}


#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex != Rscratch, "Registers cannot be same");
  assert(Rindex != Rscratch1, "Registers cannot be same");
  assert(Rlimit != Rscratch, "Registers cannot be same");
  assert(Rlimit != Rscratch1, "Registers cannot be same");
  assert(Rscratch1 != Rscratch, "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}



void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

#endif /* CC_INTERP */

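// MethodCounters are allocated lazily (see 8010862), so callers must be
// prepared for a Method* that has none yet: build them via the runtime on
// first use, and branch to skip if the allocation failed (out of memory).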
10105 | 2302 void InterpreterMacroAssembler::get_method_counters(Register method, |
2303 Register Rcounters, |
2304 Label& skip) { |
2305 Label has_counters; |
2306 Address method_counters(method, in_bytes(Method::method_counters_offset())); |
2307 ld_ptr(method_counters, Rcounters); |
2308 br_notnull_short(Rcounters, Assembler::pt, has_counters); |
2309 call_VM(noreg, CAST_FROM_FN_PTR(address, |
2310 InterpreterRuntime::build_method_counters), method); |
2311 ld_ptr(method_counters, Rcounters); |
10138 | 2312 br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory |
2313 delayed()->nop(); |
10105 | 2314 bind(has_counters); |
2315 } |
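The stub above implements the lazy-allocation pattern for MethodCounters: use the pointer if it is already set, otherwise call into the runtime to build it, then re-load and branch to `skip` if allocation failed (for example under out-of-memory). A rough plain-C++ equivalent, using stand-in types rather than HotSpot's:

struct MethodCountersSketch { /* invocation/backedge counters would live here */ };

struct MethodSketch {
  MethodCountersSketch* counters;   // allocated lazily, may stay null under OOM
};

// Stand-in for the runtime call to InterpreterRuntime::build_method_counters;
// a real implementation allocates the counters and may fail.
static void build_method_counters_sketch(MethodSketch* m) {
  if (m->counters == nullptr) m->counters = new MethodCountersSketch();
}

// Returns the counters, or null when they could not be allocated; the assembly
// version branches to the caller-supplied 'skip' label in that case.
static MethodCountersSketch* get_method_counters_sketch(MethodSketch* m) {
  if (m->counters != nullptr) return m->counters;  // fast path: already built
  build_method_counters_sketch(m);                 // slow path: ask the runtime to allocate
  return m->counters;                              // may still be null (OutOfMemory)
}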
2316 |
2317 void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { |
0 | 2318 assert(UseCompiler, "incrementing must be useful"); |
10105 | 2319 assert_different_registers(Rcounters, Rtmp, Rtmp2); |
2320 |
2321 Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + |
727 | 2322 InvocationCounter::counter_offset()); |
10105 | 2323 Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + |
2324 InvocationCounter::counter_offset()); |
0 | 2325 int delta = InvocationCounter::count_increment; |
2326 | |
2327 // Load each counter in a register | |
2328 ld( inv_counter, Rtmp ); | |
2329 ld( be_counter, Rtmp2 ); | |
2330 | |
2331 assert( is_simm13( delta ), " delta too large."); | |
2332 | |
2333 // Add the delta to the invocation counter and store the result | |
2334 add( Rtmp, delta, Rtmp ); | |
2335 | |
2336 // Mask the backedge counter | |
2337 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2338 | |
2339 // Store value | |
2340 st( Rtmp, inv_counter); | |
2341 | |
2342 // Add invocation counter + backedge counter | |
2343 add( Rtmp, Rtmp2, Rtmp); | |
2344 | |
2345 // Note that this macro must leave the backedge_count + invocation_count in Rtmp! | |
2346 } | |
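Functionally, the macro adds InvocationCounter::count_increment to the invocation counter, masks the status bits out of the backedge counter, stores the bumped invocation counter back, and leaves the combined count in Rtmp for the caller's threshold test. A minimal sketch of that arithmetic in plain C++; the two bit-layout constants below are placeholders, not the real InvocationCounter values.

#include <cstdint>

static const uint32_t kCountIncrement = 1u << 3;   // stand-in for InvocationCounter::count_increment
static const uint32_t kCountMaskValue = ~0u << 3;  // stand-in for InvocationCounter::count_mask_value

// Mirrors increment_invocation_counter: bump *inv, leave *be untouched, and return
// (bumped invocation count) + (masked backedge count) -- the value the macro leaves in Rtmp.
uint32_t increment_invocation_counter_sketch(uint32_t* inv, const uint32_t* be) {
  uint32_t i = *inv + kCountIncrement;   // add the delta to the invocation counter
  uint32_t b = *be & kCountMaskValue;    // mask status bits out of the backedge counter
  *inv = i;                              // store the bumped invocation counter back
  return i + b;                          // combined count used for the overflow test
}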
2347 | |
10105 | 2348 void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { |
0 | 2349 assert(UseCompiler, "incrementing must be useful"); |
10105 | 2350 assert_different_registers(Rcounters, Rtmp, Rtmp2); |
2351 |
2352 Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + |
727 | 2353 InvocationCounter::counter_offset()); |
10105 | 2354 Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + |
2355 InvocationCounter::counter_offset()); |
2356 |
0 | 2357 int delta = InvocationCounter::count_increment; |
2358 // Load each counter in a register | |
2359 ld( be_counter, Rtmp ); | |
2360 ld( inv_counter, Rtmp2 ); | |
2361 | |
2362 // Add the delta to the backedge counter | |
2363 add( Rtmp, delta, Rtmp ); | |
2364 | |
2365 // Mask the invocation counter, add to backedge counter | |
2366 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2367 | |
2368 // and store the result to memory | |
2369 st( Rtmp, be_counter ); | |
2370 | |
2371 // Add backedge + invocation counter | |
2372 add( Rtmp, Rtmp2, Rtmp ); | |
2373 | |
2374 // Note that this macro must leave backedge_count + invocation_count in Rtmp! | |
2375 } | |
2376 | |
2377 #ifndef CC_INTERP | |
2378 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count, | |
2379 Register branch_bcp, | |
2380 Register Rtmp ) { | |
2381 Label did_not_overflow; | |
2382 Label overflow_with_error; | |
2383 assert_different_registers(backedge_count, Rtmp, branch_bcp); | |
2384 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); | |
2385 | |
727 | 2386 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); |
0 | 2387 load_contents(limit, Rtmp); |
3839 | 2388 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow); |
0 | 2389 |
2390 // When ProfileInterpreter is on, the backedge_count comes from the | |
6725 | 2391 // MethodData*, which value does not get reset on the call to |
0 | 2392 // frequency_counter_overflow(). To avoid excessive calls to the overflow |
2393 // routine while the method is being compiled, add a second test to make sure | |
2394 // the overflow function is called only once every overflow_frequency. | |
2395 if (ProfileInterpreter) { | |
2396 const int overflow_frequency = 1024; | |
2397 andcc(backedge_count, overflow_frequency-1, Rtmp); | |
2398 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow); | |
2399 delayed()->nop(); | |
2400 } | |
2401 | |
2402 // overflow in loop, pass branch bytecode | |
2403 set(6,Rtmp); | |
2404 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp); | |
2405 | |
2406 // Was an OSR adapter generated? | |
2407 // O0 = osr nmethod | |
3839 | 2408 br_null_short(O0, Assembler::pn, overflow_with_error); |
0 | 2409 |
2410 // Has the nmethod been invalidated already? | |
2411 ld(O0, nmethod::entry_bci_offset(), O2); | |
3839 | 2412 cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error); |
0 | 2413 |
2414 // migrate the interpreter frame off of the stack | |
2415 | |
2416 mov(G2_thread, L7); | |
2417 // save nmethod | |
2418 mov(O0, L6); | |
2419 set_last_Java_frame(SP, noreg); | |
2420 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); | |
2421 reset_last_Java_frame(); | |
2422 mov(L7, G2_thread); | |
2423 | |
2424 // move OSR nmethod to I1 | |
2425 mov(L6, I1); | |
2426 | |
2427 // OSR buffer to I0 | |
2428 mov(O0, I0); | |
2429 | |
2430 // remove the interpreter frame | |
2431 restore(I5_savedSP, 0, SP); | |
2432 | |
2433 // Jump to the osr code. | |
2434 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); | |
2435 jmp(O2, G0); | |
2436 delayed()->nop(); | |
2437 | |
2438 bind(overflow_with_error); | |
2439 | |
2440 bind(did_not_overflow); | |
2441 } | |
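The control flow above reads more easily as pseudo-C++: give up unless the combined backedge count has reached the interpreter's backward-branch limit, throttle the runtime call when ProfileInterpreter keeps the count from being reset, and only migrate the frame if frequency_counter_overflow() produced a still-valid OSR nmethod. A sketch of that decision logic; all names and the InvalidOSREntryBci stand-in below are illustrative, not HotSpot's, and in the emitted code the runtime call sits between the two early checks and the null/validity tests.

#include <cstdint>

struct NMethodSketch { int entry_bci; };     // stand-in for nmethod
static const int kInvalidOSREntryBci = -2;   // stand-in for InvalidOSREntryBci

// Returns true when the interpreter should migrate this frame to 'osr_nm', the
// value InterpreterRuntime::frequency_counter_overflow() returned (possibly null).
bool should_enter_osr_sketch(uint32_t backedge_count,
                             uint32_t backward_branch_limit,  // InterpreterBackwardBranchLimit
                             bool profile_interpreter,
                             const NMethodSketch* osr_nm) {
  if (backedge_count < backward_branch_limit) return false;   // did_not_overflow

  // With ProfileInterpreter the count lives in the MethodData* and never resets,
  // so the overflow path is taken only once every 1024 backedges.
  const uint32_t overflow_frequency = 1024;
  if (profile_interpreter && (backedge_count & (overflow_frequency - 1)) != 0) return false;

  if (osr_nm == nullptr) return false;                        // no OSR adapter was generated
  if (osr_nm->entry_bci == kInvalidOSREntryBci) return false; // nmethod already invalidated
  return true;                                                // migrate the frame, jump to OSR code
}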
2442 | |
2443 | |
2444 | |
2445 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) { | |
2446 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); } | |
2447 } | |
2448 | |
2449 | |
2450 // local helper function for the verify_oop_or_return_address macro | |
6725 | 2451 static bool verify_return_address(Method* m, int bci) { |
0 | 2452 #ifndef PRODUCT |
2453 address pc = (address)(m->constMethod()) | |
6725 | 2454 + in_bytes(ConstMethod::codes_offset()) + bci; |
0 | 2455 // assume it is a valid return address if it is inside m and is preceded by a jsr |
2456 if (!m->contains(pc)) return false; | |
2457 address jsr_pc; | |
2458 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr); | |
2459 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true; | |
2460 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w); | |
2461 if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true; | |
2462 #endif // PRODUCT | |
2463 return false; | |
2464 } | |
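In words: the address is accepted as a return address only if it lands inside the method's bytecodes and sits immediately after a jsr or jsr_w instruction. A plain-C++ rendering of the same check; the opcode values and instruction lengths are the JVM-spec ones, the surrounding types are stand-ins.

#include <cstdint>
#include <cstddef>

static const uint8_t kJsr = 0xa8, kJsrW = 0xc9;   // JVM-spec opcodes for jsr / jsr_w
static const int     kJsrLen = 3, kJsrWLen = 5;   // their instruction lengths in bytes

struct MethodBytesSketch {
  const uint8_t* code_base;   // first bytecode of the method
  size_t         code_size;   // number of bytecode bytes
  bool contains(const uint8_t* pc) const {
    return pc >= code_base && pc < code_base + code_size;
  }
};

// Mirrors the logic of verify_return_address(): pc = code_base + bci must lie inside
// the method and be immediately preceded by a jsr or jsr_w instruction.
bool verify_return_address_sketch(const MethodBytesSketch& m, int bci) {
  const uint8_t* pc = m.code_base + bci;
  if (!m.contains(pc)) return false;
  const uint8_t* jsr_pc = pc - kJsrLen;
  if (jsr_pc >= m.code_base && *jsr_pc == kJsr)  return true;
  jsr_pc = pc - kJsrWLen;
  if (jsr_pc >= m.code_base && *jsr_pc == kJsrW) return true;
  return false;
}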
2465 | |
2466 | |
2467 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) { | |
2468 if (!VerifyOops) return; | |
2469 // the VM documentation for the astore[_wide] bytecode allows | |
2470 // the TOS to be not only an oop but also a return address | |
2471 Label test; | |
2472 Label skip; | |
2473 // See if it is an address (in the current method): | |
2474 | |
2475 mov(reg, Rtmp); | |
2476 const int log2_bytecode_size_limit = 16; | |
2477 srl(Rtmp, log2_bytecode_size_limit, Rtmp); | |
3839 | 2478 br_notnull_short( Rtmp, pt, test ); |
0 | 2479 |
2480 // %%% should use call_VM_leaf here? | |
2481 save_frame_and_mov(0, Lmethod, O0, reg, O1); | |
2482 save_thread(L7_thread_cache); | |
2483 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none); | |
2484 delayed()->nop(); | |
2485 restore_thread(L7_thread_cache); | |
2486 br_notnull( O0, false, pt, skip ); | |
2487 delayed()->restore(); | |
2488 | |
2489 // Perform a more elaborate out-of-line call | |
2490 // Not an address; verify it: | |
2491 bind(test); | |
2492 verify_oop(reg); | |
2493 bind(skip); | |
2494 } | |
2495 | |
2496 | |
2497 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { | |
2498 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth); | |
2499 } | |
2500 #endif /* CC_INTERP */ | |
2501 | |
2502 // Inline assembly for: | |
2503 // | |
2504 // if (thread is in interp_only_mode) { | |
2505 // InterpreterRuntime::post_method_entry(); | |
2506 // } | |
2507 // if (DTraceMethodProbes) { | |
605 | 2508 // SharedRuntime::dtrace_method_entry(method, receiver); |
0 | 2509 // } |
610 | 2510 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2511 // SharedRuntime::rc_trace_method_entry(method, receiver); |
0 | 2512 // } |
2513 | |
2514 void InterpreterMacroAssembler::notify_method_entry() { | |
2515 | |
2516 // C++ interpreter only uses this for native methods. | |
2517 | |
2518 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2519 // entry/exit events are sent for that thread to track stack | |
2520 // depth. If it is possible to enter interp_only_mode we add | |
2521 // the code to check if the event should be sent. | |
2522 if (JvmtiExport::can_post_interpreter_events()) { | |
2523 Label L; | |
2524 Register temp_reg = O5; | |
727 | 2525 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2526 ld(interp_only, temp_reg); |
3839 | 2527 cmp_and_br_short(temp_reg, 0, equal, pt, L); |
0 | 2528 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); |
2529 bind(L); | |
2530 } | |
2531 | |
2532 { | |
2533 Register temp_reg = O5; | |
2534 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2535 call_VM_leaf(noreg, | |
2536 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), | |
2537 G2_thread, Lmethod); | |
2538 } | |
610 | 2539 |
2540 // RedefineClasses() tracing support for obsolete method entry |
2541 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2542 call_VM_leaf(noreg, |
2543 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), |
2544 G2_thread, Lmethod); |
2545 } |
0 | 2546 } |
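Note that two of the guards above are evaluated while this code is being generated (JvmtiExport::can_post_interpreter_events() and RC_TRACE_IN_RANGE(...)), whereas the interp_only_mode load and the SkipIfEqual test on DTraceMethodProbes become branches in the emitted SPARC code. A rough sketch of the run-time behaviour of the emitted code, assuming every generation-time guard was true; each name below is a stand-in, not a HotSpot identifier.

#include <cstdio>

struct ThreadSketch { int interp_only_mode; };

static int DTraceMethodProbes_sketch = 0;   // run-time flag word tested by SkipIfEqual

static void post_method_entry_sketch()                        { std::puts("JVMTI method entry"); }
static void dtrace_method_entry_sketch(ThreadSketch*, void*)  { std::puts("DTrace method entry"); }
static void rc_trace_method_entry_sketch(ThreadSketch*, void*){ std::puts("RC-trace method entry"); }

// Run-time effect of the code notify_method_entry() emits.
static void notify_method_entry_runtime_sketch(ThreadSketch* thread, void* method) {
  if (thread->interp_only_mode != 0)  post_method_entry_sketch();               // per-thread JVMTI check
  if (DTraceMethodProbes_sketch != 0) dtrace_method_entry_sketch(thread, method);
  rc_trace_method_entry_sketch(thread, method);   // unconditional at run time once emitted
}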
2547 | |
2548 | |
2549 // Inline assembly for: | |
2550 // | |
2551 // if (thread is in interp_only_mode) { | |
2552 // // save result | |
2553 // InterpreterRuntime::post_method_exit(); | |
2554 // // restore result | |
2555 // } | |
2556 // if (DTraceMethodProbes) { | |
2557 // SharedRuntime::dtrace_method_exit(thread, method); | |
2558 // } | |
2559 // | |
2560 // Native methods have their result stored in d_tmp and l_tmp | |
2561 // Java methods have their result stored in the expression stack | |
2562 | |
2563 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, | |
2564 TosState state, | |
2565 NotifyMethodExitMode mode) { | |
2566 // C++ interpreter only uses this for native methods. | |
2567 | |
2568 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2569 // entry/exit events are sent for that thread to track stack | |
2570 // depth. If it is possible to enter interp_only_mode we add | |
2571 // the code to check if the event should be sent. | |
2572 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { | |
2573 Label L; | |
2574 Register temp_reg = O5; | |
727 | 2575 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2576 ld(interp_only, temp_reg); |
3839 | 2577 cmp_and_br_short(temp_reg, 0, equal, pt, L); |
0 | 2578 |
2579 // Note: frame::interpreter_frame_result has a dependency on how the | |
2580 // method result is saved across the call to post_method_exit. For | |
2581 // native methods it assumes the result registers are saved to | |
2582 // l_scratch and d_scratch. If this changes then the interpreter_frame_result | |
2583 // implementation will need to be updated too. | |
2584 | |
2585 save_return_value(state, is_native_method); | |
2586 call_VM(noreg, | |
2587 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); | |
2588 restore_return_value(state, is_native_method); | |
2589 bind(L); | |
2590 } | |
2591 | |
2592 { | |
2593 Register temp_reg = O5; | |
2594 // Dtrace notification | |
2595 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2596 save_return_value(state, is_native_method); | |
2597 call_VM_leaf( | |
2598 noreg, | |
2599 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), | |
2600 G2_thread, Lmethod); | |
2601 restore_return_value(state, is_native_method); | |
2602 } | |
2603 } | |
2604 | |
2605 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { | |
2606 #ifdef CC_INTERP | |
2607 // result potentially in O0/O1: save it across calls | |
2608 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); | |
2609 #ifdef _LP64 | |
2610 stx(O0, STATE(_native_lresult)); | |
2611 #else | |
2612 std(O0, STATE(_native_lresult)); | |
2613 #endif | |
2614 #else // CC_INTERP | |
2615 if (is_native_call) { | |
2616 stf(FloatRegisterImpl::D, F0, d_tmp); | |
2617 #ifdef _LP64 | |
2618 stx(O0, l_tmp); | |
2619 #else | |
2620 std(O0, l_tmp); | |
2621 #endif | |
2622 } else { | |
2623 push(state); | |
2624 } | |
2625 #endif // CC_INTERP | |
2626 } | |
2627 | |
2628 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { | |
2629 #ifdef CC_INTERP | |
2630 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); | |
2631 #ifdef _LP64 | |
2632 ldx(STATE(_native_lresult), O0); | |
2633 #else | |
2634 ldd(STATE(_native_lresult), O0); | |
2635 #endif | |
2636 #else // CC_INTERP | |
2637 if (is_native_call) { | |
2638 ldf(FloatRegisterImpl::D, d_tmp, F0); | |
2639 #ifdef _LP64 | |
2640 ldx(l_tmp, O0); | |
2641 #else | |
2642 ldd(l_tmp, O0); | |
2643 #endif | |
2644 } else { | |
2645 pop(state); | |
2646 } | |
2647 #endif // CC_INTERP | |
2648 } | |
1783 | 2649 |
2650 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. | |
2651 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, | |
2652 int increment, int mask, | |
2653 Register scratch1, Register scratch2, | |
2654 Condition cond, Label *where) { | |
2655 ld(counter_addr, scratch1); | |
2656 add(scratch1, increment, scratch1); | |
2657 if (is_simm13(mask)) { | |
2658 andcc(scratch1, mask, G0); | |
2659 } else { | |
2660 set(mask, scratch2); | |
2661 andcc(scratch1, scratch2, G0); | |
2662 } | |
2663 br(cond, false, Assembler::pn, *where); | |
2664 delayed()->st(scratch1, counter_addr); | |
2665 } |
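Because the branch is emitted with annul = false, the store in the delay slot executes whether or not the branch is taken, so the counter in memory is always updated. A minimal sketch of the semantics, specialized to the common "branch when the masked bits are zero" use (plain C++, no HotSpot types):

#include <cstdint>

// Mirrors increment_mask_and_jump(): *counter += increment, then report whether
// ((*counter) & mask) meets the condition -- here fixed to "masked bits are zero".
// Returns true when the caller's 'where' label would be taken.
bool increment_mask_and_test_zero(uint32_t* counter, int increment, int mask) {
  uint32_t updated = *counter + static_cast<uint32_t>(increment);
  *counter = updated;   // the delay-slot store: happens on both the taken and fall-through paths
  return (updated & static_cast<uint32_t>(mask)) == 0;
}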