annotate src/cpu/sparc/vm/interp_masm_sparc.cpp @ 7345:dd903cdfe708
Add possibility for -ict option to SPECjvm2008 benchmark execution.
author | Thomas Wuerthinger <thomas.wuerthinger@oracle.com> |
date | Sat, 12 Jan 2013 14:48:44 +0100 |
parents | 5505fbbae3d3 |
children | aeaca88565e6 |
rev | line source |
0 | 1 /* |
6123 | 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552 | 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "interp_masm_sparc.hpp" | |
27 #include "interpreter/interpreter.hpp" | |
28 #include "interpreter/interpreterRuntime.hpp" | |
29 #include "oops/arrayOop.hpp" | |
30 #include "oops/markOop.hpp" | |
6725 | 31 #include "oops/methodData.hpp" |
32 #include "oops/method.hpp" | |
1972 | 33 #include "prims/jvmtiExport.hpp" |
34 #include "prims/jvmtiRedefineClassesTrace.hpp" | |
35 #include "prims/jvmtiThreadState.hpp" | |
36 #include "runtime/basicLock.hpp" | |
37 #include "runtime/biasedLocking.hpp" | |
38 #include "runtime/sharedRuntime.hpp" | |
7180 | 39 #include "runtime/thread.inline.hpp" |
0 | 40 |
41 #ifndef CC_INTERP | |
42 #ifndef FAST_DISPATCH | |
43 #define FAST_DISPATCH 1 | |
44 #endif | |
45 #undef FAST_DISPATCH | |
46 | |
47 // Implementation of InterpreterMacroAssembler | |
48 | |
49 // This file specializes the assembler with interpreter-specific macros | |
50 | |
727 | 51 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS); |
52 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); | |
0 | 53 |
54 #else // CC_INTERP | |
55 #ifndef STATE | |
56 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name)) | |
57 #endif // STATE | |
58 | |
59 #endif // CC_INTERP | |
60 | |
61 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) { | |
62 // Note: this algorithm is also used by C1's OSR entry sequence. | |
63 // Any changes should also be applied to CodeEmitter::emit_osr_entry(). | |
64 assert_different_registers(args_size, locals_size); | |
65 // max_locals*2 for TAGS. Assumes that args_size has already been adjusted. | |
66 subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words | |
67 // Use br/mov combination because it works on both V8 and V9 and is | |
68 // faster. | |
69 Label skip_move; | |
70 br(Assembler::negative, true, Assembler::pt, skip_move); | |
71 delayed()->mov(G0, delta); | |
72 bind(skip_move); | |
73 round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned) | |
74 sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes | |
75 } | |
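// Worked example (illustrative numbers only): for locals_size = 5 words and
// args_size = 2 words, delta = 3; the branch is not taken, round_to() bumps
// delta to 4 so SP stays doubleword aligned, and the final shift turns that
// into 4 * wordSize bytes of extra frame space. A negative difference is
// clamped to 0 by the mov(G0, delta) in the annulled delay slot.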
76 | |
77 #ifndef CC_INTERP | |
78 | |
79 // Dispatch code executed in the prolog of a bytecode which does not do its | |
80 // own dispatch. The dispatch address is computed and placed in IdispatchAddress | |
81 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) { | |
82 assert_not_delayed(); | |
83 #ifdef FAST_DISPATCH | |
84 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since | |
85 // they both use I2. | |
86 assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive"); | |
87 ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
88 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); | |
89 // add offset to correct dispatch table | |
90 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
91 ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr | |
92 #else | |
727 | 93 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
0 | 94 // dispatch table to use |
727 | 95 AddressLiteral tbl(Interpreter::dispatch_table(state)); |
96 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
97 set(tbl, G3_scratch); // compute addr of table | |
98 ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr | |
0 | 99 #endif |
100 } | |
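// Both paths above compute the same thing: the address of the template for
// the next bytecode, i.e. dispatch_table(state)[Lbyte_code], with the
// bytecode value scaled by wordSize to index the table of entry points.
// The result is left in IdispatchAddress for dispatch_epilog() to jump through.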
101 | |
102 | |
103 // Dispatch code executed in the epilog of a bytecode which does not do its | |
104 // own dispatch. The dispatch address in IdispatchAddress is used for the | |
105 // dispatch. | |
106 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) { | |
107 assert_not_delayed(); | |
108 verify_FPU(1, state); | |
109 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
110 jmp( IdispatchAddress, 0 ); | |
111 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); | |
112 else delayed()->nop(); | |
113 } | |
114 | |
115 | |
116 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) { | |
117 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) | |
118 assert_not_delayed(); | |
119 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
120 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr); | |
121 } | |
122 | |
123 | |
124 void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) { | |
125 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) | |
126 assert_not_delayed(); | |
127 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
128 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false); | |
129 } | |
130 | |
131 | |
132 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { | |
133 // load current bytecode | |
134 assert_not_delayed(); | |
135 ldub( Lbcp, 0, Lbyte_code); // load next bytecode | |
136 dispatch_base(state, table); | |
137 } | |
138 | |
139 | |
140 void InterpreterMacroAssembler::call_VM_leaf_base( | |
141 Register java_thread, | |
142 address entry_point, | |
143 int number_of_arguments | |
144 ) { | |
145 if (!java_thread->is_valid()) | |
146 java_thread = L7_thread_cache; | |
147 // super call | |
148 MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments); | |
149 } | |
150 | |
151 | |
152 void InterpreterMacroAssembler::call_VM_base( | |
153 Register oop_result, | |
154 Register java_thread, | |
155 Register last_java_sp, | |
156 address entry_point, | |
157 int number_of_arguments, | |
158 bool check_exception | |
159 ) { | |
160 if (!java_thread->is_valid()) | |
161 java_thread = L7_thread_cache; | |
162 // See class ThreadInVMfromInterpreter, which assumes that the interpreter | |
163 // takes responsibility for setting its own thread-state on call-out. | |
164 // However, ThreadInVMfromInterpreter resets the state to "in_Java". | |
165 | |
166 //save_bcp(); // save bcp | |
167 MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception); | |
168 //restore_bcp(); // restore bcp | |
169 //restore_locals(); // restore locals pointer | |
170 } | |
171 | |
172 | |
173 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) { | |
174 if (JvmtiExport::can_pop_frame()) { | |
175 Label L; | |
176 | |
177 // Check the "pending popframe condition" flag in the current thread | |
727 | 178 ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg); |
0 | 179 |
180 // Initiate popframe handling only if it is not already being processed. If the flag | |
181 // has the popframe_processing bit set, it means that this code is called *during* popframe | |
182 // handling - we don't want to reenter. | |
183 btst(JavaThread::popframe_pending_bit, scratch_reg); | |
184 br(zero, false, pt, L); | |
185 delayed()->nop(); | |
186 btst(JavaThread::popframe_processing_bit, scratch_reg); | |
187 br(notZero, false, pt, L); | |
188 delayed()->nop(); | |
189 | |
190 // Call Interpreter::remove_activation_preserving_args_entry() to get the | |
191 // address of the same-named entrypoint in the generated interpreter code. | |
192 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); | |
193 | |
194 // Jump to Interpreter::_remove_activation_preserving_args_entry | |
195 jmpl(O0, G0, G0); | |
196 delayed()->nop(); | |
197 bind(L); | |
198 } | |
199 } | |
200 | |
201 | |
202 void InterpreterMacroAssembler::load_earlyret_value(TosState state) { | |
203 Register thr_state = G4_scratch; | |
727 | 204 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
205 const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset()); | |
206 const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset()); | |
207 const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset()); | |
0 | 208 switch (state) { |
209 case ltos: ld_long(val_addr, Otos_l); break; | |
210 case atos: ld_ptr(oop_addr, Otos_l); | |
211 st_ptr(G0, oop_addr); break; | |
212 case btos: // fall through | |
213 case ctos: // fall through | |
214 case stos: // fall through | |
215 case itos: ld(val_addr, Otos_l1); break; | |
216 case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break; | |
217 case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break; | |
218 case vtos: /* nothing to do */ break; | |
219 default : ShouldNotReachHere(); | |
220 } | |
221 // Clean up tos value in the jvmti thread state | |
222 or3(G0, ilgl, G3_scratch); | |
223 stw(G3_scratch, tos_addr); | |
224 st_long(G0, val_addr); | |
225 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
226 } | |
227 | |
228 | |
229 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) { | |
230 if (JvmtiExport::can_force_early_return()) { | |
231 Label L; | |
232 Register thr_state = G3_scratch; | |
727 | 233 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
3839 | 234 br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit; |
0 | 235 |
236 // Initiate earlyret handling only if it is not already being processed. | |
237 // If the flag has the earlyret_processing bit set, it means that this code | |
238 // is called *during* earlyret handling - we don't want to reenter. | |
727 | 239 ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch); |
3839 | 240 cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L); |
0 | 241 |
242 // Call Interpreter::remove_activation_early_entry() to get the address of the | |
243 // same-named entrypoint in the generated interpreter code | |
727 | 244 ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1); |
0 | 245 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1); |
246 | |
247 // Jump to Interpreter::_remove_activation_early_entry | |
248 jmpl(O0, G0, G0); | |
249 delayed()->nop(); | |
250 bind(L); | |
251 } | |
252 } | |
253 | |
254 | |
1295 | 255 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) { |
0 | 256 mov(arg_1, O0); |
1295 | 257 mov(arg_2, O1); |
258 MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2); | |
0 | 259 } |
260 #endif /* CC_INTERP */ | |
261 | |
262 | |
263 #ifndef CC_INTERP | |
264 | |
265 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) { | |
266 assert_not_delayed(); | |
267 dispatch_Lbyte_code(state, table); | |
268 } | |
269 | |
270 | |
271 void InterpreterMacroAssembler::dispatch_normal(TosState state) { | |
272 dispatch_base(state, Interpreter::normal_table(state)); | |
273 } | |
274 | |
275 | |
276 void InterpreterMacroAssembler::dispatch_only(TosState state) { | |
277 dispatch_base(state, Interpreter::dispatch_table(state)); | |
278 } | |
279 | |
280 | |
281 // common code to dispatch and dispatch_only | |
282 // dispatch value in Lbyte_code and increment Lbcp | |
283 | |
284 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) { | |
285 verify_FPU(1, state); | |
286 // %%%%% maybe implement +VerifyActivationFrameSize here | |
287 //verify_thread(); //too slow; we will just verify on method entry & exit | |
288 if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
289 #ifdef FAST_DISPATCH | |
290 if (table == Interpreter::dispatch_table(state)) { | |
291 // use IdispatchTables | |
292 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); | |
293 // add offset to correct dispatch table | |
294 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
295 ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr | |
296 } else { | |
297 #endif | |
298 // dispatch table to use | |
727 | 299 AddressLiteral tbl(table); |
0 | 300 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
727 | 301 set(tbl, G3_scratch); // compute addr of table |
0 | 302 ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr |
303 #ifdef FAST_DISPATCH | |
304 } | |
305 #endif | |
306 jmp( G3_scratch, 0 ); | |
307 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); | |
308 else delayed()->nop(); | |
309 } | |
310 | |
311 | |
312 // Helpers for expression stack | |
313 | |
314 // Longs and doubles are Category 2 computational types in the | |
315 // JVM specification (section 3.11.1) and take 2 expression stack or | |
316 // local slots. | |
317 // Aligning them on 32 bit with tagged stacks is hard because the code generated | |
318 // for the dup* bytecodes depends on what types are already on the stack. | |
319 // If the types are split into the two stack/local slots, that is much easier | |
320 // (and we can use 0 for non-reference tags). | |
321 | |
322 // Known good alignment in _LP64 but unknown otherwise | |
323 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) { | |
324 assert_not_delayed(); | |
325 | |
326 #ifdef _LP64 | |
327 ldf(FloatRegisterImpl::D, r1, offset, d); | |
328 #else | |
329 ldf(FloatRegisterImpl::S, r1, offset, d); | |
1506 | 330 ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor()); |
0 | 331 #endif |
332 } | |
333 | |
334 // Known good alignment in _LP64 but unknown otherwise | |
335 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) { | |
336 assert_not_delayed(); | |
337 | |
338 #ifdef _LP64 | |
339 stf(FloatRegisterImpl::D, d, r1, offset); | |
340 // store something more useful here | |
1506 | 341 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
0 | 342 #else |
343 stf(FloatRegisterImpl::S, d, r1, offset); | |
1506 | 344 stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize); |
0 | 345 #endif |
346 } | |
347 | |
348 | |
349 // Known good alignment in _LP64 but unknown otherwise | |
350 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) { | |
351 assert_not_delayed(); | |
352 #ifdef _LP64 | |
353 ldx(r1, offset, rd); | |
354 #else | |
355 ld(r1, offset, rd); | |
1506 | 356 ld(r1, offset + Interpreter::stackElementSize, rd->successor()); |
0 | 357 #endif |
358 } | |
359 | |
360 // Known good alignment in _LP64 but unknown otherwise | |
361 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) { | |
362 assert_not_delayed(); | |
363 | |
364 #ifdef _LP64 | |
365 stx(l, r1, offset); | |
366 // store something more useful here | |
1506 | 367 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
0 | 368 #else |
369 st(l, r1, offset); | |
1506 | 370 st(l->successor(), r1, offset + Interpreter::stackElementSize); |
0 | 371 #endif |
372 } | |
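// Summary of the two store helpers above: on _LP64 the full 8-byte value is
// written at the given offset and the slot at offset + stackElementSize is
// poisoned with G0 in debug builds; on 32-bit the two 4-byte halves are
// written to consecutive slots, stackElementSize apart.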
373 | |
374 void InterpreterMacroAssembler::pop_i(Register r) { | |
375 assert_not_delayed(); | |
376 ld(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 377 inc(Lesp, Interpreter::stackElementSize); |
0 | 378 debug_only(verify_esp(Lesp)); |
379 } | |
380 | |
381 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) { | |
382 assert_not_delayed(); | |
383 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 384 inc(Lesp, Interpreter::stackElementSize); |
0 | 385 debug_only(verify_esp(Lesp)); |
386 } | |
387 | |
388 void InterpreterMacroAssembler::pop_l(Register r) { | |
389 assert_not_delayed(); | |
390 load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 391 inc(Lesp, 2*Interpreter::stackElementSize); |
0 | 392 debug_only(verify_esp(Lesp)); |
393 } | |
394 | |
395 | |
396 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) { | |
397 assert_not_delayed(); | |
398 ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f); | |
1506 | 399 inc(Lesp, Interpreter::stackElementSize); |
0 | 400 debug_only(verify_esp(Lesp)); |
401 } | |
402 | |
403 | |
404 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) { | |
405 assert_not_delayed(); | |
406 load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f); | |
1506 | 407 inc(Lesp, 2*Interpreter::stackElementSize); |
0 | 408 debug_only(verify_esp(Lesp)); |
409 } | |
410 | |
411 | |
412 void InterpreterMacroAssembler::push_i(Register r) { | |
413 assert_not_delayed(); | |
414 debug_only(verify_esp(Lesp)); | |
1506 | 415 st(r, Lesp, 0); |
416 dec(Lesp, Interpreter::stackElementSize); | |
0 | 417 } |
418 | |
419 void InterpreterMacroAssembler::push_ptr(Register r) { | |
420 assert_not_delayed(); | |
1506 | 421 st_ptr(r, Lesp, 0); |
422 dec(Lesp, Interpreter::stackElementSize); | |
0 | 423 } |
424 | |
425 // remember: our convention for longs in SPARC is: | |
426 // O0 (Otos_l1) has high-order part in first word, | |
427 // O1 (Otos_l2) has low-order part in second word | |
428 | |
429 void InterpreterMacroAssembler::push_l(Register r) { | |
430 assert_not_delayed(); | |
431 debug_only(verify_esp(Lesp)); | |
1506 | 432 // Longs are stored in memory-correct order, even if unaligned. |
433 int offset = -Interpreter::stackElementSize; | |
0 | 434 store_unaligned_long(r, Lesp, offset); |
1506 | 435 dec(Lesp, 2 * Interpreter::stackElementSize); |
0 | 436 } |
437 | |
438 | |
439 void InterpreterMacroAssembler::push_f(FloatRegister f) { | |
440 assert_not_delayed(); | |
441 debug_only(verify_esp(Lesp)); | |
1506 | 442 stf(FloatRegisterImpl::S, f, Lesp, 0); |
443 dec(Lesp, Interpreter::stackElementSize); | |
0 | 444 } |
445 | |
446 | |
447 void InterpreterMacroAssembler::push_d(FloatRegister d) { | |
448 assert_not_delayed(); | |
449 debug_only(verify_esp(Lesp)); | |
1506 | 450 // Longs are stored in memory-correct order, even if unaligned. |
451 int offset = -Interpreter::stackElementSize; | |
0 | 452 store_unaligned_double(d, Lesp, offset); |
1506 | 453 dec(Lesp, 2 * Interpreter::stackElementSize); |
0 | 454 } |
455 | |
456 | |
457 void InterpreterMacroAssembler::push(TosState state) { | |
458 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
459 switch (state) { | |
460 case atos: push_ptr(); break; | |
461 case btos: push_i(); break; | |
462 case ctos: | |
463 case stos: push_i(); break; | |
464 case itos: push_i(); break; | |
465 case ltos: push_l(); break; | |
466 case ftos: push_f(); break; | |
467 case dtos: push_d(); break; | |
468 case vtos: /* nothing to do */ break; | |
469 default : ShouldNotReachHere(); | |
470 } | |
471 } | |
472 | |
473 | |
474 void InterpreterMacroAssembler::pop(TosState state) { | |
475 switch (state) { | |
476 case atos: pop_ptr(); break; | |
477 case btos: pop_i(); break; | |
478 case ctos: | |
479 case stos: pop_i(); break; | |
480 case itos: pop_i(); break; | |
481 case ltos: pop_l(); break; | |
482 case ftos: pop_f(); break; | |
483 case dtos: pop_d(); break; | |
484 case vtos: /* nothing to do */ break; | |
485 default : ShouldNotReachHere(); | |
486 } | |
487 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
488 } | |
489 | |
490 | |
1506 | 491 // Helpers for swap and dup |
492 void InterpreterMacroAssembler::load_ptr(int n, Register val) { | |
0 | 493 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val); |
494 } | |
1506 | 495 void InterpreterMacroAssembler::store_ptr(int n, Register val) { |
0 | 496 st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n)); |
497 } | |
498 | |
499 | |
500 void InterpreterMacroAssembler::load_receiver(Register param_count, | |
501 Register recv) { | |
1506 | 502 sll(param_count, Interpreter::logStackElementSize, param_count); |
6266 | 503 ld_ptr(Lesp, param_count, recv); // gets receiver oop |
0 | 504 } |
505 | |
506 void InterpreterMacroAssembler::empty_expression_stack() { | |
507 // Reset Lesp. | |
508 sub( Lmonitors, wordSize, Lesp ); | |
509 | |
510 // Reset SP by subtracting more space from Lesp. | |
511 Label done; | |
727 | 512 assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!"); |
0 | 513 |
514 // A native does not need to do this, since its callee does not change SP. | |
6725 | 515 ld(Lmethod, Method::access_flags_offset(), Gframe_size); // Load access flags. |
0 | 516 btst(JVM_ACC_NATIVE, Gframe_size); |
517 br(Assembler::notZero, false, Assembler::pt, done); | |
518 delayed()->nop(); | |
519 | |
520 // Compute max expression stack+register save area | |
7183 | 521 ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size); |
522 lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack. | |
0 | 523 add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); |
524 | |
525 // | |
526 // now set up a stack frame with the size computed above | |
527 // | |
528 //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below | |
529 sll( Gframe_size, LogBytesPerWord, Gframe_size ); | |
530 sub( Lesp, Gframe_size, Gframe_size ); | |
531 and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary | |
532 debug_only(verify_sp(Gframe_size, G4_scratch)); | |
533 #ifdef _LP64 | |
534 sub(Gframe_size, STACK_BIAS, Gframe_size ); | |
535 #endif | |
536 mov(Gframe_size, SP); | |
537 | |
538 bind(done); | |
539 } | |
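// In effect the non-native path above recomputes SP as
//   align_down(Lesp - (max_stack + frame::memory_parameter_word_sp_offset) * wordSize, 2 * wordSize)
// (minus STACK_BIAS on 64-bit), so the expression stack and the memory
// parameter / register save area both fit below the new SP.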
540 | |
541 | |
542 #ifdef ASSERT | |
543 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) { | |
544 Label Bad, OK; | |
545 | |
546 // Saved SP must be aligned. | |
547 #ifdef _LP64 | |
548 btst(2*BytesPerWord-1, Rsp); | |
549 #else | |
550 btst(LongAlignmentMask, Rsp); | |
551 #endif | |
552 br(Assembler::notZero, false, Assembler::pn, Bad); | |
553 delayed()->nop(); | |
554 | |
555 // Saved SP, plus register window size, must not be above FP. | |
556 add(Rsp, frame::register_save_words * wordSize, Rtemp); | |
557 #ifdef _LP64 | |
558 sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP | |
559 #endif | |
3839 | 560 cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad); |
0 | 561 |
562 // Saved SP must not be ridiculously below current SP. | |
563 size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K); | |
564 set(maxstack, Rtemp); | |
565 sub(SP, Rtemp, Rtemp); | |
566 #ifdef _LP64 | |
567 add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp | |
568 #endif | |
3839 | 569 cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad); |
570 | |
571 ba_short(OK); | |
0 | 572 |
573 bind(Bad); | |
574 stop("on return to interpreted call, restored SP is corrupted"); | |
575 | |
576 bind(OK); | |
577 } | |
578 | |
579 | |
580 void InterpreterMacroAssembler::verify_esp(Register Resp) { | |
581 // about to read or write Resp[0] | |
582 // make sure it is not in the monitors or the register save area | |
583 Label OK1, OK2; | |
584 | |
585 cmp(Resp, Lmonitors); | |
586 brx(Assembler::lessUnsigned, true, Assembler::pt, OK1); | |
587 delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp); | |
588 stop("too many pops: Lesp points into monitor area"); | |
589 bind(OK1); | |
590 #ifdef _LP64 | |
591 sub(Resp, STACK_BIAS, Resp); | |
592 #endif | |
593 cmp(Resp, SP); | |
594 brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2); | |
595 delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp); | |
596 stop("too many pushes: Lesp points into register window"); | |
597 bind(OK2); | |
598 } | |
599 #endif // ASSERT | |
600 | |
601 // Load compiled (i2c) or interpreter entry when calling from interpreted and | |
602 // do the call. Centralized so that all interpreter calls will do the same actions. | |
603 // If jvmti single stepping is on for a thread we must not call compiled code. | |
604 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) { | |
605 | |
606 // Assume we want to go compiled if available | |
607 | |
6725 | 608 ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target); |
0 | 609 |
610 if (JvmtiExport::can_post_interpreter_events()) { | |
611 // JVMTI events, such as single-stepping, are implemented partly by avoiding running | |
612 // compiled code in threads for which the event is enabled. Check here for | |
613 // interp_only_mode if these events CAN be enabled. | |
614 verify_thread(); | |
615 Label skip_compiled_code; | |
616 | |
727 | 617 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 618 ld(interp_only, scratch); |
3839 | 619 cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn); |
6725 | 620 delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target); |
0 | 621 bind(skip_compiled_code); |
622 } | |
623 | |
6725 | 624 // the i2c_adapters need Method* in G5_method (right? %%%) |
0 | 625 // do the call |
626 #ifdef ASSERT | |
627 { | |
628 Label ok; | |
3839 | 629 br_notnull_short(target, Assembler::pt, ok); |
0 | 630 stop("null entry point"); |
631 bind(ok); | |
632 } | |
633 #endif // ASSERT | |
634 | |
635 // Adjust Rret first so Llast_SP can be same as Rret | |
636 add(Rret, -frame::pc_return_offset, O7); | |
637 add(Lesp, BytesPerWord, Gargs); // setup parameter pointer | |
638 // Record SP so we can remove any stack space allocated by adapter transition | |
639 jmp(target, 0); | |
640 delayed()->mov(SP, Llast_SP); | |
641 } | |
642 | |
643 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) { | |
644 assert_not_delayed(); | |
645 | |
646 Label not_taken; | |
647 if (ptr_compare) brx(cc, false, Assembler::pn, not_taken); | |
648 else br (cc, false, Assembler::pn, not_taken); | |
649 delayed()->nop(); | |
650 | |
651 TemplateTable::branch(false,false); | |
652 | |
653 bind(not_taken); | |
654 | |
655 profile_not_taken_branch(G3_scratch); | |
656 } | |
657 | |
658 | |
659 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp( | |
660 int bcp_offset, | |
661 Register Rtmp, | |
662 Register Rdst, | |
663 signedOrNot is_signed, | |
664 setCCOrNot should_set_CC ) { | |
665 assert(Rtmp != Rdst, "need separate temp register"); | |
666 assert_not_delayed(); | |
667 switch (is_signed) { | |
668 default: ShouldNotReachHere(); | |
669 | |
670 case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte | |
671 case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte | |
672 } | |
673 ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte | |
674 sll( Rdst, BitsPerByte, Rdst); | |
675 switch (should_set_CC ) { | |
676 default: ShouldNotReachHere(); | |
677 | |
678 case set_CC: orcc( Rdst, Rtmp, Rdst ); break; | |
679 case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break; | |
680 } | |
681 } | |
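// The net effect is Rdst = (bcp[bcp_offset] << 8) | bcp[bcp_offset + 1]:
// bytecode operands are big-endian, the sign of the result follows the high
// byte when Signed is requested, and set_CC additionally leaves the
// condition codes reflecting the combined value.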
682 | |
683 | |
684 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp( | |
685 int bcp_offset, | |
686 Register Rtmp, | |
687 Register Rdst, | |
688 setCCOrNot should_set_CC ) { | |
689 assert(Rtmp != Rdst, "need separate temp register"); | |
690 assert_not_delayed(); | |
691 add( Lbcp, bcp_offset, Rtmp); | |
692 andcc( Rtmp, 3, G0); | |
693 Label aligned; | |
694 switch (should_set_CC ) { | |
695 default: ShouldNotReachHere(); | |
696 | |
697 case set_CC: break; | |
698 case dont_set_CC: break; | |
699 } | |
700 | |
701 br(Assembler::zero, true, Assembler::pn, aligned); | |
702 #ifdef _LP64 | |
703 delayed()->ldsw(Rtmp, 0, Rdst); | |
704 #else | |
705 delayed()->ld(Rtmp, 0, Rdst); | |
706 #endif | |
707 | |
708 ldub(Lbcp, bcp_offset + 3, Rdst); | |
709 ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst); | |
710 ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst); | |
711 #ifdef _LP64 | |
712 ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); | |
713 #else | |
714 // Unsigned load is faster than signed on some implementations | |
715 ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); | |
716 #endif | |
717 or3(Rtmp, Rdst, Rdst ); | |
718 | |
719 bind(aligned); | |
720 if (should_set_CC == set_CC) tst(Rdst); | |
721 } | |
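// Fast path: if Lbcp + bcp_offset is 4-byte aligned the operand is loaded
// with a single (sign-extending on _LP64) word load in the delay slot.
// Slow path: the four bytes are fetched individually and merged big-endian,
//   Rdst = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
// with b0 sign-extended on _LP64 so both paths agree.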
722 | |
6725 | 723 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index, |
1565 | 724 int bcp_offset, size_t index_size) { |
1503 | 725 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
1565 | 726 if (index_size == sizeof(u2)) { |
6725 | 727 get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned); |
1565 | 728 } else if (index_size == sizeof(u4)) { |
2416 | 729 assert(EnableInvokeDynamic, "giant index used only for JSR 292"); |
6725 | 730 get_4_byte_integer_at_bcp(bcp_offset, temp, index); |
731 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); | |
732 xor3(index, -1, index); // convert to plain index | |
1565 | 733 } else if (index_size == sizeof(u1)) { |
6725 | 734 ldub(Lbcp, bcp_offset, index); |
1565 | 735 } else { |
736 ShouldNotReachHere(); | |
1503 | 737 } |
738 } | |
739 | |
740 | |
741 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, | |
1565 | 742 int bcp_offset, size_t index_size) { |
0 | 743 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
744 assert_different_registers(cache, tmp); | |
745 assert_not_delayed(); | |
1565 | 746 get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size); |
1503 | 747 // convert from field index to ConstantPoolCacheEntry index and from |
748 // word index to byte offset | |
0 | 749 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); |
750 add(LcpoolCache, tmp, cache); | |
751 } | |
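// The raw index taken from the bytecode stream is scaled to a byte offset
// (the sll above multiplies by sizeof(ConstantPoolCacheEntry) in bytes) and
// added to LcpoolCache; callers then add ConstantPoolCache::base_offset()
// when touching entry fields, as in get_cache_and_index_and_bytecode_at_bcp()
// below.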
752 | |
753 | |
3852 | 754 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, |
755 Register temp, | |
756 Register bytecode, | |
757 int byte_no, | |
758 int bcp_offset, | |
759 size_t index_size) { | |
760 get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size); | |
6725 | 761 ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode); |
3852 | 762 const int shift_count = (1 + byte_no) * BitsPerByte; |
6266 | 763 assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || |
764 (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), | |
765 "correct shift count"); | |
766 srl(bytecode, shift_count, bytecode); | |
767 assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); | |
768 and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode); | |
3852 | 769 } |
770 | |
771 | |
1503 | 772 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, |
1565 | 773 int bcp_offset, size_t index_size) { |
0 | 774 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
775 assert_different_registers(cache, tmp); | |
776 assert_not_delayed(); | |
1565 | 777 if (index_size == sizeof(u2)) { |
778 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); | |
779 } else { | |
780 ShouldNotReachHere(); // other sizes not supported here | |
781 } | |
0 | 782 // convert from field index to ConstantPoolCacheEntry index |
783 // and from word index to byte offset | |
784 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); | |
785 // skip past the header | |
6725 | 786 add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp); |
0 | 787 // construct pointer to cache entry |
788 add(LcpoolCache, tmp, cache); | |
789 } | |
790 | |
791 | |
6725 | 792 // Load object from cpool->resolved_references(index) |
793 void InterpreterMacroAssembler::load_resolved_reference_at_index( | |
794 Register result, Register index) { | |
795 assert_different_registers(result, index); | |
796 assert_not_delayed(); | |
797 // convert from field index to resolved_references() index and from | |
798 // word index to byte offset. Since this is a java object, it can be compressed | |
799 Register tmp = index; // reuse | |
800 sll(index, LogBytesPerHeapOop, tmp); | |
801 get_constant_pool(result); | |
802 // load pointer for resolved_references[] objArray | |
803 ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result); | |
804 // JNIHandles::resolve(result) | |
805 ld_ptr(result, 0, result); | |
806 // Add in the index | |
807 add(result, tmp, result); | |
808 load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result); | |
809 } | |
810 | |
811 | |
0 | 812 // Generate a subtype check: branch to ok_is_subtype if sub_klass is |
113 | 813 // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2. |
0 | 814 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, |
815 Register Rsuper_klass, | |
816 Register Rtmp1, | |
817 Register Rtmp2, | |
818 Register Rtmp3, | |
819 Label &ok_is_subtype ) { | |
644 | 820 Label not_subtype; |
0 | 821 |
822 // Profile the not-null value's klass. | |
823 profile_typecheck(Rsub_klass, Rtmp1); | |
824 | |
644 | 825 check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass, |
826 Rtmp1, Rtmp2, | |
827 &ok_is_subtype, &not_subtype, NULL); | |
828 | |
829 check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass, | |
830 Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg, | |
831 &ok_is_subtype, NULL); | |
0 | 832 |
833 bind(not_subtype); | |
834 profile_typecheck_failed(Rtmp1); | |
835 } | |
836 | |
837 // Separate these two to allow for delay slot in middle | |
838 // These are used to do a test and full jump to exception-throwing code. | |
839 | |
840 // %%%%% Could possibly reoptimize this by testing to see if it could use | |
841 // a single conditional branch (i.e. if the span is small enough). | |
842 // If you go that route, then get rid of the split and give up | |
843 // on the delay-slot hack. | |
844 | |
845 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition, | |
846 Label& ok ) { | |
847 assert_not_delayed(); | |
848 br(ok_condition, true, pt, ok); | |
849 // DELAY SLOT | |
850 } | |
851 | |
852 void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition, | |
853 Label& ok ) { | |
854 assert_not_delayed(); | |
855 bp( ok_condition, true, Assembler::xcc, pt, ok); | |
856 // DELAY SLOT | |
857 } | |
858 | |
859 void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition, | |
860 Label& ok ) { | |
861 assert_not_delayed(); | |
862 brx(ok_condition, true, pt, ok); | |
863 // DELAY SLOT | |
864 } | |
865 | |
866 void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point, | |
867 Register Rscratch, | |
868 Label& ok ) { | |
869 assert(throw_entry_point != NULL, "entry point must be generated by now"); | |
727 | 870 AddressLiteral dest(throw_entry_point); |
871 jump_to(dest, Rscratch); | |
0 | 872 delayed()->nop(); |
873 bind(ok); | |
874 } | |
875 | |
876 | |
877 // And if you cannot use the delay slot, here is a shorthand: | |
878 | |
879 void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition, | |
880 address throw_entry_point, | |
881 Register Rscratch ) { | |
882 Label ok; | |
883 if (ok_condition != never) { | |
884 throw_if_not_1_icc( ok_condition, ok); | |
885 delayed()->nop(); | |
886 } | |
887 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
888 } | |
889 void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition, | |
890 address throw_entry_point, | |
891 Register Rscratch ) { | |
892 Label ok; | |
893 if (ok_condition != never) { | |
894 throw_if_not_1_xcc( ok_condition, ok); | |
895 delayed()->nop(); | |
896 } | |
897 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
898 } | |
899 void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition, | |
900 address throw_entry_point, | |
901 Register Rscratch ) { | |
902 Label ok; | |
903 if (ok_condition != never) { | |
904 throw_if_not_1_x( ok_condition, ok); | |
905 delayed()->nop(); | |
906 } | |
907 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
908 } | |
909 | |
910 // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res | |
911 // Note: res is still shy of address by array offset into object. | |
912 | |
913 void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) { | |
914 assert_not_delayed(); | |
915 | |
916 verify_oop(array); | |
917 #ifdef _LP64 | |
918 // sign extend since tos (index) can be a 32bit value | |
919 sra(index, G0, index); | |
920 #endif // _LP64 | |
921 | |
922 // check array | |
923 Label ptr_ok; | |
924 tst(array); | |
925 throw_if_not_1_x( notZero, ptr_ok ); | |
926 delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index | |
927 throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok); | |
928 | |
929 Label index_ok; | |
930 cmp(index, tmp); | |
931 throw_if_not_1_icc( lessUnsigned, index_ok ); | |
932 if (index_shift > 0) delayed()->sll(index, index_shift, index); | |
933 else delayed()->add(array, index, res); // addr - const offset in index | |
934 // convention: move aberrant index into G3_scratch for exception message | |
935 mov(index, G3_scratch); | |
936 throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok); | |
937 | |
938 // add offset if didn't do it in delay slot | |
939 if (index_shift > 0) add(array, index, res); // addr - const offset in index | |
940 } | |
941 | |
942 | |
943 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) { | |
944 assert_not_delayed(); | |
945 | |
946 // pop array | |
947 pop_ptr(array); | |
948 | |
949 // check array | |
950 index_check_without_pop(array, index, index_shift, tmp, res); | |
951 } | |
952 | |
953 | |
6123 | 954 void InterpreterMacroAssembler::get_const(Register Rdst) { |
6725 | 955 ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst); |
6123 | 956 } |
957 | |
958 | |
0 | 959 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) { |
6123 | 960 get_const(Rdst); |
6725 | 961 ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst); |
0 | 962 } |
963 | |
964 | |
965 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) { | |
966 get_constant_pool(Rdst); | |
6725 | 967 ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst); |
0 | 968 } |
969 | |
970 | |
971 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) { | |
972 get_constant_pool(Rcpool); | |
6725 | 973 ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags); |
0 | 974 } |
975 | |
976 | |
977 // unlock if synchronized method | |
978 // | |
979 // Unlock the receiver if this is a synchronized method. | |
980 // Unlock any Java monitors from synchronized blocks. | |
981 // | |
982 // If there are locked Java monitors | |
983 // If throw_monitor_exception | |
984 // throws IllegalMonitorStateException | |
985 // Else if install_monitor_exception | |
986 // installs IllegalMonitorStateException | |
987 // Else | |
988 // no error processing | |
989 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state, | |
990 bool throw_monitor_exception, | |
991 bool install_monitor_exception) { | |
992 Label unlocked, unlock, no_unlock; | |
993 | |
994 // get the value of _do_not_unlock_if_synchronized into G1_scratch | |
727 | 995 const Address do_not_unlock_if_synchronized(G2_thread, |
996 JavaThread::do_not_unlock_if_synchronized_offset()); | |
0 | 997 ldbool(do_not_unlock_if_synchronized, G1_scratch); |
998 stbool(G0, do_not_unlock_if_synchronized); // reset the flag | |
999 | |
1000 // check if synchronized method | |
6725 | 1001 const Address access_flags(Lmethod, Method::access_flags_offset()); |
0 | 1002 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
1003 push(state); // save tos | |
727 | 1004 ld(access_flags, G3_scratch); // Load access flags. |
0 | 1005 btst(JVM_ACC_SYNCHRONIZED, G3_scratch); |
727 | 1006 br(zero, false, pt, unlocked); |
0 | 1007 delayed()->nop(); |
1008 | |
1009 // Don't unlock anything if the _do_not_unlock_if_synchronized flag | |
1010 // is set. | |
3839 | 1011 cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock); |
0 | 1012 delayed()->nop(); |
1013 | |
1014 // BasicObjectLock will be first in list, since this is a synchronized method. However, need | |
1015 // to check that the object has not been unlocked by an explicit monitorexit bytecode. | |
1016 | |
1017 //Intel: if (throw_monitor_exception) ... else ... | |
1018 // Entry already unlocked, need to throw exception | |
1019 //... | |
1020 | |
1021 // pass top-most monitor elem | |
1022 add( top_most_monitor(), O1 ); | |
1023 | |
1024 ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch); | |
3839 | 1025 br_notnull_short(G3_scratch, pt, unlock); |
0 | 1026 |
1027 if (throw_monitor_exception) { | |
1028 // Entry already unlocked need to throw an exception | |
1029 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
1030 should_not_reach_here(); | |
1031 } else { | |
1032 // Monitor already unlocked during a stack unroll. | |
1033 // If requested, install an illegal_monitor_state_exception. | |
1034 // Continue with stack unrolling. | |
1035 if (install_monitor_exception) { | |
1036 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); | |
1037 } | |
3839 | 1038 ba_short(unlocked); |
0 | 1039 } |
1040 | |
1041 bind(unlock); | |
1042 | |
1043 unlock_object(O1); | |
1044 | |
1045 bind(unlocked); | |
1046 | |
1047 // I0, I1: Might contain return value | |
1048 | |
1049 // Check that all monitors are unlocked | |
1050 { Label loop, exception, entry, restart; | |
1051 | |
1052 Register Rmptr = O0; | |
1053 Register Rtemp = O1; | |
1054 Register Rlimit = Lmonitors; | |
1055 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
1056 assert( (delta & LongAlignmentMask) == 0, | |
1057 "sizeof BasicObjectLock must be even number of doublewords"); | |
1058 | |
1059 #ifdef ASSERT | |
1060 add(top_most_monitor(), Rmptr, delta); | |
1061 { Label L; | |
1062 // ensure that Rmptr starts out above (or at) Rlimit | |
3839 | 1063 cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L); |
0 | 1064 stop("monitor stack has negative size"); |
1065 bind(L); | |
1066 } | |
1067 #endif | |
1068 bind(restart); | |
3839 | 1069 ba(entry); |
0 | 1070 delayed()-> |
1071 add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry | |
1072 | |
1073 // Entry is still locked, need to throw exception | |
1074 bind(exception); | |
1075 if (throw_monitor_exception) { | |
1076 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
1077 should_not_reach_here(); | |
1078 } else { | |
1079 // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception. | |
1080 // Unlock does not block, so don't have to worry about the frame | |
1081 unlock_object(Rmptr); | |
1082 if (install_monitor_exception) { | |
1083 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); | |
1084 } | |
3839 | 1085 ba_short(restart); |
0 | 1086 } |
1087 | |
1088 bind(loop); | |
1089 cmp(Rtemp, G0); // check if current entry is used | |
1090 brx(Assembler::notEqual, false, pn, exception); | |
1091 delayed()-> | |
1092 dec(Rmptr, delta); // otherwise advance to next entry | |
1093 #ifdef ASSERT | |
1094 { Label L; | |
1095 // ensure that Rmptr has not somehow stepped below Rlimit | |
3839 | 1096 cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L); |
0 | 1097 stop("ran off the end of the monitor stack"); |
1098 bind(L); | |
1099 } | |
1100 #endif | |
1101 bind(entry); | |
1102 cmp(Rmptr, Rlimit); // check if bottom reached | |
1103 brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry | |
1104 delayed()-> | |
1105 ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp); | |
1106 } | |
1107 | |
1108 bind(no_unlock); | |
1109 pop(state); | |
1110 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
1111 } | |
1112 | |
1113 | |
1114 // remove activation | |
1115 // | |
1116 // Unlock the receiver if this is a synchronized method. | |
1117 // Unlock any Java monitors from synchronized blocks. | |
1118 // Remove the activation from the stack. | |
1119 // | |
1120 // If there are locked Java monitors | |
1121 // If throw_monitor_exception | |
1122 // throws IllegalMonitorStateException | |
1123 // Else if install_monitor_exception | |
1124 // installs IllegalMonitorStateException | |
1125 // Else | |
1126 // no error processing | |
1127 void InterpreterMacroAssembler::remove_activation(TosState state, | |
1128 bool throw_monitor_exception, | |
1129 bool install_monitor_exception) { | |
1130 | |
1131 unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception); | |
1132 | |
1133 // save result (push state before jvmti call and pop it afterwards) and notify jvmti | |
1134 notify_method_exit(false, state, NotifyJVMTI); | |
1135 | |
1136 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
1137 verify_thread(); | |
1138 | |
1139 // return tos | |
1140 assert(Otos_l1 == Otos_i, "adjust code below"); | |
1141 switch (state) { | |
1142 #ifdef _LP64 | |
1143 case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0 | |
1144 #else | |
1145 case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1 | |
1146 #endif | |
1147 case btos: // fall through | |
1148 case ctos: | |
1149 case stos: // fall through | |
1150 case atos: // fall through | |
1151 case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0 | |
1152 case ftos: // fall through | |
1153 case dtos: // fall through | |
1154 case vtos: /* nothing to do */ break; | |
1155 default : ShouldNotReachHere(); | |
1156 } | |
1157 | |
1158 #if defined(COMPILER2) && !defined(_LP64) | |
1159 if (state == ltos) { | |
1160 // C2 expects long results in G1; we can't tell if we're returning to interpreted |
1161 // or compiled code, so to be safe use both G1 and O0/O1 |
1162 | |
1163 // Shift bits into high (msb) of G1 | |
1164 sllx(Otos_l1->after_save(), 32, G1); | |
1165 // Zero extend low bits | |
1166 srl (Otos_l2->after_save(), 0, Otos_l2->after_save()); | |
1167 or3 (Otos_l2->after_save(), G1, G1); | |
1168 } | |
1169 #endif /* COMPILER2 */ | |
1170 | |
1171 } | |
1172 #endif /* CC_INTERP */ | |
1173 | |
1174 | |
1175 // Lock object | |
1176 // | |
1177 // Argument - lock_reg points to the BasicObjectLock to be used for locking, | |
1178 // it must be initialized with the object to lock | |
1179 void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) { | |
1180 if (UseHeavyMonitors) { | |
1181 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); | |
1182 } | |
1183 else { | |
1184 Register obj_reg = Object; | |
1185 Register mark_reg = G4_scratch; | |
1186 Register temp_reg = G1_scratch; | |
727 | 1187 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes()); |
1188 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); | |
0 | 1189 Label done; |
1190 | |
1191 Label slow_case; | |
1192 | |
1193 assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg); | |
1194 | |
1195 // load markOop from object into mark_reg | |
1196 ld_ptr(mark_addr, mark_reg); | |
1197 | |
1198 if (UseBiasedLocking) { | |
1199 biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case); | |
1200 } | |
1201 | |
1202 // get the address of basicLock on stack that will be stored in the object | |
1203 // we need a temporary register here as we do not want to clobber lock_reg | |
1204 // (cas clobbers the destination register) | |
1205 mov(lock_reg, temp_reg); | |
1206 // set mark reg to be (markOop of object | UNLOCK_VALUE) | |
1207 or3(mark_reg, markOopDesc::unlocked_value, mark_reg); | |
1208 // initialize the box (Must happen before we update the object mark!) | |
1209 st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes()); | |
1210 // compare and exchange object_addr, markOop | 1, stack address of basicLock | |
1211 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
1212 casx_under_lock(mark_addr.base(), mark_reg, temp_reg, | |
1213 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
1214 | |
1215 // if the compare and exchange succeeded we are done (we saw an unlocked object) | |
3839 | 1216 cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done); |
0 | 1217 |
1218 // We did not see an unlocked object so try the fast recursive case | |
1219 | |
1220 // Check if owner is self by comparing the value in the markOop of object | |
1221 // with the stack pointer | |
1222 sub(temp_reg, SP, temp_reg); | |
1223 #ifdef _LP64 | |
1224 sub(temp_reg, STACK_BIAS, temp_reg); | |
1225 #endif | |
1226 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); | |
1227 | |
1228 // Composite "andcc" test: | |
1229 // (a) %sp -vs- markword proximity check, and, | |
1230 // (b) verify mark word LSBs == 0 (Stack-locked). | |
1231 // | |
1232 // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size()) | |
1233 // Note that the page size used for %sp proximity testing is arbitrary and is | |
1234 // unrelated to the actual MMU page size. We use a 'logical' page size of | |
1235 // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate | |
1236 // field of the andcc instruction. | |
1237 andcc (temp_reg, 0xFFFFF003, G0) ; | |
1238 | |
1239 // if the condition is true we can store 0 in the displaced |
1240 // header, indicating a recursive lock, and we are done |
1241 brx(Assembler::zero, true, Assembler::pt, done); | |
1242 delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes()); | |
1243 | |
1244 // none of the above fast optimizations worked so we have to get into the | |
1245 // slow case of monitor enter | |
1246 bind(slow_case); | |
1247 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); | |
1248 | |
1249 bind(done); | |
1250 } | |
1251 } | |
1252 | |
1253 // Unlocks an object. Used in monitorexit bytecode and remove_activation. | |
1254 // | |
1255 // Argument - lock_reg points to the BasicObjectLock for lock | |
1256 // Throw IllegalMonitorStateException if the object is not locked by the current thread |
1257 void InterpreterMacroAssembler::unlock_object(Register lock_reg) { | |
1258 if (UseHeavyMonitors) { | |
1259 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); | |
1260 } else { | |
1261 Register obj_reg = G3_scratch; | |
1262 Register mark_reg = G4_scratch; | |
1263 Register displaced_header_reg = G1_scratch; | |
727 | 1264 Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes()); |
1265 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); | |
0 | 1266 Label done; |
1267 | |
1268 if (UseBiasedLocking) { | |
1269 // load the object out of the BasicObjectLock | |
1270 ld_ptr(lockobj_addr, obj_reg); | |
1271 biased_locking_exit(mark_addr, mark_reg, done, true); | |
1272 st_ptr(G0, lockobj_addr); // free entry | |
1273 } | |
1274 | |
1275 // Test first if we are in the fast recursive case | |
727 | 1276 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes()); |
1277 ld_ptr(lock_addr, displaced_header_reg); | |
0 | 1278 br_null(displaced_header_reg, true, Assembler::pn, done); |
1279 delayed()->st_ptr(G0, lockobj_addr); // free entry | |
1280 | |
1281 // See if it is still a light weight lock, if so we just unlock | |
1282 // the object and we are done | |
1283 | |
1284 if (!UseBiasedLocking) { | |
1285 // load the object out of the BasicObjectLock | |
1286 ld_ptr(lockobj_addr, obj_reg); | |
1287 } | |
1288 | |
1289 // we have the displaced header in displaced_header_reg | |
1290 // we expect to see the stack address of the basicLock in case the | |
1291 // lock is still a light weight lock (lock_reg) | |
1292 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
1293 casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg, | |
1294 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
1295 cmp(lock_reg, displaced_header_reg); | |
1296 brx(Assembler::equal, true, Assembler::pn, done); | |
1297 delayed()->st_ptr(G0, lockobj_addr); // free entry | |
1298 | |
1299 // The lock has been converted into a heavy lock and hence | |
1300 // we need to get into the slow case | |
1301 | |
1302 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); | |
1303 | |
1304 bind(done); | |
1305 } | |
1306 } | |
1307 | |
1308 #ifndef CC_INTERP | |
1309 | |
6725 | 1310 // Get the method data pointer from the Method* and set the |
0 | 1311 // specified register to its value. |
1312 | |
2118 | 1313 void InterpreterMacroAssembler::set_method_data_pointer() { |
0 | 1314 assert(ProfileInterpreter, "must be profiling interpreter"); |
1315 Label get_continue; | |
1316 | |
6725 | 1317   ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr); |
0 | 1318 test_method_data_pointer(get_continue); |
6725 | 1319   add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr); |
0 | 1320 bind(get_continue); |
1321 } | |
1322 | |
1323 // Set the method data pointer for the current bcp. | |
1324 | |
1325 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { | |
1326 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1327 Label zero_continue; | |
1328 | |
1329 // Test MDO to avoid the call if it is NULL. | |
6725 | 1330   ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr); |
0 | 1331 test_method_data_pointer(zero_continue); |
1332 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); | |
6725 | 1333   add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr); |
2118 | 1334   add(ImethodDataPtr, O0, ImethodDataPtr); |
0 | 1335 bind(zero_continue); |
1336 } | |
1337 | |
1338 // Test ImethodDataPtr. If it is null, continue at the specified label | |
1339 | |
1340 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) { | |
1341 assert(ProfileInterpreter, "must be profiling interpreter"); | |
3839 | 1342 br_null_short(ImethodDataPtr, Assembler::pn, zero_continue); |
0 | 1343 } |
1344 | |
1345 void InterpreterMacroAssembler::verify_method_data_pointer() { | |
1346 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1347 #ifdef ASSERT | |
1348 Label verify_continue; | |
1349 test_method_data_pointer(verify_continue); | |
1350 | |
1351 // If the mdp is valid, it will point to a DataLayout header which is | |
1352 // consistent with the bcp. The converse is highly probable also. | |
1353 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); | |
6725 | 1354   ld_ptr(Lmethod, Method::const_offset(), O5); |
1355 add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch); |
0 | 1356 add(G3_scratch, O5, G3_scratch); |
1357 cmp(Lbcp, G3_scratch); | |
1358 brx(Assembler::equal, false, Assembler::pt, verify_continue); | |
1359 | |
1360 Register temp_reg = O5; | |
1361 delayed()->mov(ImethodDataPtr, temp_reg); | |
1362 // %%% should use call_VM_leaf here? | |
1363 //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); | |
1364 save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); | |
727 | 1365 Address d_save(FP, -sizeof(jdouble) + STACK_BIAS); |
0 | 1366 stf(FloatRegisterImpl::D, Ftos_d, d_save); |
1367 mov(temp_reg->after_save(), O2); | |
1368 save_thread(L7_thread_cache); | |
1369 call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none); | |
1370 delayed()->nop(); | |
1371 restore_thread(L7_thread_cache); | |
1372 ldf(FloatRegisterImpl::D, d_save, Ftos_d); | |
1373 restore(); | |
1374 bind(verify_continue); | |
1375 #endif // ASSERT | |
1376 } | |
1377 | |
1378 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count, | |
1379 Register Rtmp, | |
1380 Label &profile_continue) { | |
1381 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1382 // Control will flow to "profile_continue" if the counter is less than the | |
1383 // limit or if we call profile_method() | |
1384 | |
1385 Label done; | |
1386 | |
1387 // if no method data exists, and the counter is high enough, make one | |
3839 | 1388 br_notnull_short(ImethodDataPtr, Assembler::pn, done); |
0 | 1389 |
1390 // Test to see if we should create a method data oop | |
727 | 1391 AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit); |
1392 sethi(profile_limit, Rtmp); | |
1393 ld(Rtmp, profile_limit.low10(), Rtmp); | |
6805 | 1394   cmp(invocation_count, Rtmp); |
1395 // Use long branches because call_VM() code and following code generated by |
1396 // test_backedge_count_for_osr() is large in debug VM. |
1397 br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); |
1398 delayed()->nop(); |
0 | 1399 |
1400 // Build it now. | |
2118 | 1401     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); |
1402 set_method_data_pointer_for_bcp(); |
6805 | 1403   ba(profile_continue); |
1404 delayed()->nop(); |
0 | 1405 bind(done); |
1406 } | |
1407 | |
1408 // Store a value at some constant offset from the method data pointer. | |
1409 | |
1410 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) { | |
1411 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1412 st_ptr(value, ImethodDataPtr, constant); | |
1413 } | |
1414 | |
1415 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter, | |
1416 Register bumped_count, | |
1417 bool decrement) { | |
1418 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1419 | |
1420 // Load the counter. | |
1421 ld_ptr(counter, bumped_count); | |
1422 | |
1423 if (decrement) { | |
1424 // Decrement the register. Set condition codes. | |
1425 subcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1426 | |
1427 // If the decrement causes the counter to overflow, stay negative | |
1428 Label L; | |
1429 brx(Assembler::negative, true, Assembler::pn, L); | |
1430 | |
1431 // Store the decremented counter, if it is still negative. | |
1432 delayed()->st_ptr(bumped_count, counter); | |
1433 bind(L); | |
1434 } else { | |
1435 // Increment the register. Set carry flag. | |
1436 addcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1437 | |
1438 // If the increment causes the counter to overflow, pull back by 1. | |
1439 assert(DataLayout::counter_increment == 1, "subc works"); | |
1440 subc(bumped_count, G0, bumped_count); | |
1441 | |
1442 // Store the incremented counter. | |
1443 st_ptr(bumped_count, counter); | |
1444 } | |
1445 } | |
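// Illustrative sketch of the increment path above in plain C (assumes
// DataLayout::counter_increment == 1, as asserted; this is not generated code):
//   uintptr_t c = *counter + 1;   // addcc: carry is set iff the add wrapped to 0
//   if (c == 0) c -= 1;           // subc: pull back by the carry, saturating at all-ones
//   *counter = c;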
1446 | |
1447 // Increment the value at some constant offset from the method data pointer. | |
1448 | |
1449 void InterpreterMacroAssembler::increment_mdp_data_at(int constant, | |
1450 Register bumped_count, | |
1451 bool decrement) { | |
1452 // Locate the counter at a fixed offset from the mdp: | |
727 | 1453 Address counter(ImethodDataPtr, constant); |
0 | 1454 increment_mdp_data_at(counter, bumped_count, decrement); |
1455 } | |
1456 | |
1457 // Increment the value at some non-fixed (reg + constant) offset from | |
1458 // the method data pointer. | |
1459 | |
1460 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg, | |
1461 int constant, | |
1462 Register bumped_count, | |
1463 Register scratch2, | |
1464 bool decrement) { | |
1465 // Add the constant to reg to get the offset. | |
1466 add(ImethodDataPtr, reg, scratch2); | |
727 | 1467 Address counter(scratch2, constant); |
0 | 1468 increment_mdp_data_at(counter, bumped_count, decrement); |
1469 } | |
1470 | |
1471 // Set a flag value at the current method data pointer position. | |
1472 // Updates a single byte of the header, to avoid races with other header bits. | |
1473 | |
1474 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant, | |
1475 Register scratch) { | |
1476 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1477 // Load the data header | |
1478 ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch); | |
1479 | |
1480 // Set the flag | |
1481 or3(scratch, flag_constant, scratch); | |
1482 | |
1483 // Store the modified header. | |
1484 stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset())); | |
1485 } | |
1486 | |
1487 // Test the location at some offset from the method data pointer. | |
1488 // If it is not equal to value, branch to the not_equal_continue Label. | |
1489 // Set condition codes to match the nullness of the loaded value. | |
1490 | |
1491 void InterpreterMacroAssembler::test_mdp_data_at(int offset, | |
1492 Register value, | |
1493 Label& not_equal_continue, | |
1494 Register scratch) { | |
1495 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1496 ld_ptr(ImethodDataPtr, offset, scratch); | |
1497 cmp(value, scratch); | |
1498 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue); | |
1499 delayed()->tst(scratch); | |
1500 } | |
1501 | |
1502 // Update the method data pointer by the displacement located at some fixed | |
1503 // offset from the method data pointer. | |
1504 | |
1505 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp, | |
1506 Register scratch) { | |
1507 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1508 ld_ptr(ImethodDataPtr, offset_of_disp, scratch); | |
1509 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1510 } | |
1511 | |
1512 // Update the method data pointer by the displacement located at the | |
1513 // offset (reg + offset_of_disp). | |
1514 | |
1515 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg, | |
1516 int offset_of_disp, | |
1517 Register scratch) { | |
1518 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1519 add(reg, offset_of_disp, scratch); | |
1520 ld_ptr(ImethodDataPtr, scratch, scratch); | |
1521 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1522 } | |
1523 | |
1524 // Update the method data pointer by a simple constant displacement. | |
1525 | |
1526 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) { | |
1527 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1528 add(ImethodDataPtr, constant, ImethodDataPtr); | |
1529 } | |
1530 | |
1531 // Update the method data pointer for a _ret bytecode whose target | |
1532 // was not among our cached targets. | |
1533 | |
1534 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state, | |
1535 Register return_bci) { | |
1536 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1537 push(state); | |
1538 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile | |
1539 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); | |
1540 ld_ptr(l_tmp, return_bci); | |
1541 pop(state); | |
1542 } | |
1543 | |
1544 // Count a taken branch in the bytecodes. | |
1545 | |
1546 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) { | |
1547 if (ProfileInterpreter) { | |
1548 Label profile_continue; | |
1549 | |
1550 // If no method data exists, go to profile_continue. | |
1551 test_method_data_pointer(profile_continue); | |
1552 | |
1553 // We are taking a branch. Increment the taken count. | |
1554 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count); | |
1555 | |
1556 // The method data pointer needs to be updated to reflect the new target. | |
1557 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch); | |
1558 bind (profile_continue); | |
1559 } | |
1560 } | |
1561 | |
1562 | |
1563 // Count a not-taken branch in the bytecodes. | |
1564 | |
1565 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) { | |
1566 if (ProfileInterpreter) { | |
1567 Label profile_continue; | |
1568 | |
1569 // If no method data exists, go to profile_continue. | |
1570 test_method_data_pointer(profile_continue); | |
1571 | |
1572 // We are taking a branch. Increment the not taken count. | |
1573 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch); | |
1574 | |
1575 // The method data pointer needs to be updated to correspond to the | |
1576 // next bytecode. | |
1577 update_mdp_by_constant(in_bytes(BranchData::branch_data_size())); | |
1578 bind (profile_continue); | |
1579 } | |
1580 } | |
1581 | |
1582 | |
1583 // Count a non-virtual call in the bytecodes. | |
1584 | |
1585 void InterpreterMacroAssembler::profile_call(Register scratch) { | |
1586 if (ProfileInterpreter) { | |
1587 Label profile_continue; | |
1588 | |
1589 // If no method data exists, go to profile_continue. | |
1590 test_method_data_pointer(profile_continue); | |
1591 | |
1592 // We are making a call. Increment the count. | |
1593 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1594 | |
1595 // The method data pointer needs to be updated to reflect the new target. | |
1596 update_mdp_by_constant(in_bytes(CounterData::counter_data_size())); | |
1597 bind (profile_continue); | |
1598 } | |
1599 } | |
1600 | |
1601 | |
1602 // Count a final call in the bytecodes. | |
1603 | |
1604 void InterpreterMacroAssembler::profile_final_call(Register scratch) { | |
1605 if (ProfileInterpreter) { | |
1606 Label profile_continue; | |
1607 | |
1608 // If no method data exists, go to profile_continue. | |
1609 test_method_data_pointer(profile_continue); | |
1610 | |
1611 // We are making a call. Increment the count. | |
1612 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1613 | |
1614 // The method data pointer needs to be updated to reflect the new target. | |
1615 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1616 bind (profile_continue); | |
1617 } | |
1618 } | |
1619 | |
1620 | |
1621 // Count a virtual call in the bytecodes. | |
1622 | |
1623 void InterpreterMacroAssembler::profile_virtual_call(Register receiver, | |
1503 | 1624 Register scratch, |
1625 bool receiver_can_be_null) { | |
0 | 1626 if (ProfileInterpreter) { |
1627 Label profile_continue; | |
1628 | |
1629 // If no method data exists, go to profile_continue. | |
1630 test_method_data_pointer(profile_continue); | |
1631 | |
1503 | 1632 |
1633 Label skip_receiver_profile; | |
1634 if (receiver_can_be_null) { | |
1635 Label not_null; | |
3839 | 1636 br_notnull_short(receiver, Assembler::pt, not_null); |
1503 | 1637 // We are making a call. Increment the count for null receiver. |
1638 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
3839 | 1639 ba_short(skip_receiver_profile); |
1503 | 1640 bind(not_null); |
1641 } | |
1642 | |
0 | 1643 // Record the receiver type. |
1206 | 1644     record_klass_in_profile(receiver, scratch, true); |
1503 | 1645 bind(skip_receiver_profile); |
0 | 1646 |
1647 // The method data pointer needs to be updated to reflect the new target. | |
1648 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1649 bind (profile_continue); | |
1650 } | |
1651 } | |
1652 | |
1653 void InterpreterMacroAssembler::record_klass_in_profile_helper( | |
1654 Register receiver, Register scratch, | |
1206 | 1655                                     int start_row, Label& done, bool is_virtual_call) { |
1656 if (TypeProfileWidth == 0) { |
1657 if (is_virtual_call) { |
1658 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
1659 } |
967 | 1660     return; |
1206 | 1661   } |
967 | 1662 |
0 | 1663 int last_row = VirtualCallData::row_limit() - 1; |
1664 assert(start_row <= last_row, "must be work left to do"); | |
1665 // Test this row for both the receiver and for null. | |
1666 // Take any of three different outcomes: | |
1667 // 1. found receiver => increment count and goto done | |
1668 // 2. found null => keep looking for case 1, maybe allocate this cell | |
1669 // 3. found something else => keep looking for cases 1 and 2 | |
1670 // Case 3 is handled by a recursive call. | |
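  // Rough shape of one unrolled iteration, for reference (illustrative pseudocode only;
  // the null test is emitted only for the first row of each recursion level):
  //   if (receiver_cell[row] == receiver) { count_cell[row] += increment; goto done; }  // case 1
  //   else if (receiver_cell[row] == NULL) { /* claim this empty row at the end */ }    // case 2
  //   else { /* keep scanning: recurse on the remaining rows */ }                       // case 3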
1671 for (int row = start_row; row <= last_row; row++) { | |
1672 Label next_test; | |
1673 bool test_for_null_also = (row == start_row); | |
1674 | |
1675 // See if the receiver is receiver[n]. | |
1676 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); | |
1677 test_mdp_data_at(recvr_offset, receiver, next_test, scratch); | |
1206 | 1678     // delayed()->tst(scratch); |
0 | 1679 |
1680 // The receiver is receiver[n]. Increment count[n]. | |
1681 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); | |
1682 increment_mdp_data_at(count_offset, scratch); | |
3839 | 1683 ba_short(done); |
0 | 1684 bind(next_test); |
1685 | |
1686 if (test_for_null_also) { | |
1206 | 1687       Label found_null; |
0 | 1688 // Failed the equality check on receiver[n]... Test for null. |
1689 if (start_row == last_row) { | |
1690 // The only thing left to do is handle the null case. | |
1206 | 1691         if (is_virtual_call) { |
1692 brx(Assembler::zero, false, Assembler::pn, found_null); |
1693 delayed()->nop(); |
1694 // Receiver did not match any saved receiver and there is no empty row for it. |
1251 | 1695           // Increment total counter to indicate polymorphic case. |
1206 | 1696           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
3839 | 1697 ba_short(done); |
1206 | 1698           bind(found_null); |
1699 } else { |
1700 brx(Assembler::notZero, false, Assembler::pt, done); |
1701 delayed()->nop(); |
1702 } |
0 | 1703 break; |
1704 } | |
1705 // Since null is rare, make it be the branch-taken case. | |
1706 brx(Assembler::zero, false, Assembler::pn, found_null); | |
1707 delayed()->nop(); | |
1708 | |
1709 // Put all the "Case 3" tests here. | |
1206 | 1710       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call); |
0 | 1711 |
1712 // Found a null. Keep searching for a matching receiver, | |
1713 // but remember that this is an empty (unused) slot. | |
1714 bind(found_null); | |
1715 } | |
1716 } | |
1717 | |
1718 // In the fall-through case, we found no matching receiver, but we | |
1719 // observed that receiver[start_row] is NULL. |
1720 | |
1721 // Fill in the receiver field and increment the count. | |
1722 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); | |
1723 set_mdp_data_at(recvr_offset, receiver); | |
1724 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); | |
1725 mov(DataLayout::counter_increment, scratch); | |
1726 set_mdp_data_at(count_offset, scratch); | |
1206 | 1727   if (start_row > 0) { |
3839 | 1728 ba_short(done); |
1206 | 1729   } |
0 | 1730 } |
1731 | |
1732 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, | |
1206 | 1733                                                   Register scratch, bool is_virtual_call) { |
0 | 1734 assert(ProfileInterpreter, "must be profiling"); |
1735 Label done; | |
1736 | |
1206 | 1737   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call); |
0 | 1738 |
1739 bind (done); | |
1740 } | |
1741 | |
1742 | |
1743 // Count a ret in the bytecodes. | |
1744 | |
1745 void InterpreterMacroAssembler::profile_ret(TosState state, | |
1746 Register return_bci, | |
1747 Register scratch) { | |
1748 if (ProfileInterpreter) { | |
1749 Label profile_continue; | |
1750 uint row; | |
1751 | |
1752 // If no method data exists, go to profile_continue. | |
1753 test_method_data_pointer(profile_continue); | |
1754 | |
1755 // Update the total ret count. | |
1756 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1757 | |
1758 for (row = 0; row < RetData::row_limit(); row++) { | |
1759 Label next_test; | |
1760 | |
1761 // See if return_bci is equal to bci[n]: | |
1762 test_mdp_data_at(in_bytes(RetData::bci_offset(row)), | |
1763 return_bci, next_test, scratch); | |
1764 | |
1765 // return_bci is equal to bci[n]. Increment the count. | |
1766 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch); | |
1767 | |
1768 // The method data pointer needs to be updated to reflect the new target. | |
1769 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch); | |
3839 | 1770 ba_short(profile_continue); |
0 | 1771 bind(next_test); |
1772 } | |
1773 | |
1774 update_mdp_for_ret(state, return_bci); | |
1775 | |
1776 bind (profile_continue); | |
1777 } | |
1778 } | |
1779 | |
1780 // Profile an unexpected null in the bytecodes. | |
1781 void InterpreterMacroAssembler::profile_null_seen(Register scratch) { | |
1782 if (ProfileInterpreter) { | |
1783 Label profile_continue; | |
1784 | |
1785 // If no method data exists, go to profile_continue. | |
1786 test_method_data_pointer(profile_continue); | |
1787 | |
1788 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch); | |
1789 | |
1790 // The method data pointer needs to be updated. | |
1791 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1792 if (TypeProfileCasts) { | |
1793 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1794 } | |
1795 update_mdp_by_constant(mdp_delta); | |
1796 | |
1797 bind (profile_continue); | |
1798 } | |
1799 } | |
1800 | |
1801 void InterpreterMacroAssembler::profile_typecheck(Register klass, | |
1802 Register scratch) { | |
1803 if (ProfileInterpreter) { | |
1804 Label profile_continue; | |
1805 | |
1806 // If no method data exists, go to profile_continue. | |
1807 test_method_data_pointer(profile_continue); | |
1808 | |
1809 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1810 if (TypeProfileCasts) { | |
1811 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1812 | |
1813 // Record the object type. | |
1206 | 1814       record_klass_in_profile(klass, scratch, false); |
0 | 1815 } |
1816 | |
1817 // The method data pointer needs to be updated. | |
1818 update_mdp_by_constant(mdp_delta); | |
1819 | |
1820 bind (profile_continue); | |
1821 } | |
1822 } | |
1823 | |
1824 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) { | |
1825 if (ProfileInterpreter && TypeProfileCasts) { | |
1826 Label profile_continue; | |
1827 | |
1828 // If no method data exists, go to profile_continue. | |
1829 test_method_data_pointer(profile_continue); | |
1830 | |
1831 int count_offset = in_bytes(CounterData::count_offset()); | |
1832 // Back up the address, since we have already bumped the mdp. | |
1833 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); | |
1834 | |
1835 // *Decrement* the counter. We expect to see zero or small negatives. | |
1836 increment_mdp_data_at(count_offset, scratch, true); | |
1837 | |
1838 bind (profile_continue); | |
1839 } | |
1840 } | |
1841 | |
1842 // Count the default case of a switch construct. | |
1843 | |
1844 void InterpreterMacroAssembler::profile_switch_default(Register scratch) { | |
1845 if (ProfileInterpreter) { | |
1846 Label profile_continue; | |
1847 | |
1848 // If no method data exists, go to profile_continue. | |
1849 test_method_data_pointer(profile_continue); | |
1850 | |
1851 // Update the default case count | |
1852 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()), | |
1853 scratch); | |
1854 | |
1855 // The method data pointer needs to be updated. | |
1856 update_mdp_by_offset( | |
1857 in_bytes(MultiBranchData::default_displacement_offset()), | |
1858 scratch); | |
1859 | |
1860 bind (profile_continue); | |
1861 } | |
1862 } | |
1863 | |
1864 // Count the index'th case of a switch construct. | |
1865 | |
1866 void InterpreterMacroAssembler::profile_switch_case(Register index, | |
1867 Register scratch, | |
1868 Register scratch2, | |
1869 Register scratch3) { | |
1870 if (ProfileInterpreter) { | |
1871 Label profile_continue; | |
1872 | |
1873 // If no method data exists, go to profile_continue. | |
1874 test_method_data_pointer(profile_continue); | |
1875 | |
1876 // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes() | |
1877 set(in_bytes(MultiBranchData::per_case_size()), scratch); | |
1878 smul(index, scratch, scratch); | |
1879 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch); | |
1880 | |
1881 // Update the case count | |
1882 increment_mdp_data_at(scratch, | |
1883 in_bytes(MultiBranchData::relative_count_offset()), | |
1884 scratch2, | |
1885 scratch3); | |
1886 | |
1887 // The method data pointer needs to be updated. | |
1888 update_mdp_by_offset(scratch, | |
1889 in_bytes(MultiBranchData::relative_displacement_offset()), | |
1890 scratch2); | |
1891 | |
1892 bind (profile_continue); | |
1893 } | |
1894 } | |
1895 | |
1896 // add an InterpMonitorElem to the stack (see frame_sparc.hpp) |
1897 | |
1898 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty, | |
1899 Register Rtemp, | |
1900 Register Rtemp2 ) { | |
1901 | |
1902 Register Rlimit = Lmonitors; | |
1903 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
1904 assert( (delta & LongAlignmentMask) == 0, | |
1905 "sizeof BasicObjectLock must be even number of doublewords"); | |
1906 | |
1907 sub( SP, delta, SP); | |
1908 sub( Lesp, delta, Lesp); | |
1909 sub( Lmonitors, delta, Lmonitors); | |
1910 | |
1911 if (!stack_is_empty) { | |
1912 | |
1913 // must copy stack contents down | |
1914 | |
1915 Label start_copying, next; | |
1916 | |
1917 // untested("monitor stack expansion"); | |
1918 compute_stack_base(Rtemp); | |
3839 | 1919 ba(start_copying); |
1920 delayed()->cmp(Rtemp, Rlimit); // done? duplicated below | |
0 | 1921 |
1922 // note: must copy from low memory upwards | |
1923 // On entry to loop, | |
1924 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS) | |
1925 // Loop mutates Rtemp | |
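  // Equivalent of the copy loop below in plain C (illustrative only; Rtemp starts at
  // the new stack base and Rlimit aliases the already-adjusted Lmonitors):
  //   for (p = new_base; p != limit; p += wordSize)
  //     *(intptr_t*)p = *(intptr_t*)(p + delta);   // slide each word down by delta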
1926 | |
1927 bind( next); | |
1928 | |
1929 st_ptr(Rtemp2, Rtemp, 0); | |
1930 inc(Rtemp, wordSize); | |
1931 cmp(Rtemp, Rlimit); // are we done? (duplicated above) | |
1932 | |
1933 bind( start_copying ); | |
1934 | |
1935 brx( notEqual, true, pn, next ); | |
1936 delayed()->ld_ptr( Rtemp, delta, Rtemp2 ); | |
1937 | |
1938 // done copying stack | |
1939 } | |
1940 } | |
1941 | |
1942 // Locals | |
1943 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) { | |
1944 assert_not_delayed(); | |
1506 | 1945 sll(index, Interpreter::logStackElementSize, index); |
0 | 1946 sub(Llocals, index, index); |
1506 | 1947 ld_ptr(index, 0, dst); |
0 | 1948 // Note: index must hold the effective address--the iinc template uses it |
1949 } | |
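// Address computation shared by the access_local_* and store_local_* helpers in this
// file (illustrative summary, not generated code): for a raw local index i,
//   addr = Llocals - (i << Interpreter::logStackElementSize)
// so locals are laid out at decreasing addresses starting from Llocals.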
1950 | |
1951 // Just like access_local_ptr but the tag is a returnAddress | |
1952 void InterpreterMacroAssembler::access_local_returnAddress(Register index, | |
1953 Register dst ) { | |
1954 assert_not_delayed(); | |
1506 | 1955 sll(index, Interpreter::logStackElementSize, index); |
0 | 1956 sub(Llocals, index, index); |
1506 | 1957 ld_ptr(index, 0, dst); |
0 | 1958 } |
1959 | |
1960 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) { | |
1961 assert_not_delayed(); | |
1506 | 1962 sll(index, Interpreter::logStackElementSize, index); |
0 | 1963 sub(Llocals, index, index); |
1506 | 1964 ld(index, 0, dst); |
0 | 1965 // Note: index must hold the effective address--the iinc template uses it |
1966 } | |
1967 | |
1968 | |
1969 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) { | |
1970 assert_not_delayed(); | |
1506 | 1971 sll(index, Interpreter::logStackElementSize, index); |
0 | 1972 sub(Llocals, index, index); |
1973 // First half stored at index n+1 (which grows down from Llocals[n]) | |
1974 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst); | |
1975 } | |
1976 | |
1977 | |
1978 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) { | |
1979 assert_not_delayed(); | |
1506 | 1980 sll(index, Interpreter::logStackElementSize, index); |
0 | 1981 sub(Llocals, index, index); |
1506 | 1982 ldf(FloatRegisterImpl::S, index, 0, dst); |
0 | 1983 } |
1984 | |
1985 | |
1986 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) { | |
1987 assert_not_delayed(); | |
1506 | 1988 sll(index, Interpreter::logStackElementSize, index); |
0 | 1989 sub(Llocals, index, index); |
1990 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst); | |
1991 } | |
1992 | |
1993 | |
1994 #ifdef ASSERT | |
1995 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) { | |
1996 Label L; | |
1997 | |
1998 assert(Rindex != Rscratch, "Registers cannot be same"); | |
1999 assert(Rindex != Rscratch1, "Registers cannot be same"); | |
2000 assert(Rlimit != Rscratch, "Registers cannot be same"); | |
2001 assert(Rlimit != Rscratch1, "Registers cannot be same"); | |
2002 assert(Rscratch1 != Rscratch, "Registers cannot be same"); | |
2003 | |
2004 // untested("reg area corruption"); | |
2005 add(Rindex, offset, Rscratch); | |
2006 add(Rlimit, 64 + STACK_BIAS, Rscratch1); | |
3839 | 2007 cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L); |
0 | 2008 stop("regsave area is being clobbered"); |
2009 bind(L); | |
2010 } | |
2011 #endif // ASSERT | |
2012 | |
2013 | |
2014 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) { | |
2015 assert_not_delayed(); | |
1506 | 2016 sll(index, Interpreter::logStackElementSize, index); |
0 | 2017 sub(Llocals, index, index); |
1506 | 2018 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);) |
2019 st(src, index, 0); | |
0 | 2020 } |
2021 | |
1506 | 2022 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) { |
0 | 2023 assert_not_delayed(); |
1506 | 2024 sll(index, Interpreter::logStackElementSize, index); |
0 | 2025 sub(Llocals, index, index); |
1506 | 2026 #ifdef ASSERT |
2027 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); | |
2028 #endif | |
2029 st_ptr(src, index, 0); | |
0 | 2030 } |
2031 | |
2032 | |
2033 | |
1506 | 2034 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) { |
2035 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n)); | |
0 | 2036 } |
2037 | |
2038 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) { | |
2039 assert_not_delayed(); | |
1506 | 2040 sll(index, Interpreter::logStackElementSize, index); |
0 | 2041 sub(Llocals, index, index); |
1506 | 2042 #ifdef ASSERT |
0 | 2043 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
1506 | 2044 #endif |
0 | 2045 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1 |
2046 } | |
2047 | |
2048 | |
2049 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) { | |
2050 assert_not_delayed(); | |
1506 | 2051 sll(index, Interpreter::logStackElementSize, index); |
0 | 2052 sub(Llocals, index, index); |
1506 | 2053 #ifdef ASSERT |
2054 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); | |
2055 #endif | |
2056 stf(FloatRegisterImpl::S, src, index, 0); | |
0 | 2057 } |
2058 | |
2059 | |
2060 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) { | |
2061 assert_not_delayed(); | |
1506 | 2062 sll(index, Interpreter::logStackElementSize, index); |
0 | 2063 sub(Llocals, index, index); |
1506 | 2064 #ifdef ASSERT |
0 | 2065 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
1506 | 2066 #endif |
0 | 2067 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1)); |
2068 } | |
2069 | |
2070 | |
2071 int InterpreterMacroAssembler::top_most_monitor_byte_offset() { | |
2072 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
2073 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong); | |
2074 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS; | |
2075 } | |
2076 | |
2077 | |
2078 Address InterpreterMacroAssembler::top_most_monitor() { | |
727 | 2079 return Address(FP, top_most_monitor_byte_offset()); |
0 | 2080 } |
2081 | |
2082 | |
2083 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { | |
2084 add( Lesp, wordSize, Rdest ); | |
2085 } | |
2086 | |
2087 #endif /* CC_INTERP */ | |
2088 | |
2089 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { | |
2090 assert(UseCompiler, "incrementing must be useful"); | |
2091 #ifdef CC_INTERP | |
6725 | 2092   Address inv_counter(G5_method, Method::invocation_counter_offset() + |
727 | 2093 InvocationCounter::counter_offset()); |
6725 | 2094   Address be_counter (G5_method, Method::backedge_counter_offset() + |
727 | 2095 InvocationCounter::counter_offset()); |
0 | 2096 #else |
6725 | 2097   Address inv_counter(Lmethod, Method::invocation_counter_offset() + |
727 | 2098 InvocationCounter::counter_offset()); |
6725 | 2099   Address be_counter (Lmethod, Method::backedge_counter_offset() + |
727 | 2100 InvocationCounter::counter_offset()); |
0 | 2101 #endif /* CC_INTERP */ |
2102 int delta = InvocationCounter::count_increment; | |
2103 | |
2104 // Load each counter in a register | |
2105 ld( inv_counter, Rtmp ); | |
2106 ld( be_counter, Rtmp2 ); | |
2107 | |
2108 assert( is_simm13( delta ), " delta too large."); | |
2109 | |
2110 // Add the delta to the invocation counter and store the result | |
2111 add( Rtmp, delta, Rtmp ); | |
2112 | |
2113 // Mask the backedge counter | |
2114 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2115 | |
2116 // Store value | |
2117 st( Rtmp, inv_counter); | |
2118 | |
2119 // Add invocation counter + backedge counter | |
2120 add( Rtmp, Rtmp2, Rtmp); | |
2121 | |
2122 // Note that this macro must leave the backedge_count + invocation_count in Rtmp! | |
2123 } | |
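// Illustrative summary of the update above (not generated code), with
// delta == InvocationCounter::count_increment:
//   inv_counter = inv_counter + delta;                                        // stored back
//   Rtmp        = inv_counter + (be_counter & InvocationCounter::count_mask_value);
//                                        // combined total left in Rtmp for the caller's limit checks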
2124 | |
2125 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { | |
2126 assert(UseCompiler, "incrementing must be useful"); | |
2127 #ifdef CC_INTERP | |
6725 | 2128   Address be_counter (G5_method, Method::backedge_counter_offset() + |
727 | 2129 InvocationCounter::counter_offset()); |
6725 | 2130   Address inv_counter(G5_method, Method::invocation_counter_offset() + |
727 | 2131 InvocationCounter::counter_offset()); |
0 | 2132 #else |
6725 | 2133   Address be_counter (Lmethod, Method::backedge_counter_offset() + |
727 | 2134 InvocationCounter::counter_offset()); |
6725 | 2135   Address inv_counter(Lmethod, Method::invocation_counter_offset() + |
727 | 2136 InvocationCounter::counter_offset()); |
0 | 2137 #endif /* CC_INTERP */ |
2138 int delta = InvocationCounter::count_increment; | |
2139 // Load each counter in a register | |
2140 ld( be_counter, Rtmp ); | |
2141 ld( inv_counter, Rtmp2 ); | |
2142 | |
2143 // Add the delta to the backedge counter | |
2144 add( Rtmp, delta, Rtmp ); | |
2145 | |
2146 // Mask the invocation counter, add to backedge counter | |
2147 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2148 | |
2149 // and store the result to memory | |
2150 st( Rtmp, be_counter ); | |
2151 | |
2152 // Add backedge + invocation counter | |
2153 add( Rtmp, Rtmp2, Rtmp ); | |
2154 | |
2155 // Note that this macro must leave backedge_count + invocation_count in Rtmp! | |
2156 } | |
2157 | |
2158 #ifndef CC_INTERP | |
2159 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count, | |
2160 Register branch_bcp, | |
2161 Register Rtmp ) { | |
2162 Label did_not_overflow; | |
2163 Label overflow_with_error; | |
2164 assert_different_registers(backedge_count, Rtmp, branch_bcp); | |
2165 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); | |
2166 | |
727 | 2167 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); |
0 | 2168 load_contents(limit, Rtmp); |
3839 | 2169 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow); |
0 | 2170 |
2171 // When ProfileInterpreter is on, the backedge_count comes from the | |
6725 | 2172   // MethodData*, whose value does not get reset on the call to |
0 | 2173 // frequency_counter_overflow(). To avoid excessive calls to the overflow |
2174 // routine while the method is being compiled, add a second test to make sure | |
2175 // the overflow function is called only once every overflow_frequency. | |
2176 if (ProfileInterpreter) { | |
2177 const int overflow_frequency = 1024; | |
2178 andcc(backedge_count, overflow_frequency-1, Rtmp); | |
2179 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow); | |
2180 delayed()->nop(); | |
2181 } | |
2182 | |
2183 // overflow in loop, pass branch bytecode | |
2184 set(6,Rtmp); | |
2185 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp); | |
2186 | |
2187 // Was an OSR adapter generated? | |
2188 // O0 = osr nmethod | |
3839 | 2189 br_null_short(O0, Assembler::pn, overflow_with_error); |
0 | 2190 |
2191 // Has the nmethod been invalidated already? | |
2192 ld(O0, nmethod::entry_bci_offset(), O2); | |
3839 | 2193 cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error); |
0 | 2194 |
2195 // migrate the interpreter frame off of the stack | |
2196 | |
2197 mov(G2_thread, L7); | |
2198 // save nmethod | |
2199 mov(O0, L6); | |
2200 set_last_Java_frame(SP, noreg); | |
2201 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); | |
2202 reset_last_Java_frame(); | |
2203 mov(L7, G2_thread); | |
2204 | |
2205 // move OSR nmethod to I1 | |
2206 mov(L6, I1); | |
2207 | |
2208 // OSR buffer to I0 | |
2209 mov(O0, I0); | |
2210 | |
2211 // remove the interpreter frame | |
2212 restore(I5_savedSP, 0, SP); | |
2213 | |
2214 // Jump to the osr code. | |
2215 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); | |
2216 jmp(O2, G0); | |
2217 delayed()->nop(); | |
2218 | |
2219 bind(overflow_with_error); | |
2220 | |
2221 bind(did_not_overflow); | |
2222 } | |
2223 | |
2224 | |
2225 | |
2226 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) { | |
2227 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); } | |
2228 } | |
2229 | |
2230 | |
2231 // local helper function for the verify_oop_or_return_address macro | |
6725 | 2232 static bool verify_return_address(Method* m, int bci) { |
0 | 2233 #ifndef PRODUCT |
2234 address pc = (address)(m->constMethod()) | |
6725 | 2235                + in_bytes(ConstMethod::codes_offset()) + bci; |
0 | 2236 // assume it is a valid return address if it is inside m and is preceded by a jsr |
2237 if (!m->contains(pc)) return false; | |
2238 address jsr_pc; | |
2239 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr); | |
2240 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true; | |
2241 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w); | |
2242 if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true; | |
2243 #endif // PRODUCT | |
2244 return false; | |
2245 } | |
2246 | |
2247 | |
2248 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) { | |
2249 if (!VerifyOops) return; | |
2250 // the VM documentation for the astore[_wide] bytecode allows | |
2251 // the TOS to be not only an oop but also a return address | |
2252 Label test; | |
2253 Label skip; | |
2254 // See if it is an address (in the current method): | |
2255 | |
2256 mov(reg, Rtmp); | |
2257 const int log2_bytecode_size_limit = 16; | |
2258 srl(Rtmp, log2_bytecode_size_limit, Rtmp); | |
3839 | 2259 br_notnull_short( Rtmp, pt, test ); |
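  // In other words (illustrative note): a value that fits in 16 bits is treated as a
  // candidate bytecode index and handed to verify_return_address() below, while
  // anything larger is assumed to be an oop and branches to the verify_oop() check
  // at the "test" label.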
0 | 2260 |
2261 // %%% should use call_VM_leaf here? | |
2262 save_frame_and_mov(0, Lmethod, O0, reg, O1); | |
2263 save_thread(L7_thread_cache); | |
2264 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none); | |
2265 delayed()->nop(); | |
2266 restore_thread(L7_thread_cache); | |
2267 br_notnull( O0, false, pt, skip ); | |
2268 delayed()->restore(); | |
2269 | |
2270 // Perform a more elaborate out-of-line call | |
2271 // Not an address; verify it: | |
2272 bind(test); | |
2273 verify_oop(reg); | |
2274 bind(skip); | |
2275 } | |
2276 | |
2277 | |
2278 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { | |
2279 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth); | |
2280 } | |
2281 #endif /* CC_INTERP */ | |
2282 | |
2283 // Inline assembly for: | |
2284 // | |
2285 // if (thread is in interp_only_mode) { | |
2286 // InterpreterRuntime::post_method_entry(); | |
2287 // } | |
2288 // if (DTraceMethodProbes) { | |
605 | 2289 // SharedRuntime::dtrace_method_entry(method, receiver); |
0 | 2290 // } |
610 | 2291 //   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2292 // SharedRuntime::rc_trace_method_entry(method, receiver); |
0 | 2293 // } |
2294 | |
2295 void InterpreterMacroAssembler::notify_method_entry() { | |
2296 | |
2297 // C++ interpreter only uses this for native methods. | |
2298 | |
2299 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2300 // entry/exit events are sent for that thread to track stack | |
2301 // depth. If it is possible to enter interp_only_mode we add | |
2302 // the code to check if the event should be sent. | |
2303 if (JvmtiExport::can_post_interpreter_events()) { | |
2304 Label L; | |
2305 Register temp_reg = O5; | |
727 | 2306 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2307 ld(interp_only, temp_reg); |
3839 | 2308 cmp_and_br_short(temp_reg, 0, equal, pt, L); |
0 | 2309 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); |
2310 bind(L); | |
2311 } | |
2312 | |
2313 { | |
2314 Register temp_reg = O5; | |
2315 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2316 call_VM_leaf(noreg, | |
2317 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), | |
2318 G2_thread, Lmethod); | |
2319 } | |
610 | 2320 |
2321 // RedefineClasses() tracing support for obsolete method entry |
2322 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2323 call_VM_leaf(noreg, |
2324 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), |
2325 G2_thread, Lmethod); |
70998f2e05ef
6805864: 4/3 Problem with jvmti->redefineClasses: some methods don't get redefined
dcubed
parents:
422
diff
changeset
|
2326 } |
0 | 2327 } |
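The DTraceMethodProbes test above is generated through SkipIfEqual, a scope object: its constructor emits a test of the flag and a branch over whatever code the enclosed scope generates, and its destructor binds the branch target so execution falls through when the probe is disabled. A minimal sketch of that idiom follows; the load_bool_flag helper and the exact instruction choice are assumptions for illustration, not the class's actual definition (which lives with the SPARC macro assembler).

class SkipIfEqualSketch {
  MacroAssembler* _masm;
  Label           _label;
 public:
  SkipIfEqualSketch(MacroAssembler* masm, Register temp,
                    const bool* flag_addr, Assembler::Condition cond)
    : _masm(masm) {
    _masm->load_bool_flag(flag_addr, temp);          // hypothetical helper: load the byte-sized flag
    _masm->tst(temp);                                // set condition codes from the flag value
    _masm->br(cond, false, Assembler::pt, _label);   // cond == zero: branch over the guarded code
    _masm->delayed()->nop();
  }
  ~SkipIfEqualSketch() { _masm->bind(_label); }      // fall-through target after the guarded scope
};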
2328 | |
2329 | |
2330 // Inline assembly for: | |
2331 // | |
2332 // if (thread is in interp_only_mode) { | |
2333 // // save result | |
2334 // InterpreterRuntime::post_method_exit(); | |
2335 // // restore result | |
2336 // } | |
2337 // if (DTraceMethodProbes) { | |
2338 // SharedRuntime::dtrace_method_exit(thread, method); | |
2339 // } | |
2340 // | |
2341 // Native methods have their result stored in d_tmp and l_tmp | |
2342 // Java methods have their result stored in the expression stack | |
2343 | |
2344 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, | |
2345 TosState state, | |
2346 NotifyMethodExitMode mode) { | |
2347 // C++ interpreter only uses this for native methods. | |
2348 | |
2349 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2350 // entry/exit events are sent for that thread to track stack | |
2351 // depth. If it is possible to enter interp_only_mode we add | |
2352 // the code to check if the event should be sent. | |
2353 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { | |
2354 Label L; | |
2355 Register temp_reg = O5; | |
727 | 2356 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2357 ld(interp_only, temp_reg); |
3839 | 2358 cmp_and_br_short(temp_reg, 0, equal, pt, L); |
0 | 2359 |
2360 // Note: frame::interpreter_frame_result has a dependency on how the | |
2361 // method result is saved across the call to post_method_exit. For | |
2362 // native methods it assumes the result registers are saved to | |
2363 // l_scratch and d_scratch. If this changes then the interpreter_frame_result | |
2364 // implementation will need to be updated too. | |
2365 | |
2366 save_return_value(state, is_native_method); | |
2367 call_VM(noreg, | |
2368 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); | |
2369 restore_return_value(state, is_native_method); | |
2370 bind(L); | |
2371 } | |
2372 | |
2373 { | |
2374 Register temp_reg = O5; | |
2375 // Dtrace notification | |
2376 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2377 save_return_value(state, is_native_method); | |
2378 call_VM_leaf( | |
2379 noreg, | |
2380 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), | |
2381 G2_thread, Lmethod); | |
2382 restore_return_value(state, is_native_method); | |
2383 } | |
2384 } | |
2385 | |
2386 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { | |
2387 #ifdef CC_INTERP | |
2388 // result potentially in O0/O1: save it across calls | |
2389 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); | |
2390 #ifdef _LP64 | |
2391 stx(O0, STATE(_native_lresult)); | |
2392 #else | |
2393 std(O0, STATE(_native_lresult)); | |
2394 #endif | |
2395 #else // CC_INTERP | |
2396 if (is_native_call) { | |
2397 stf(FloatRegisterImpl::D, F0, d_tmp); | |
2398 #ifdef _LP64 | |
2399 stx(O0, l_tmp); | |
2400 #else | |
2401 std(O0, l_tmp); | |
2402 #endif | |
2403 } else { | |
2404 push(state); | |
2405 } | |
2406 #endif // CC_INTERP | |
2407 } | |
2408 | |
2409 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { | |
2410 #ifdef CC_INTERP | |
2411 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); | |
2412 #ifdef _LP64 | |
2413 ldx(STATE(_native_lresult), O0); | |
2414 #else | |
2415 ldd(STATE(_native_lresult), O0); | |
2416 #endif | |
2417 #else // CC_INTERP | |
2418 if (is_native_call) { | |
2419 ldf(FloatRegisterImpl::D, d_tmp, F0); | |
2420 #ifdef _LP64 | |
2421 ldx(l_tmp, O0); | |
2422 #else | |
2423 ldd(l_tmp, O0); | |
2424 #endif | |
2425 } else { | |
2426 pop(state); | |
2427 } | |
2428 #endif // CC_INTERP | |
2429 } | |
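The #ifdef _LP64 split in both helpers exists because a Java long result occupies a single 64-bit register (O0) on LP64, but the O0/O1 register pair on 32-bit SPARC: stx stores one 64-bit register, while std stores an even/odd register pair into consecutive words. A small self-contained model of the two layouts, purely illustrative (the real code operates on registers and the l_tmp frame slot, not structs):

#include <cstdint>
#include <cstring>

// Stand-in for the 8-byte l_tmp scratch slot in the interpreter frame.
struct SavedLongSlot { uint8_t bytes[8]; };

// LP64 flavor: one 64-bit register holds the whole value (stx O0, l_tmp).
static void save_lp64(uint64_t o0, SavedLongSlot* slot) {
  std::memcpy(slot->bytes, &o0, sizeof(o0));
}

// 32-bit flavor: the value straddles the O0/O1 pair (std O0, l_tmp stores both words).
static void save_ilp32(uint32_t o0_hi, uint32_t o1_lo, SavedLongSlot* slot) {
  std::memcpy(slot->bytes,     &o0_hi, sizeof(o0_hi));  // even register: high word
  std::memcpy(slot->bytes + 4, &o1_lo, sizeof(o1_lo));  // odd register: low word
}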
1783 | 2430 |
2431 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. | |
2432 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, | |
2433 int increment, int mask, | |
2434 Register scratch1, Register scratch2, | |
2435 Condition cond, Label *where) { | |
2436 ld(counter_addr, scratch1); | |
2437 add(scratch1, increment, scratch1); | |
2438 if (is_simm13(mask)) { | |
2439 andcc(scratch1, mask, G0); | |
2440 } else { | |
2441 set(mask, scratch2); | |
2442 andcc(scratch1, scratch2, G0); | |
2443 } | |
2444 br(cond, false, Assembler::pn, *where); | |
2445 delayed()->st(scratch1, counter_addr); | |
2446 } |
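A scalar model of what increment_mask_and_jump() emits: the counter word is bumped by increment, the new value is ANDed with mask, and the branch to *where is taken when the masked value satisfies cond; the updated counter is stored from the branch delay slot, so the store happens whether or not the branch is taken. A self-contained sketch for the common cond == zero case, used for counter-overflow style checks (illustrative only; the generated code works on a 32-bit counter field in SPARC registers):

#include <cstdint>

// Plain-C++ model of the emitted logic for cond == Assembler::zero:
// returns true when the branch to *where would be taken.
static bool increment_mask_and_test(int32_t* counter_addr,
                                    int32_t increment, int32_t mask) {
  int32_t value = *counter_addr + increment;  // ld + add
  *counter_addr = value;                      // st sits in the branch delay slot
  return (value & mask) == 0;                 // andcc + br on "zero"
}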