graal-jvmci-8: annotate src/cpu/sparc/vm/interp_masm_sparc.cpp @ 1972:f95d63e2154a
6989984: Use standard include model for Hotspot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
author | stefank
date | Tue, 23 Nov 2010 13:22:55 -0800
parents | d5d065957597
children | dd031b2226de

rev | line source
0 | 1 /* |
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 * or visit www.oracle.com if you need additional information or have any |
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "interp_masm_sparc.hpp" | |
27 #include "interpreter/interpreter.hpp" | |
28 #include "interpreter/interpreterRuntime.hpp" | |
29 #include "oops/arrayOop.hpp" | |
30 #include "oops/markOop.hpp" | |
31 #include "oops/methodDataOop.hpp" | |
32 #include "oops/methodOop.hpp" | |
33 #include "prims/jvmtiExport.hpp" | |
34 #include "prims/jvmtiRedefineClassesTrace.hpp" | |
35 #include "prims/jvmtiThreadState.hpp" | |
36 #include "runtime/basicLock.hpp" | |
37 #include "runtime/biasedLocking.hpp" | |
38 #include "runtime/sharedRuntime.hpp" | |
39 #ifdef TARGET_OS_FAMILY_linux | |
40 # include "thread_linux.inline.hpp" | |
41 #endif | |
42 #ifdef TARGET_OS_FAMILY_solaris | |
43 # include "thread_solaris.inline.hpp" | |
44 #endif | |
0 | 45 |
46 #ifndef CC_INTERP | |
47 #ifndef FAST_DISPATCH | |
48 #define FAST_DISPATCH 1 | |
49 #endif | |
50 #undef FAST_DISPATCH | |
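// Note: the #undef above leaves FAST_DISPATCH undefined, so the #ifdef FAST_DISPATCH
// paths below are currently compiled out; dispatch materializes the table address instead.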
51 | |
52 // Implementation of InterpreterMacroAssembler | |
53 | |
54 // This file specializes the assembler with interpreter-specific macros | |
55 | |
727 | 56 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS); |
57 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); | |
0 | 58 |
59 #else // CC_INTERP | |
60 #ifndef STATE | |
61 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name)) | |
62 #endif // STATE | |
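// STATE(field) above expands to the pair (Lstate, byte offset of BytecodeInterpreter::field),
// i.e. the operands needed to address a field of the C++ interpreter's state object.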
63 | |
64 #endif // CC_INTERP | |
65 | |
66 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) { | |
67 // Note: this algorithm is also used by C1's OSR entry sequence. | |
68 // Any changes should also be applied to CodeEmitter::emit_osr_entry(). | |
69 assert_different_registers(args_size, locals_size); | |
70 // max_locals*2 for TAGS. Assumes that args_size has already been adjusted. | |
71 subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words | |
72 // Use br/mov combination because it works on both V8 and V9 and is | |
73 // faster. | |
74 Label skip_move; | |
75 br(Assembler::negative, true, Assembler::pt, skip_move); | |
76 delayed()->mov(G0, delta); | |
77 bind(skip_move); | |
78 round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned) | |
79 sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes | |
80 } | |
81 | |
82 #ifndef CC_INTERP | |
83 | |
84 // Dispatch code executed in the prolog of a bytecode which does not do its | |
85 // own dispatch. The dispatch address is computed and placed in IdispatchAddress | |
86 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) { | |
87 assert_not_delayed(); | |
88 #ifdef FAST_DISPATCH | |
89 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since | |
90 // they both use I2. | |
91 assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive"); | |
92 ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
93 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); | |
94 // add offset to correct dispatch table | |
95 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
96 ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr | |
97 #else | |
727 | 98 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
0 | 99 // dispatch table to use |
727 | 100 AddressLiteral tbl(Interpreter::dispatch_table(state)); |
101 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
102 set(tbl, G3_scratch); // compute addr of table | |
103 ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr | |
0 | 104 #endif |
105 } | |
106 | |
107 | |
108 // Dispatch code executed in the epilog of a bytecode which does not do its | |
109 // own dispatch. The dispatch address in IdispatchAddress is used for the | |
110 // dispatch. | |
111 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) { | |
112 assert_not_delayed(); | |
113 verify_FPU(1, state); | |
114 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
115 jmp( IdispatchAddress, 0 ); | |
116 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); | |
117 else delayed()->nop(); | |
118 } | |
119 | |
120 | |
121 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) { | |
122 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) | |
123 assert_not_delayed(); | |
124 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
125 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr); | |
126 } | |
127 | |
128 | |
129 void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) { | |
130 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) | |
131 assert_not_delayed(); | |
132 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode | |
133 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false); | |
134 } | |
135 | |
136 | |
137 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { | |
138 // load current bytecode | |
139 assert_not_delayed(); | |
140 ldub( Lbcp, 0, Lbyte_code); // load next bytecode | |
141 dispatch_base(state, table); | |
142 } | |
143 | |
144 | |
145 void InterpreterMacroAssembler::call_VM_leaf_base( | |
146 Register java_thread, | |
147 address entry_point, | |
148 int number_of_arguments | |
149 ) { | |
150 if (!java_thread->is_valid()) | |
151 java_thread = L7_thread_cache; | |
152 // super call | |
153 MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments); | |
154 } | |
155 | |
156 | |
157 void InterpreterMacroAssembler::call_VM_base( | |
158 Register oop_result, | |
159 Register java_thread, | |
160 Register last_java_sp, | |
161 address entry_point, | |
162 int number_of_arguments, | |
163 bool check_exception | |
164 ) { | |
165 if (!java_thread->is_valid()) | |
166 java_thread = L7_thread_cache; | |
167 // See class ThreadInVMfromInterpreter, which assumes that the interpreter | |
168 // takes responsibility for setting its own thread-state on call-out. | |
169 // However, ThreadInVMfromInterpreter resets the state to "in_Java". | |
170 | |
171 //save_bcp(); // save bcp | |
172 MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception); | |
173 //restore_bcp(); // restore bcp | |
174 //restore_locals(); // restore locals pointer | |
175 } | |
176 | |
177 | |
178 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) { | |
179 if (JvmtiExport::can_pop_frame()) { | |
180 Label L; | |
181 | |
182 // Check the "pending popframe condition" flag in the current thread | |
727 | 183 ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg); |
0 | 184 |
185 // Initiate popframe handling only if it is not already being processed. If the flag | |
186 // has the popframe_processing bit set, it means that this code is called *during* popframe | |
187 // handling - we don't want to reenter. | |
188 btst(JavaThread::popframe_pending_bit, scratch_reg); | |
189 br(zero, false, pt, L); | |
190 delayed()->nop(); | |
191 btst(JavaThread::popframe_processing_bit, scratch_reg); | |
192 br(notZero, false, pt, L); | |
193 delayed()->nop(); | |
194 | |
195 // Call Interpreter::remove_activation_preserving_args_entry() to get the | |
196 // address of the same-named entrypoint in the generated interpreter code. | |
197 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); | |
198 | |
199 // Jump to Interpreter::_remove_activation_preserving_args_entry | |
200 jmpl(O0, G0, G0); | |
201 delayed()->nop(); | |
202 bind(L); | |
203 } | |
204 } | |
205 | |
206 | |
207 void InterpreterMacroAssembler::load_earlyret_value(TosState state) { | |
208 Register thr_state = G4_scratch; | |
727 | 209 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
210 const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset()); | |
211 const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset()); | |
212 const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset()); | |
0 | 213 switch (state) { |
214 case ltos: ld_long(val_addr, Otos_l); break; | |
215 case atos: ld_ptr(oop_addr, Otos_l); | |
216 st_ptr(G0, oop_addr); break; | |
217 case btos: // fall through | |
218 case ctos: // fall through | |
219 case stos: // fall through | |
220 case itos: ld(val_addr, Otos_l1); break; | |
221 case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break; | |
222 case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break; | |
223 case vtos: /* nothing to do */ break; | |
224 default : ShouldNotReachHere(); | |
225 } | |
226 // Clean up tos value in the jvmti thread state | |
227 or3(G0, ilgl, G3_scratch); | |
228 stw(G3_scratch, tos_addr); | |
229 st_long(G0, val_addr); | |
230 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
231 } | |
232 | |
233 | |
234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) { | |
235 if (JvmtiExport::can_force_early_return()) { | |
236 Label L; | |
237 Register thr_state = G3_scratch; | |
727 | 238 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
0 | 239 tst(thr_state); |
240 br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit; | |
241 delayed()->nop(); | |
242 | |
243 // Initiate earlyret handling only if it is not already being processed. | |
244 // If the flag has the earlyret_processing bit set, it means that this code | |
245 // is called *during* earlyret handling - we don't want to reenter. | |
727 | 246 ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch); |
0 | 247 cmp(G4_scratch, JvmtiThreadState::earlyret_pending); |
248 br(Assembler::notEqual, false, pt, L); | |
249 delayed()->nop(); | |
250 | |
251 // Call Interpreter::remove_activation_early_entry() to get the address of the | |
252 // same-named entrypoint in the generated interpreter code | |
727 | 253 ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1); |
0 | 254 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1); |
255 | |
256 // Jump to Interpreter::_remove_activation_early_entry | |
257 jmpl(O0, G0, G0); | |
258 delayed()->nop(); | |
259 bind(L); | |
260 } | |
261 } | |
262 | |
263 | |
1295 | 264 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) { |
0 | 265 mov(arg_1, O0); |
1295 | 266 mov(arg_2, O1); |
267 MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2); | |
0 | 268 } |
269 #endif /* CC_INTERP */ | |
270 | |
271 | |
272 #ifndef CC_INTERP | |
273 | |
274 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) { | |
275 assert_not_delayed(); | |
276 dispatch_Lbyte_code(state, table); | |
277 } | |
278 | |
279 | |
280 void InterpreterMacroAssembler::dispatch_normal(TosState state) { | |
281 dispatch_base(state, Interpreter::normal_table(state)); | |
282 } | |
283 | |
284 | |
285 void InterpreterMacroAssembler::dispatch_only(TosState state) { | |
286 dispatch_base(state, Interpreter::dispatch_table(state)); | |
287 } | |
288 | |
289 | |
290 // common code to dispatch and dispatch_only | |
291 // dispatch value in Lbyte_code and increment Lbcp | |
292 | |
293 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) { | |
294 verify_FPU(1, state); | |
295 // %%%%% maybe implement +VerifyActivationFrameSize here | |
296 //verify_thread(); //too slow; we will just verify on method entry & exit | |
297 if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
298 #ifdef FAST_DISPATCH | |
299 if (table == Interpreter::dispatch_table(state)) { | |
300 // use IdispatchTables | |
301 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); | |
302 // add offset to correct dispatch table | |
303 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize | |
304 ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr | |
305 } else { | |
306 #endif | |
307 // dispatch table to use | |
727 | 308 AddressLiteral tbl(table); |
0 | 309 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
727 | 310 set(tbl, G3_scratch); // compute addr of table |
0 | 311 ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr |
312 #ifdef FAST_DISPATCH | |
313 } | |
314 #endif | |
315 jmp( G3_scratch, 0 ); | |
316 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); | |
317 else delayed()->nop(); | |
318 } | |
319 | |
320 | |
321 // Helpers for expression stack | |
322 | |
323 // Longs and doubles are Category 2 computational types in the | |
324 // JVM specification (section 3.11.1) and take 2 expression stack or | |
325 // local slots. | |
326 // Aligning them on 32 bit with tagged stacks is hard because the code generated | |
327 // for the dup* bytecodes depends on what types are already on the stack. | |
328 // If the types are split into the two stack/local slots, that is much easier | |
329 // (and we can use 0 for non-reference tags). | |
330 | |
331 // Known good alignment in _LP64 but unknown otherwise | |
332 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) { | |
333 assert_not_delayed(); | |
334 | |
335 #ifdef _LP64 | |
336 ldf(FloatRegisterImpl::D, r1, offset, d); | |
337 #else | |
338 ldf(FloatRegisterImpl::S, r1, offset, d); | |
1506 | 339 ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor()); |
0 | 340 #endif |
341 } | |
342 | |
343 // Known good alignment in _LP64 but unknown otherwise | |
344 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) { | |
345 assert_not_delayed(); | |
346 | |
347 #ifdef _LP64 | |
348 stf(FloatRegisterImpl::D, d, r1, offset); | |
349 // store something more useful here | |
1506 | 350 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
0 | 351 #else |
352 stf(FloatRegisterImpl::S, d, r1, offset); | |
1506 | 353 stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize); |
0 | 354 #endif |
355 } | |
356 | |
357 | |
358 // Known good alignment in _LP64 but unknown otherwise | |
359 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) { | |
360 assert_not_delayed(); | |
361 #ifdef _LP64 | |
362 ldx(r1, offset, rd); | |
363 #else | |
364 ld(r1, offset, rd); | |
1506 | 365 ld(r1, offset + Interpreter::stackElementSize, rd->successor()); |
0 | 366 #endif |
367 } | |
368 | |
369 // Known good alignment in _LP64 but unknown otherwise | |
370 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) { | |
371 assert_not_delayed(); | |
372 | |
373 #ifdef _LP64 | |
374 stx(l, r1, offset); | |
375 // store something more useful here | |
1506 | 376 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
0 | 377 #else |
378 st(l, r1, offset); | |
1506 | 379 st(l->successor(), r1, offset + Interpreter::stackElementSize); |
0 | 380 #endif |
381 } | |
382 | |
383 void InterpreterMacroAssembler::pop_i(Register r) { | |
384 assert_not_delayed(); | |
385 ld(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 386 inc(Lesp, Interpreter::stackElementSize); |
0 | 387 debug_only(verify_esp(Lesp)); |
388 } | |
389 | |
390 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) { | |
391 assert_not_delayed(); | |
392 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 393 inc(Lesp, Interpreter::stackElementSize); |
0 | 394 debug_only(verify_esp(Lesp)); |
395 } | |
396 | |
397 void InterpreterMacroAssembler::pop_l(Register r) { | |
398 assert_not_delayed(); | |
399 load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r); | |
1506 | 400 inc(Lesp, 2*Interpreter::stackElementSize); |
0 | 401 debug_only(verify_esp(Lesp)); |
402 } | |
403 | |
404 | |
405 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) { | |
406 assert_not_delayed(); | |
407 ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f); | |
1506 | 408 inc(Lesp, Interpreter::stackElementSize); |
0 | 409 debug_only(verify_esp(Lesp)); |
410 } | |
411 | |
412 | |
413 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) { | |
414 assert_not_delayed(); | |
415 load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f); | |
1506 | 416 inc(Lesp, 2*Interpreter::stackElementSize); |
0 | 417 debug_only(verify_esp(Lesp)); |
418 } | |
419 | |
420 | |
421 void InterpreterMacroAssembler::push_i(Register r) { | |
422 assert_not_delayed(); | |
423 debug_only(verify_esp(Lesp)); | |
1506 | 424 st(r, Lesp, 0); |
425 dec(Lesp, Interpreter::stackElementSize); | |
0 | 426 } |
427 | |
428 void InterpreterMacroAssembler::push_ptr(Register r) { | |
429 assert_not_delayed(); | |
1506 | 430 st_ptr(r, Lesp, 0); |
431 dec(Lesp, Interpreter::stackElementSize); | |
0 | 432 } |
433 | |
434 // remember: our convention for longs in SPARC is: | |
435 // O0 (Otos_l1) has high-order part in first word, | |
436 // O1 (Otos_l2) has low-order part in second word | |
437 | |
438 void InterpreterMacroAssembler::push_l(Register r) { | |
439 assert_not_delayed(); | |
440 debug_only(verify_esp(Lesp)); | |
1506 | 441 // Longs are stored in memory-correct order, even if unaligned. |
442 int offset = -Interpreter::stackElementSize; | |
0 | 443 store_unaligned_long(r, Lesp, offset); |
1506 | 444 dec(Lesp, 2 * Interpreter::stackElementSize); |
0 | 445 } |
446 | |
447 | |
448 void InterpreterMacroAssembler::push_f(FloatRegister f) { | |
449 assert_not_delayed(); | |
450 debug_only(verify_esp(Lesp)); | |
1506 | 451 stf(FloatRegisterImpl::S, f, Lesp, 0); |
452 dec(Lesp, Interpreter::stackElementSize); | |
0 | 453 } |
454 | |
455 | |
456 void InterpreterMacroAssembler::push_d(FloatRegister d) { | |
457 assert_not_delayed(); | |
458 debug_only(verify_esp(Lesp)); | |
1506 | 459 // Doubles, like longs, are stored in memory-correct order, even if unaligned. |
460 int offset = -Interpreter::stackElementSize; | |
0 | 461 store_unaligned_double(d, Lesp, offset); |
1506 | 462 dec(Lesp, 2 * Interpreter::stackElementSize); |
0 | 463 } |
464 | |
465 | |
466 void InterpreterMacroAssembler::push(TosState state) { | |
467 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
468 switch (state) { | |
469 case atos: push_ptr(); break; | |
470 case btos: push_i(); break; | |
471 case ctos: | |
472 case stos: push_i(); break; | |
473 case itos: push_i(); break; | |
474 case ltos: push_l(); break; | |
475 case ftos: push_f(); break; | |
476 case dtos: push_d(); break; | |
477 case vtos: /* nothing to do */ break; | |
478 default : ShouldNotReachHere(); | |
479 } | |
480 } | |
481 | |
482 | |
483 void InterpreterMacroAssembler::pop(TosState state) { | |
484 switch (state) { | |
485 case atos: pop_ptr(); break; | |
486 case btos: pop_i(); break; | |
487 case ctos: | |
488 case stos: pop_i(); break; | |
489 case itos: pop_i(); break; | |
490 case ltos: pop_l(); break; | |
491 case ftos: pop_f(); break; | |
492 case dtos: pop_d(); break; | |
493 case vtos: /* nothing to do */ break; | |
494 default : ShouldNotReachHere(); | |
495 } | |
496 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
497 } | |
498 | |
499 | |
1506 | 500 // Helpers for swap and dup |
501 void InterpreterMacroAssembler::load_ptr(int n, Register val) { | |
0 | 502 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val); |
503 } | |
1506 | 504 void InterpreterMacroAssembler::store_ptr(int n, Register val) { |
0 | 505 st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n)); |
506 } | |
507 | |
508 | |
509 void InterpreterMacroAssembler::load_receiver(Register param_count, | |
510 Register recv) { | |
1506 | 511 sll(param_count, Interpreter::logStackElementSize, param_count); |
0 | 512 ld_ptr(Lesp, param_count, recv); // gets receiver Oop |
513 } | |
514 | |
515 void InterpreterMacroAssembler::empty_expression_stack() { | |
516 // Reset Lesp. | |
517 sub( Lmonitors, wordSize, Lesp ); | |
518 | |
519 // Reset SP by subtracting more space from Lesp. | |
520 Label done; | |
521 verify_oop(Lmethod); | |
727 | 522 assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!"); |
0 | 523 |
524 // A native does not need to do this, since its callee does not change SP. | |
727 | 525 ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags. |
0 | 526 btst(JVM_ACC_NATIVE, Gframe_size); |
527 br(Assembler::notZero, false, Assembler::pt, done); | |
528 delayed()->nop(); | |
529 | |
530 // Compute max expression stack+register save area | |
727 | 531 lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack. |
0 | 532 add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); |
533 | |
534 // | |
535 // now set up a stack frame with the size computed above | |
536 // | |
537 //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below | |
538 sll( Gframe_size, LogBytesPerWord, Gframe_size ); | |
539 sub( Lesp, Gframe_size, Gframe_size ); | |
540 and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary | |
541 debug_only(verify_sp(Gframe_size, G4_scratch)); | |
542 #ifdef _LP64 | |
543 sub(Gframe_size, STACK_BIAS, Gframe_size ); | |
544 #endif | |
545 mov(Gframe_size, SP); | |
546 | |
547 bind(done); | |
548 } | |
549 | |
550 | |
551 #ifdef ASSERT | |
552 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) { | |
553 Label Bad, OK; | |
554 | |
555 // Saved SP must be aligned. | |
556 #ifdef _LP64 | |
557 btst(2*BytesPerWord-1, Rsp); | |
558 #else | |
559 btst(LongAlignmentMask, Rsp); | |
560 #endif | |
561 br(Assembler::notZero, false, Assembler::pn, Bad); | |
562 delayed()->nop(); | |
563 | |
564 // Saved SP, plus register window size, must not be above FP. | |
565 add(Rsp, frame::register_save_words * wordSize, Rtemp); | |
566 #ifdef _LP64 | |
567 sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP | |
568 #endif | |
569 cmp(Rtemp, FP); | |
570 brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad); | |
571 delayed()->nop(); | |
572 | |
573 // Saved SP must not be ridiculously below current SP. | |
574 size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K); | |
575 set(maxstack, Rtemp); | |
576 sub(SP, Rtemp, Rtemp); | |
577 #ifdef _LP64 | |
578 add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp | |
579 #endif | |
580 cmp(Rsp, Rtemp); | |
581 brx(Assembler::lessUnsigned, false, Assembler::pn, Bad); | |
582 delayed()->nop(); | |
583 | |
584 br(Assembler::always, false, Assembler::pn, OK); | |
585 delayed()->nop(); | |
586 | |
587 bind(Bad); | |
588 stop("on return to interpreted call, restored SP is corrupted"); | |
589 | |
590 bind(OK); | |
591 } | |
592 | |
593 | |
594 void InterpreterMacroAssembler::verify_esp(Register Resp) { | |
595 // about to read or write Resp[0] | |
596 // make sure it is not in the monitors or the register save area | |
597 Label OK1, OK2; | |
598 | |
599 cmp(Resp, Lmonitors); | |
600 brx(Assembler::lessUnsigned, true, Assembler::pt, OK1); | |
601 delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp); | |
602 stop("too many pops: Lesp points into monitor area"); | |
603 bind(OK1); | |
604 #ifdef _LP64 | |
605 sub(Resp, STACK_BIAS, Resp); | |
606 #endif | |
607 cmp(Resp, SP); | |
608 brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2); | |
609 delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp); | |
610 stop("too many pushes: Lesp points into register window"); | |
611 bind(OK2); | |
612 } | |
613 #endif // ASSERT | |
614 | |
615 // Load compiled (i2c) or interpreter entry when calling from interpreted and | |
616 // do the call. Centralized so that all interpreter calls will do the same actions. | |
617 // If jvmti single stepping is on for a thread we must not call compiled code. | |
618 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) { | |
619 | |
620 // Assume we want to go compiled if available | |
621 | |
622 ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target); | |
623 | |
624 if (JvmtiExport::can_post_interpreter_events()) { | |
625 // JVMTI events, such as single-stepping, are implemented partly by avoiding running | |
626 // compiled code in threads for which the event is enabled. Check here for | |
627 // interp_only_mode if these events CAN be enabled. | |
628 verify_thread(); | |
629 Label skip_compiled_code; | |
630 | |
727 | 631 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 632 ld(interp_only, scratch); |
633 tst(scratch); | |
634 br(Assembler::notZero, true, Assembler::pn, skip_compiled_code); | |
635 delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target); | |
636 bind(skip_compiled_code); | |
637 } | |
638 | |
639 // the i2c_adapters need methodOop in G5_method (right? %%%) | |
640 // do the call | |
641 #ifdef ASSERT | |
642 { | |
643 Label ok; | |
644 br_notnull(target, false, Assembler::pt, ok); | |
645 delayed()->nop(); | |
646 stop("null entry point"); | |
647 bind(ok); | |
648 } | |
649 #endif // ASSERT | |
650 | |
651 // Adjust Rret first so Llast_SP can be same as Rret | |
652 add(Rret, -frame::pc_return_offset, O7); | |
653 add(Lesp, BytesPerWord, Gargs); // setup parameter pointer | |
654 // Record SP so we can remove any stack space allocated by adapter transition | |
655 jmp(target, 0); | |
656 delayed()->mov(SP, Llast_SP); | |
657 } | |
658 | |
659 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) { | |
660 assert_not_delayed(); | |
661 | |
662 Label not_taken; | |
663 if (ptr_compare) brx(cc, false, Assembler::pn, not_taken); | |
664 else br (cc, false, Assembler::pn, not_taken); | |
665 delayed()->nop(); | |
666 | |
667 TemplateTable::branch(false,false); | |
668 | |
669 bind(not_taken); | |
670 | |
671 profile_not_taken_branch(G3_scratch); | |
672 } | |
673 | |
674 | |
675 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp( | |
676 int bcp_offset, | |
677 Register Rtmp, | |
678 Register Rdst, | |
679 signedOrNot is_signed, | |
680 setCCOrNot should_set_CC ) { | |
681 assert(Rtmp != Rdst, "need separate temp register"); | |
682 assert_not_delayed(); | |
683 switch (is_signed) { | |
684 default: ShouldNotReachHere(); | |
685 | |
686 case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte | |
687 case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte | |
688 } | |
689 ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte | |
690 sll( Rdst, BitsPerByte, Rdst); | |
691 switch (should_set_CC ) { | |
692 default: ShouldNotReachHere(); | |
693 | |
694 case set_CC: orcc( Rdst, Rtmp, Rdst ); break; | |
695 case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break; | |
696 } | |
697 } | |
698 | |
699 | |
700 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp( | |
701 int bcp_offset, | |
702 Register Rtmp, | |
703 Register Rdst, | |
704 setCCOrNot should_set_CC ) { | |
705 assert(Rtmp != Rdst, "need separate temp register"); | |
706 assert_not_delayed(); | |
707 add( Lbcp, bcp_offset, Rtmp); | |
708 andcc( Rtmp, 3, G0); | |
709 Label aligned; | |
710 switch (should_set_CC ) { | |
711 default: ShouldNotReachHere(); | |
712 | |
713 case set_CC: break; | |
714 case dont_set_CC: break; | |
715 } | |
716 | |
717 br(Assembler::zero, true, Assembler::pn, aligned); | |
718 #ifdef _LP64 | |
719 delayed()->ldsw(Rtmp, 0, Rdst); | |
720 #else | |
721 delayed()->ld(Rtmp, 0, Rdst); | |
722 #endif | |
723 | |
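// Lbcp + bcp_offset is not 4-byte aligned: assemble the 32-bit big-endian value one byte at a time.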
724 ldub(Lbcp, bcp_offset + 3, Rdst); | |
725 ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst); | |
726 ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst); | |
727 #ifdef _LP64 | |
728 ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); | |
729 #else | |
730 // Unsigned load is faster than signed on some implementations | |
731 ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); | |
732 #endif | |
733 or3(Rtmp, Rdst, Rdst ); | |
734 | |
735 bind(aligned); | |
736 if (should_set_CC == set_CC) tst(Rdst); | |
737 } | |
738 | |
739 | |
1503 | 740 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp, |
1565 | 741 int bcp_offset, size_t index_size) { |
1503 | 742 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
1565 | 743 if (index_size == sizeof(u2)) { |
1503 | 744 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); |
1565 | 745 } else if (index_size == sizeof(u4)) { |
1503 | 746 assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); |
747 get_4_byte_integer_at_bcp(bcp_offset, cache, tmp); | |
748 assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); | |
749 xor3(tmp, -1, tmp); // convert to plain index | |
1565 | 750 } else if (index_size == sizeof(u1)) { |
751 assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles"); | |
752 ldub(Lbcp, bcp_offset, tmp); | |
753 } else { | |
754 ShouldNotReachHere(); | |
1503 | 755 } |
756 } | |
757 | |
758 | |
759 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, | |
1565 | 760 int bcp_offset, size_t index_size) { |
0 | 761 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
762 assert_different_registers(cache, tmp); | |
763 assert_not_delayed(); | |
1565 | 764 get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size); |
1503 | 765 // convert from field index to ConstantPoolCacheEntry index and from |
766 // word index to byte offset | |
0 | 767 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); |
768 add(LcpoolCache, tmp, cache); | |
769 } | |
770 | |
771 | |
1503 | 772 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, |
1565 | 773 int bcp_offset, size_t index_size) { |
0 | 774 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
775 assert_different_registers(cache, tmp); | |
776 assert_not_delayed(); | |
1565 | 777 if (index_size == sizeof(u2)) { |
778 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); | |
779 } else { | |
780 ShouldNotReachHere(); // other sizes not supported here | |
781 } | |
0 | 782 // convert from field index to ConstantPoolCacheEntry index |
783 // and from word index to byte offset | |
784 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); | |
785 // skip past the header | |
786 add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp); | |
787 // construct pointer to cache entry | |
788 add(LcpoolCache, tmp, cache); | |
789 } | |
790 | |
791 | |
792 // Generate a subtype check: branch to ok_is_subtype if sub_klass is | |
793 // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2. |
0 | 794 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, |
795 Register Rsuper_klass, | |
796 Register Rtmp1, | |
797 Register Rtmp2, | |
798 Register Rtmp3, | |
799 Label &ok_is_subtype ) { | |
800 Label not_subtype; |
0 | 801 |
802 // Profile the not-null value's klass. | |
803 profile_typecheck(Rsub_klass, Rtmp1); | |
804 | |
805 check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass, |
806 Rtmp1, Rtmp2, |
807 &ok_is_subtype, &not_subtype, NULL); |
808 |
809 check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass, |
810 Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg, |
811 &ok_is_subtype, NULL); |
0 | 812 |
813 bind(not_subtype); | |
814 profile_typecheck_failed(Rtmp1); | |
815 } | |
816 | |
817 // Separate these two to allow for delay slot in middle | |
818 // These are used to do a test and full jump to exception-throwing code. | |
819 | |
820 // %%%%% Could possibly reoptimize this by testing to see if we could use | |
821 // a single conditional branch (i.e. if the span is small enough). | |
822 // If you go that route, then get rid of the split and give up | |
823 // on the delay-slot hack. | |
824 | |
825 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition, | |
826 Label& ok ) { | |
827 assert_not_delayed(); | |
828 br(ok_condition, true, pt, ok); | |
829 // DELAY SLOT | |
830 } | |
831 | |
832 void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition, | |
833 Label& ok ) { | |
834 assert_not_delayed(); | |
835 bp( ok_condition, true, Assembler::xcc, pt, ok); | |
836 // DELAY SLOT | |
837 } | |
838 | |
839 void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition, | |
840 Label& ok ) { | |
841 assert_not_delayed(); | |
842 brx(ok_condition, true, pt, ok); | |
843 // DELAY SLOT | |
844 } | |
845 | |
846 void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point, | |
847 Register Rscratch, | |
848 Label& ok ) { | |
849 assert(throw_entry_point != NULL, "entry point must be generated by now"); | |
727 | 850 AddressLiteral dest(throw_entry_point); |
851 jump_to(dest, Rscratch); | |
0 | 852 delayed()->nop(); |
853 bind(ok); | |
854 } | |
855 | |
856 | |
857 // And if you cannot use the delay slot, here is a shorthand: | |
858 | |
859 void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition, | |
860 address throw_entry_point, | |
861 Register Rscratch ) { | |
862 Label ok; | |
863 if (ok_condition != never) { | |
864 throw_if_not_1_icc( ok_condition, ok); | |
865 delayed()->nop(); | |
866 } | |
867 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
868 } | |
869 void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition, | |
870 address throw_entry_point, | |
871 Register Rscratch ) { | |
872 Label ok; | |
873 if (ok_condition != never) { | |
874 throw_if_not_1_xcc( ok_condition, ok); | |
875 delayed()->nop(); | |
876 } | |
877 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
878 } | |
879 void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition, | |
880 address throw_entry_point, | |
881 Register Rscratch ) { | |
882 Label ok; | |
883 if (ok_condition != never) { | |
884 throw_if_not_1_x( ok_condition, ok); | |
885 delayed()->nop(); | |
886 } | |
887 throw_if_not_2( throw_entry_point, Rscratch, ok); | |
888 } | |
889 | |
890 // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res | |
891 // Note: res is still shy of address by array offset into object. | |
892 | |
893 void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) { | |
894 assert_not_delayed(); | |
895 | |
896 verify_oop(array); | |
897 #ifdef _LP64 | |
898 // sign extend since tos (index) can be a 32bit value | |
899 sra(index, G0, index); | |
900 #endif // _LP64 | |
901 | |
902 // check array | |
903 Label ptr_ok; | |
904 tst(array); | |
905 throw_if_not_1_x( notZero, ptr_ok ); | |
906 delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index | |
907 throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok); | |
908 | |
909 Label index_ok; | |
910 cmp(index, tmp); | |
911 throw_if_not_1_icc( lessUnsigned, index_ok ); | |
912 if (index_shift > 0) delayed()->sll(index, index_shift, index); | |
913 else delayed()->add(array, index, res); // addr - const offset in index | |
914 // convention: move aberrant index into G3_scratch for exception message | |
915 mov(index, G3_scratch); | |
916 throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok); | |
917 | |
918 // add offset if didn't do it in delay slot | |
919 if (index_shift > 0) add(array, index, res); // addr - const offset in index | |
920 } | |
921 | |
922 | |
923 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) { | |
924 assert_not_delayed(); | |
925 | |
926 // pop array | |
927 pop_ptr(array); | |
928 | |
929 // check array | |
930 index_check_without_pop(array, index, index_shift, tmp, res); | |
931 } | |
932 | |
933 | |
934 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) { | |
935 ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst); | |
936 } | |
937 | |
938 | |
939 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) { | |
940 get_constant_pool(Rdst); | |
941 ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst); | |
942 } | |
943 | |
944 | |
945 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) { | |
946 get_constant_pool(Rcpool); | |
947 ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags); | |
948 } | |
949 | |
950 | |
951 // unlock if synchronized method | |
952 // | |
953 // Unlock the receiver if this is a synchronized method. | |
954 // Unlock any Java monitors from synchronized blocks. | |
955 // | |
956 // If there are locked Java monitors | |
957 // If throw_monitor_exception | |
958 // throws IllegalMonitorStateException | |
959 // Else if install_monitor_exception | |
960 // installs IllegalMonitorStateException | |
961 // Else | |
962 // no error processing | |
963 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state, | |
964 bool throw_monitor_exception, | |
965 bool install_monitor_exception) { | |
966 Label unlocked, unlock, no_unlock; | |
967 | |
968 // get the value of _do_not_unlock_if_synchronized into G1_scratch | |
727 | 969 const Address do_not_unlock_if_synchronized(G2_thread, |
970 JavaThread::do_not_unlock_if_synchronized_offset()); | |
0 | 971 ldbool(do_not_unlock_if_synchronized, G1_scratch); |
972 stbool(G0, do_not_unlock_if_synchronized); // reset the flag | |
973 | |
974 // check if synchronized method | |
727 | 975 const Address access_flags(Lmethod, methodOopDesc::access_flags_offset()); |
0 | 976 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
977 push(state); // save tos | |
727 | 978 ld(access_flags, G3_scratch); // Load access flags. |
0 | 979 btst(JVM_ACC_SYNCHRONIZED, G3_scratch); |
727 | 980 br(zero, false, pt, unlocked); |
0 | 981 delayed()->nop(); |
982 | |
983 // Don't unlock anything if the _do_not_unlock_if_synchronized flag | |
984 // is set. | |
985 tstbool(G1_scratch); | |
986 br(Assembler::notZero, false, pn, no_unlock); | |
987 delayed()->nop(); | |
988 | |
989 // BasicObjectLock will be first in the list, since this is a synchronized method. However, we need | |
990 // to check that the object has not been unlocked by an explicit monitorexit bytecode. | |
991 | |
992 //Intel: if (throw_monitor_exception) ... else ... | |
993 // Entry already unlocked, need to throw exception | |
994 //... | |
995 | |
996 // pass top-most monitor elem | |
997 add( top_most_monitor(), O1 ); | |
998 | |
999 ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch); | |
1000 br_notnull(G3_scratch, false, pt, unlock); | |
1001 delayed()->nop(); | |
1002 | |
1003 if (throw_monitor_exception) { | |
1004 // Entry already unlocked need to throw an exception | |
1005 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
1006 should_not_reach_here(); | |
1007 } else { | |
1008 // Monitor already unlocked during a stack unroll. | |
1009 // If requested, install an illegal_monitor_state_exception. | |
1010 // Continue with stack unrolling. | |
1011 if (install_monitor_exception) { | |
1012 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); | |
1013 } | |
1014 ba(false, unlocked); | |
1015 delayed()->nop(); | |
1016 } | |
1017 | |
1018 bind(unlock); | |
1019 | |
1020 unlock_object(O1); | |
1021 | |
1022 bind(unlocked); | |
1023 | |
1024 // I0, I1: Might contain return value | |
1025 | |
1026 // Check that all monitors are unlocked | |
1027 { Label loop, exception, entry, restart; | |
1028 | |
1029 Register Rmptr = O0; | |
1030 Register Rtemp = O1; | |
1031 Register Rlimit = Lmonitors; | |
1032 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
1033 assert( (delta & LongAlignmentMask) == 0, | |
1034 "sizeof BasicObjectLock must be even number of doublewords"); | |
1035 | |
1036 #ifdef ASSERT | |
1037 add(top_most_monitor(), Rmptr, delta); | |
1038 { Label L; | |
1039 // ensure that Rmptr starts out above (or at) Rlimit | |
1040 cmp(Rmptr, Rlimit); | |
1041 brx(Assembler::greaterEqualUnsigned, false, pn, L); | |
1042 delayed()->nop(); | |
1043 stop("monitor stack has negative size"); | |
1044 bind(L); | |
1045 } | |
1046 #endif | |
1047 bind(restart); | |
1048 ba(false, entry); | |
1049 delayed()-> | |
1050 add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry | |
1051 | |
1052 // Entry is still locked, need to throw exception | |
1053 bind(exception); | |
1054 if (throw_monitor_exception) { | |
1055 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
1056 should_not_reach_here(); | |
1057 } else { | |
1058 // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception. | |
1059 // Unlock does not block, so don't have to worry about the frame | |
1060 unlock_object(Rmptr); | |
1061 if (install_monitor_exception) { | |
1062 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); | |
1063 } | |
1064 ba(false, restart); | |
1065 delayed()->nop(); | |
1066 } | |
1067 | |
1068 bind(loop); | |
1069 cmp(Rtemp, G0); // check if current entry is used | |
1070 brx(Assembler::notEqual, false, pn, exception); | |
1071 delayed()-> | |
1072 dec(Rmptr, delta); // otherwise advance to next entry | |
1073 #ifdef ASSERT | |
1074 { Label L; | |
1075 // ensure that Rmptr has not somehow stepped below Rlimit | |
1076 cmp(Rmptr, Rlimit); | |
1077 brx(Assembler::greaterEqualUnsigned, false, pn, L); | |
1078 delayed()->nop(); | |
1079 stop("ran off the end of the monitor stack"); | |
1080 bind(L); | |
1081 } | |
1082 #endif | |
1083 bind(entry); | |
1084 cmp(Rmptr, Rlimit); // check if bottom reached | |
1085 brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry | |
1086 delayed()-> | |
1087 ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp); | |
1088 } | |
1089 | |
1090 bind(no_unlock); | |
1091 pop(state); | |
1092 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
1093 } | |
1094 | |
1095 | |
1096 // remove activation | |
1097 // | |
1098 // Unlock the receiver if this is a synchronized method. | |
1099 // Unlock any Java monitors from synchronized blocks. | |
1100 // Remove the activation from the stack. | |
1101 // | |
1102 // If there are locked Java monitors | |
1103 // If throw_monitor_exception | |
1104 // throws IllegalMonitorStateException | |
1105 // Else if install_monitor_exception | |
1106 // installs IllegalMonitorStateException | |
1107 // Else | |
1108 // no error processing | |
1109 void InterpreterMacroAssembler::remove_activation(TosState state, | |
1110 bool throw_monitor_exception, | |
1111 bool install_monitor_exception) { | |
1112 | |
1113 unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception); | |
1114 | |
1115 // save result (push state before jvmti call and pop it afterwards) and notify jvmti | |
1116 notify_method_exit(false, state, NotifyJVMTI); | |
1117 | |
1118 interp_verify_oop(Otos_i, state, __FILE__, __LINE__); | |
1119 verify_oop(Lmethod); | |
1120 verify_thread(); | |
1121 | |
1122 // return tos | |
1123 assert(Otos_l1 == Otos_i, "adjust code below"); | |
1124 switch (state) { | |
1125 #ifdef _LP64 | |
1126 case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0 | |
1127 #else | |
1128 case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1 | |
1129 #endif | |
1130 case btos: // fall through | |
1131 case ctos: | |
1132 case stos: // fall through | |
1133 case atos: // fall through | |
1134 case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0 | |
1135 case ftos: // fall through | |
1136 case dtos: // fall through | |
1137 case vtos: /* nothing to do */ break; | |
1138 default : ShouldNotReachHere(); | |
1139 } | |
1140 | |
1141 #if defined(COMPILER2) && !defined(_LP64) | |
1142 if (state == ltos) { | |
1143 // C2 expects long results in G1; we can't tell if we're returning to interpreted | |
1144 // or compiled, so just be safe and use G1 and O0/O1 | |
1145 | |
1146 // Shift bits into high (msb) of G1 | |
1147 sllx(Otos_l1->after_save(), 32, G1); | |
1148 // Zero extend low bits | |
1149 srl (Otos_l2->after_save(), 0, Otos_l2->after_save()); | |
1150 or3 (Otos_l2->after_save(), G1, G1); | |
1151 } | |
1152 #endif /* COMPILER2 */ | |
1153 | |
1154 } | |
1155 #endif /* CC_INTERP */ | |
1156 | |
1157 | |
1158 // Lock object | |
1159 // | |
1160 // Argument - lock_reg points to the BasicObjectLock to be used for locking, | |
1161 // it must be initialized with the object to lock | |
1162 void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) { | |
1163 if (UseHeavyMonitors) { | |
1164 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); | |
1165 } | |
1166 else { | |
1167 Register obj_reg = Object; | |
1168 Register mark_reg = G4_scratch; | |
1169 Register temp_reg = G1_scratch; | |
727 | 1170 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes()); |
1171 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); | |
0 | 1172 Label done; |
1173 | |
1174 Label slow_case; | |
1175 | |
1176 assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg); | |
1177 | |
1178 // load markOop from object into mark_reg | |
1179 ld_ptr(mark_addr, mark_reg); | |
1180 | |
1181 if (UseBiasedLocking) { | |
1182 biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case); | |
1183 } | |
1184 | |
1185 // get the address of basicLock on stack that will be stored in the object | |
1186 // we need a temporary register here as we do not want to clobber lock_reg | |
1187 // (cas clobbers the destination register) | |
1188 mov(lock_reg, temp_reg); | |
1189 // set mark reg to be (markOop of object | UNLOCK_VALUE) | |
1190 or3(mark_reg, markOopDesc::unlocked_value, mark_reg); | |
1191 // initialize the box (Must happen before we update the object mark!) | |
1192 st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes()); | |
1193 // compare and exchange object_addr, markOop | 1, stack address of basicLock | |
1194 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
1195 casx_under_lock(mark_addr.base(), mark_reg, temp_reg, | |
1196 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
1197 | |
1198 // if the compare and exchange succeeded we are done (we saw an unlocked object) | |
1199 cmp(mark_reg, temp_reg); | |
1200 brx(Assembler::equal, true, Assembler::pt, done); | |
1201 delayed()->nop(); | |
1202 | |
1203 // We did not see an unlocked object so try the fast recursive case | |
1204 | |
1205 // Check if owner is self by comparing the value in the markOop of object | |
1206 // with the stack pointer | |
1207 sub(temp_reg, SP, temp_reg); | |
1208 #ifdef _LP64 | |
1209 sub(temp_reg, STACK_BIAS, temp_reg); | |
1210 #endif | |
1211 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); | |
1212 | |
1213 // Composite "andcc" test: | |
1214 // (a) %sp -vs- markword proximity check, and, | |
1215 // (b) verify mark word LSBs == 0 (Stack-locked). | |
1216 // | |
1217 // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size()) | |
1218 // Note that the page size used for %sp proximity testing is arbitrary and is | |
1219 // unrelated to the actual MMU page size. We use a 'logical' page size of | |
1220 // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate | |
1221 // field of the andcc instruction. | |
1222 andcc (temp_reg, 0xFFFFF003, G0) ; | |
1223 | |
1224 // if condition is true we are done and hence we can store 0 in the displaced | |
1225 // header indicating it is a recursive lock and be done | |
1226 brx(Assembler::zero, true, Assembler::pt, done); | |
1227 delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes()); | |
1228 | |
1229 // none of the above fast optimizations worked so we have to get into the | |
1230 // slow case of monitor enter | |
1231 bind(slow_case); | |
1232 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); | |
1233 | |
1234 bind(done); | |
1235 } | |
1236 } | |
1237 | |
1238 // Unlocks an object. Used in monitorexit bytecode and remove_activation. | |
1239 // | |
1240 // Argument - lock_reg points to the BasicObjectLock for lock | |
1241 // Throw IllegalMonitorException if object is not locked by current thread | |
1242 void InterpreterMacroAssembler::unlock_object(Register lock_reg) { | |
1243 if (UseHeavyMonitors) { | |
1244 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); | |
1245 } else { | |
1246 Register obj_reg = G3_scratch; | |
1247 Register mark_reg = G4_scratch; | |
1248 Register displaced_header_reg = G1_scratch; | |
727 | 1249 Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes()); |
1250 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); | |
0 | 1251 Label done; |
1252 | |
1253 if (UseBiasedLocking) { | |
1254 // load the object out of the BasicObjectLock | |
1255 ld_ptr(lockobj_addr, obj_reg); | |
1256 biased_locking_exit(mark_addr, mark_reg, done, true); | |
1257 st_ptr(G0, lockobj_addr); // free entry | |
1258 } | |
1259 | |
1260 // Test first if we are in the fast recursive case | |
727 | 1261 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes()); |
1262 ld_ptr(lock_addr, displaced_header_reg); | |
0 | 1263 br_null(displaced_header_reg, true, Assembler::pn, done); |
1264 delayed()->st_ptr(G0, lockobj_addr); // free entry | |
1265 | |
1266 // See if it is still a light weight lock, if so we just unlock | |
1267 // the object and we are done | |
1268 | |
1269 if (!UseBiasedLocking) { | |
1270 // load the object out of the BasicObjectLock | |
1271 ld_ptr(lockobj_addr, obj_reg); | |
1272 } | |
1273 | |
1274 // we have the displaced header in displaced_header_reg | |
1275 // we expect to see the stack address of the basicLock in case the | |
1276 // lock is still a light weight lock (lock_reg) | |
1277 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); | |
1278 casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg, | |
1279 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); | |
1280 cmp(lock_reg, displaced_header_reg); | |
1281 brx(Assembler::equal, true, Assembler::pn, done); | |
1282 delayed()->st_ptr(G0, lockobj_addr); // free entry | |
1283 | |
1284 // The lock has been converted into a heavy lock and hence | |
1285 // we need to get into the slow case | |
1286 | |
1287 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); | |
1288 | |
1289 bind(done); | |
1290 } | |
1291 } | |
1292 | |
1293 #ifndef CC_INTERP | |
1294 | |
1295 // Get the method data pointer from the methodOop and set the | |
1296 // specified register to its value. | |
1297 | |
1298 void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) { | |
1299 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1300 Label get_continue; | |
1301 | |
1302 ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr); | |
1303 test_method_data_pointer(get_continue); | |
1304 add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr); | |
1305 if (Roff != noreg) | |
1306 // Roff contains a method data index ("mdi"). It defaults to zero. | |
1307 add(ImethodDataPtr, Roff, ImethodDataPtr); | |
1308 bind(get_continue); | |
1309 } | |
1310 | |
1311 // Set the method data pointer for the current bcp. | |
1312 | |
1313 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { | |
1314 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1315 Label zero_continue; | |
1316 | |
1317 // Test MDO to avoid the call if it is NULL. | |
727 | 1318 ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr); |
0 | 1319 test_method_data_pointer(zero_continue); |
1320 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); | |
1321 set_method_data_pointer_offset(O0); | |
1322 bind(zero_continue); | |
1323 } | |
1324 | |
1325 // Test ImethodDataPtr. If it is null, continue at the specified label | |
1326 | |
1327 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) { | |
1328 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1329 #ifdef _LP64 | |
1330 bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue); | |
1331 #else | |
1332 tst(ImethodDataPtr); | |
1333 br(Assembler::zero, false, Assembler::pn, zero_continue); | |
1334 #endif | |
1335 delayed()->nop(); | |
1336 } | |
1337 | |
1338 void InterpreterMacroAssembler::verify_method_data_pointer() { | |
1339 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1340 #ifdef ASSERT | |
1341 Label verify_continue; | |
1342 test_method_data_pointer(verify_continue); | |
1343 | |
1344 // If the mdp is valid, it will point to a DataLayout header which is | |
1345 // consistent with the bcp. The converse is highly probable also. | |
1346 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); | |
727 | 1347 ld_ptr(Lmethod, methodOopDesc::const_offset(), O5); |
0 | 1348 add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch); |
1349 add(G3_scratch, O5, G3_scratch); | |
1350 cmp(Lbcp, G3_scratch); | |
1351 brx(Assembler::equal, false, Assembler::pt, verify_continue); | |
1352 | |
1353 Register temp_reg = O5; | |
1354 delayed()->mov(ImethodDataPtr, temp_reg); | |
1355 // %%% should use call_VM_leaf here? | |
1356 //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); | |
1357 save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); | |
727 | 1358 Address d_save(FP, -sizeof(jdouble) + STACK_BIAS); |
0 | 1359 stf(FloatRegisterImpl::D, Ftos_d, d_save); |
1360 mov(temp_reg->after_save(), O2); | |
1361 save_thread(L7_thread_cache); | |
1362 call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none); | |
1363 delayed()->nop(); | |
1364 restore_thread(L7_thread_cache); | |
1365 ldf(FloatRegisterImpl::D, d_save, Ftos_d); | |
1366 restore(); | |
1367 bind(verify_continue); | |
1368 #endif // ASSERT | |
1369 } | |
1370 | |
1371 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count, | |
1372 Register cur_bcp, | |
1373 Register Rtmp, | |
1374 Label &profile_continue) { | |
1375 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1376 // Control will flow to "profile_continue" if the counter is less than the | |
1377 // limit or if we call profile_method() | |
1378 | |
1379 Label done; | |
1380 | |
1381 // if no method data exists, and the counter is high enough, make one | |
1382 #ifdef _LP64 | |
1383 bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done); | |
1384 #else | |
1385 tst(ImethodDataPtr); | |
1386 br(Assembler::notZero, false, Assembler::pn, done); | |
1387 #endif | |
1388 | |
1389 // Test to see if we should create a method data oop | |
727 | 1390 AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit); |
0 | 1391 #ifdef _LP64 |
1392 delayed()->nop(); | |
727 | 1393 sethi(profile_limit, Rtmp); |
0 | 1394 #else |
727 | 1395 delayed()->sethi(profile_limit, Rtmp); |
0 | 1396 #endif |
727 | 1397 ld(Rtmp, profile_limit.low10(), Rtmp); |
0 | 1398 cmp(invocation_count, Rtmp); |
1399 br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); | |
1400 delayed()->nop(); | |
1401 | |
1402 // Build it now. | |
1403 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp); | |
1404 set_method_data_pointer_offset(O0); | |
1405 ba(false, profile_continue); | |
1406 delayed()->nop(); | |
1407 bind(done); | |
1408 } | |
1409 | |
1410 // Store a value at some constant offset from the method data pointer. | |
1411 | |
1412 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) { | |
1413 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1414 st_ptr(value, ImethodDataPtr, constant); | |
1415 } | |
1416 | |
1417 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter, | |
1418 Register bumped_count, | |
1419 bool decrement) { | |
1420 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1421 | |
1422 // Load the counter. | |
1423 ld_ptr(counter, bumped_count); | |
1424 | |
1425 if (decrement) { | |
1426 // Decrement the register. Set condition codes. | |
1427 subcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1428 | |
1429 // If the decrement causes the counter to overflow, stay negative | |
1430 Label L; | |
1431 brx(Assembler::negative, true, Assembler::pn, L); | |
1432 | |
1433 // Store the decremented counter, if it is still negative. | |
1434 delayed()->st_ptr(bumped_count, counter); | |
1435 bind(L); | |
1436 } else { | |
1437 // Increment the register. Set carry flag. | |
1438 addcc(bumped_count, DataLayout::counter_increment, bumped_count); | |
1439 | |
1440 // If the increment causes the counter to overflow, pull back by 1. | |
1441 assert(DataLayout::counter_increment == 1, "subc works"); | |
1442 subc(bumped_count, G0, bumped_count); | |
1443 | |
1444 // Store the incremented counter. | |
1445 st_ptr(bumped_count, counter); | |
1446 } | |
1447 } | |
1448 | |
1449 // Increment the value at some constant offset from the method data pointer. | |
1450 | |
1451 void InterpreterMacroAssembler::increment_mdp_data_at(int constant, | |
1452 Register bumped_count, | |
1453 bool decrement) { | |
1454 // Locate the counter at a fixed offset from the mdp: | |
727 | 1455 Address counter(ImethodDataPtr, constant); |
0 | 1456 increment_mdp_data_at(counter, bumped_count, decrement); |
1457 } | |
1458 | |
1459 // Increment the value at some non-fixed (reg + constant) offset from | |
1460 // the method data pointer. | |
1461 | |
1462 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg, | |
1463 int constant, | |
1464 Register bumped_count, | |
1465 Register scratch2, | |
1466 bool decrement) { | |
1467 // Add the constant to reg to get the offset. | |
1468 add(ImethodDataPtr, reg, scratch2); | |
727 | 1469 Address counter(scratch2, constant); |
0 | 1470 increment_mdp_data_at(counter, bumped_count, decrement); |
1471 } | |
1472 | |
1473 // Set a flag value at the current method data pointer position. | |
1474 // Updates a single byte of the header, to avoid races with other header bits. | |
1475 | |
1476 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant, | |
1477 Register scratch) { | |
1478 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1479 // Load the data header | |
1480 ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch); | |
1481 | |
1482 // Set the flag | |
1483 or3(scratch, flag_constant, scratch); | |
1484 | |
1485 // Store the modified header. | |
1486 stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset())); | |
1487 } | |
1488 | |
1489 // Test the location at some offset from the method data pointer. | |
1490 // If it is not equal to value, branch to the not_equal_continue Label. | |
1491 // Set condition codes to match the nullness of the loaded value. | |
1492 | |
1493 void InterpreterMacroAssembler::test_mdp_data_at(int offset, | |
1494 Register value, | |
1495 Label& not_equal_continue, | |
1496 Register scratch) { | |
1497 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1498 ld_ptr(ImethodDataPtr, offset, scratch); | |
1499 cmp(value, scratch); | |
1500 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue); | |
1501 delayed()->tst(scratch); | |
1502 } | |
1503 | |
1504 // Update the method data pointer by the displacement located at some fixed | |
1505 // offset from the method data pointer. | |
1506 | |
1507 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp, | |
1508 Register scratch) { | |
1509 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1510 ld_ptr(ImethodDataPtr, offset_of_disp, scratch); | |
1511 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1512 } | |
1513 | |
1514 // Update the method data pointer by the displacement located at the | |
1515 // offset (reg + offset_of_disp). | |
1516 | |
1517 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg, | |
1518 int offset_of_disp, | |
1519 Register scratch) { | |
1520 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1521 add(reg, offset_of_disp, scratch); | |
1522 ld_ptr(ImethodDataPtr, scratch, scratch); | |
1523 add(ImethodDataPtr, scratch, ImethodDataPtr); | |
1524 } | |
1525 | |
1526 // Update the method data pointer by a simple constant displacement. | |
1527 | |
1528 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) { | |
1529 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1530 add(ImethodDataPtr, constant, ImethodDataPtr); | |
1531 } | |
1532 | |
1533 // Update the method data pointer for a _ret bytecode whose target | |
1534 // was not among our cached targets. | |
1535 | |
1536 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state, | |
1537 Register return_bci) { | |
1538 assert(ProfileInterpreter, "must be profiling interpreter"); | |
1539 push(state); | |
1540 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile | |
1541 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); | |
1542 ld_ptr(l_tmp, return_bci); | |
1543 pop(state); | |
1544 } | |
1545 | |
1546 // Count a taken branch in the bytecodes. | |
1547 | |
1548 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) { | |
1549 if (ProfileInterpreter) { | |
1550 Label profile_continue; | |
1551 | |
1552 // If no method data exists, go to profile_continue. | |
1553 test_method_data_pointer(profile_continue); | |
1554 | |
1555 // We are taking a branch. Increment the taken count. | |
1556 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count); | |
1557 | |
1558 // The method data pointer needs to be updated to reflect the new target. | |
1559 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch); | |
1560 bind (profile_continue); | |
1561 } | |
1562 } | |
1563 | |
1564 | |
1565 // Count a not-taken branch in the bytecodes. | |
1566 | |
1567 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) { | |
1568 if (ProfileInterpreter) { | |
1569 Label profile_continue; | |
1570 | |
1571 // If no method data exists, go to profile_continue. | |
1572 test_method_data_pointer(profile_continue); | |
1573 | |
1574 // We are taking a branch. Increment the not taken count. | |
1575 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch); | |
1576 | |
1577 // The method data pointer needs to be updated to correspond to the | |
1578 // next bytecode. | |
1579 update_mdp_by_constant(in_bytes(BranchData::branch_data_size())); | |
1580 bind (profile_continue); | |
1581 } | |
1582 } | |
1583 | |
1584 | |
1585 // Count a non-virtual call in the bytecodes. | |
1586 | |
1587 void InterpreterMacroAssembler::profile_call(Register scratch) { | |
1588 if (ProfileInterpreter) { | |
1589 Label profile_continue; | |
1590 | |
1591 // If no method data exists, go to profile_continue. | |
1592 test_method_data_pointer(profile_continue); | |
1593 | |
1594 // We are making a call. Increment the count. | |
1595 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1596 | |
1597 // The method data pointer needs to be updated to reflect the new target. | |
1598 update_mdp_by_constant(in_bytes(CounterData::counter_data_size())); | |
1599 bind (profile_continue); | |
1600 } | |
1601 } | |
1602 | |
1603 | |
1604 // Count a final call in the bytecodes. | |
1605 | |
1606 void InterpreterMacroAssembler::profile_final_call(Register scratch) { | |
1607 if (ProfileInterpreter) { | |
1608 Label profile_continue; | |
1609 | |
1610 // If no method data exists, go to profile_continue. | |
1611 test_method_data_pointer(profile_continue); | |
1612 | |
1613 // We are making a call. Increment the count. | |
1614 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1615 | |
1616 // The method data pointer needs to be updated to reflect the new target. | |
1617 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1618 bind (profile_continue); | |
1619 } | |
1620 } | |
1621 | |
1622 | |
1623 // Count a virtual call in the bytecodes. | |
1624 | |
1625 void InterpreterMacroAssembler::profile_virtual_call(Register receiver, | |
1503 | 1626 Register scratch, |
1627 bool receiver_can_be_null) { | |
0 | 1628 if (ProfileInterpreter) { |
1629 Label profile_continue; | |
1630 | |
1631 // If no method data exists, go to profile_continue. | |
1632 test_method_data_pointer(profile_continue); | |
1633 | |
1503 | 1634 |
1635 Label skip_receiver_profile; | |
1636 if (receiver_can_be_null) { | |
1637 Label not_null; | |
1638 tst(receiver); | |
1639 brx(Assembler::notZero, false, Assembler::pt, not_null); | |
1640 delayed()->nop(); | |
1641 // We are making a call. Increment the count for null receiver. | |
1642 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1643 ba(false, skip_receiver_profile); | |
1644 delayed()->nop(); | |
1645 bind(not_null); | |
1646 } | |
1647 | |
0 | 1648 // Record the receiver type. |
1206 | 1649 record_klass_in_profile(receiver, scratch, true); |
1503 | 1650 bind(skip_receiver_profile); |
0 | 1651 |
1652 // The method data pointer needs to be updated to reflect the new target. | |
1653 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); | |
1654 bind (profile_continue); | |
1655 } | |
1656 } | |
1657 | |
1658 void InterpreterMacroAssembler::record_klass_in_profile_helper( | |
1659 Register receiver, Register scratch, | |
1206 | 1660 int start_row, Label& done, bool is_virtual_call) { |
1661 if (TypeProfileWidth == 0) { |
1662 if (is_virtual_call) { |
1663 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
1664 } |
967 | 1665 return; |
1206 | 1666 } |
967 | 1667 |
0 | 1668 int last_row = VirtualCallData::row_limit() - 1; |
1669 assert(start_row <= last_row, "must be work left to do"); | |
1670 // Test this row for both the receiver and for null. | |
1671 // Take any of three different outcomes: | |
1672 // 1. found receiver => increment count and goto done | |
1673 // 2. found null => keep looking for case 1, maybe allocate this cell | |
1674 // 3. found something else => keep looking for cases 1 and 2 | |
1675 // Case 3 is handled by a recursive call. | |
1676 for (int row = start_row; row <= last_row; row++) { | |
1677 Label next_test; | |
1678 bool test_for_null_also = (row == start_row); | |
1679 | |
1680 // See if the receiver is receiver[n]. | |
1681 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); | |
1682 test_mdp_data_at(recvr_offset, receiver, next_test, scratch); | |
1206 | 1683 // delayed()->tst(scratch); |
0 | 1684 |
1685 // The receiver is receiver[n]. Increment count[n]. | |
1686 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); | |
1687 increment_mdp_data_at(count_offset, scratch); | |
1688 ba(false, done); | |
1689 delayed()->nop(); | |
1690 bind(next_test); | |
1691 | |
1692 if (test_for_null_also) { | |
1206 | 1693 Label found_null; |
0 | 1694 // Failed the equality check on receiver[n]... Test for null. |
1695 if (start_row == last_row) { | |
1696 // The only thing left to do is handle the null case. | |
1206 | 1697 if (is_virtual_call) { |
1698 brx(Assembler::zero, false, Assembler::pn, found_null); |
1699 delayed()->nop(); |
1700 // Receiver did not match any saved receiver and there is no empty row for it. |
1251 | 1701 // Increment total counter to indicate polymorphic case. |
1206 | 1702 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
1703 ba(false, done); |
1704 delayed()->nop(); |
1705 bind(found_null); |
1706 } else { |
1707 brx(Assembler::notZero, false, Assembler::pt, done); |
1708 delayed()->nop(); |
1709 } |
0 | 1710 break; |
1711 } | |
1712 // Since null is rare, make it be the branch-taken case. | |
1713 brx(Assembler::zero, false, Assembler::pn, found_null); | |
1714 delayed()->nop(); | |
1715 | |
1716 // Put all the "Case 3" tests here. | |
1206 | 1717 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call); |
0 | 1718 |
1719 // Found a null. Keep searching for a matching receiver, | |
1720 // but remember that this is an empty (unused) slot. | |
1721 bind(found_null); | |
1722 } | |
1723 } | |
1724 | |
1725 // In the fall-through case, we found no matching receiver, but we | |
1726 // observed the receiver[start_row] is NULL. | |
1727 | |
1728 // Fill in the receiver field and increment the count. | |
1729 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); | |
1730 set_mdp_data_at(recvr_offset, receiver); | |
1731 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); | |
1732 mov(DataLayout::counter_increment, scratch); | |
1733 set_mdp_data_at(count_offset, scratch); | |
1206 | 1734 if (start_row > 0) { |
1735 ba(false, done); |
1736 delayed()->nop(); |
1737 } |
0 | 1738 } |
1739 | |
1740 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, | |
1206 | 1741 Register scratch, bool is_virtual_call) { |
0 | 1742 assert(ProfileInterpreter, "must be profiling"); |
1743 Label done; | |
1744 | |
1206 | 1745 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call); |
0 | 1746 |
1747 bind (done); | |
1748 } | |
1749 | |
1750 | |
1751 // Count a ret in the bytecodes. | |
1752 | |
1753 void InterpreterMacroAssembler::profile_ret(TosState state, | |
1754 Register return_bci, | |
1755 Register scratch) { | |
1756 if (ProfileInterpreter) { | |
1757 Label profile_continue; | |
1758 uint row; | |
1759 | |
1760 // If no method data exists, go to profile_continue. | |
1761 test_method_data_pointer(profile_continue); | |
1762 | |
1763 // Update the total ret count. | |
1764 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); | |
1765 | |
1766 for (row = 0; row < RetData::row_limit(); row++) { | |
1767 Label next_test; | |
1768 | |
1769 // See if return_bci is equal to bci[n]: | |
1770 test_mdp_data_at(in_bytes(RetData::bci_offset(row)), | |
1771 return_bci, next_test, scratch); | |
1772 | |
1773 // return_bci is equal to bci[n]. Increment the count. | |
1774 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch); | |
1775 | |
1776 // The method data pointer needs to be updated to reflect the new target. | |
1777 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch); | |
1778 ba(false, profile_continue); | |
1779 delayed()->nop(); | |
1780 bind(next_test); | |
1781 } | |
1782 | |
1783 update_mdp_for_ret(state, return_bci); | |
1784 | |
1785 bind (profile_continue); | |
1786 } | |
1787 } | |
1788 | |
1789 // Profile an unexpected null in the bytecodes. | |
1790 void InterpreterMacroAssembler::profile_null_seen(Register scratch) { | |
1791 if (ProfileInterpreter) { | |
1792 Label profile_continue; | |
1793 | |
1794 // If no method data exists, go to profile_continue. | |
1795 test_method_data_pointer(profile_continue); | |
1796 | |
1797 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch); | |
1798 | |
1799 // The method data pointer needs to be updated. | |
1800 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1801 if (TypeProfileCasts) { | |
1802 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1803 } | |
1804 update_mdp_by_constant(mdp_delta); | |
1805 | |
1806 bind (profile_continue); | |
1807 } | |
1808 } | |
1809 | |
1810 void InterpreterMacroAssembler::profile_typecheck(Register klass, | |
1811 Register scratch) { | |
1812 if (ProfileInterpreter) { | |
1813 Label profile_continue; | |
1814 | |
1815 // If no method data exists, go to profile_continue. | |
1816 test_method_data_pointer(profile_continue); | |
1817 | |
1818 int mdp_delta = in_bytes(BitData::bit_data_size()); | |
1819 if (TypeProfileCasts) { | |
1820 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); | |
1821 | |
1822 // Record the object type. | |
1206 | 1823 record_klass_in_profile(klass, scratch, false); |
0 | 1824 } |
1825 | |
1826 // The method data pointer needs to be updated. | |
1827 update_mdp_by_constant(mdp_delta); | |
1828 | |
1829 bind (profile_continue); | |
1830 } | |
1831 } | |
1832 | |
1833 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) { | |
1834 if (ProfileInterpreter && TypeProfileCasts) { | |
1835 Label profile_continue; | |
1836 | |
1837 // If no method data exists, go to profile_continue. | |
1838 test_method_data_pointer(profile_continue); | |
1839 | |
1840 int count_offset = in_bytes(CounterData::count_offset()); | |
1841 // Back up the address, since we have already bumped the mdp. | |
1842 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); | |
1843 | |
1844 // *Decrement* the counter. We expect to see zero or small negatives. | |
1845 increment_mdp_data_at(count_offset, scratch, true); | |
1846 | |
1847 bind (profile_continue); | |
1848 } | |
1849 } | |
1850 | |
1851 // Count the default case of a switch construct. | |
1852 | |
1853 void InterpreterMacroAssembler::profile_switch_default(Register scratch) { | |
1854 if (ProfileInterpreter) { | |
1855 Label profile_continue; | |
1856 | |
1857 // If no method data exists, go to profile_continue. | |
1858 test_method_data_pointer(profile_continue); | |
1859 | |
1860 // Update the default case count | |
1861 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()), | |
1862 scratch); | |
1863 | |
1864 // The method data pointer needs to be updated. | |
1865 update_mdp_by_offset( | |
1866 in_bytes(MultiBranchData::default_displacement_offset()), | |
1867 scratch); | |
1868 | |
1869 bind (profile_continue); | |
1870 } | |
1871 } | |
1872 | |
1873 // Count the index'th case of a switch construct. | |
1874 | |
1875 void InterpreterMacroAssembler::profile_switch_case(Register index, | |
1876 Register scratch, | |
1877 Register scratch2, | |
1878 Register scratch3) { | |
1879 if (ProfileInterpreter) { | |
1880 Label profile_continue; | |
1881 | |
1882 // If no method data exists, go to profile_continue. | |
1883 test_method_data_pointer(profile_continue); | |
1884 | |
1885 // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes() | |
1886 set(in_bytes(MultiBranchData::per_case_size()), scratch); | |
1887 smul(index, scratch, scratch); | |
1888 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch); | |
1889 | |
1890 // Update the case count | |
1891 increment_mdp_data_at(scratch, | |
1892 in_bytes(MultiBranchData::relative_count_offset()), | |
1893 scratch2, | |
1894 scratch3); | |
1895 | |
1896 // The method data pointer needs to be updated. | |
1897 update_mdp_by_offset(scratch, | |
1898 in_bytes(MultiBranchData::relative_displacement_offset()), | |
1899 scratch2); | |
1900 | |
1901 bind (profile_continue); | |
1902 } | |
1903 } | |
1904 | |
1905 // add a InterpMonitorElem to stack (see frame_sparc.hpp) | |
1906 | |
1907 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty, | |
1908 Register Rtemp, | |
1909 Register Rtemp2 ) { | |
1910 | |
1911 Register Rlimit = Lmonitors; | |
1912 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
1913 assert( (delta & LongAlignmentMask) == 0, | |
1914 "sizeof BasicObjectLock must be even number of doublewords"); | |
1915 | |
1916 sub( SP, delta, SP); | |
1917 sub( Lesp, delta, Lesp); | |
1918 sub( Lmonitors, delta, Lmonitors); | |
1919 | |
1920 if (!stack_is_empty) { | |
1921 | |
1922 // must copy stack contents down | |
1923 | |
1924 Label start_copying, next; | |
1925 | |
1926 // untested("monitor stack expansion"); | |
1927 compute_stack_base(Rtemp); | |
1928 ba( false, start_copying ); | |
1929 delayed()->cmp( Rtemp, Rlimit); // done? duplicated below | |
1930 | |
1931 // note: must copy from low memory upwards | |
1932 // On entry to loop, | |
1933 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS) | |
1934 // Loop mutates Rtemp | |
1935 | |
1936 bind( next); | |
1937 | |
1938 st_ptr(Rtemp2, Rtemp, 0); | |
1939 inc(Rtemp, wordSize); | |
1940 cmp(Rtemp, Rlimit); // are we done? (duplicated above) | |
1941 | |
1942 bind( start_copying ); | |
1943 | |
1944 brx( notEqual, true, pn, next ); | |
1945 delayed()->ld_ptr( Rtemp, delta, Rtemp2 ); | |
1946 | |
1947 // done copying stack | |
1948 } | |
1949 } | |
1950 | |
1951 // Locals | |
1952 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) { | |
1953 assert_not_delayed(); | |
1506 | 1954 sll(index, Interpreter::logStackElementSize, index); |
0 | 1955 sub(Llocals, index, index); |
1506 | 1956 ld_ptr(index, 0, dst); |
0 | 1957 // Note: index must hold the effective address--the iinc template uses it |
1958 } | |
1959 | |
1960 // Just like access_local_ptr but the tag is a returnAddress | |
1961 void InterpreterMacroAssembler::access_local_returnAddress(Register index, | |
1962 Register dst ) { | |
1963 assert_not_delayed(); | |
1506 | 1964 sll(index, Interpreter::logStackElementSize, index); |
0 | 1965 sub(Llocals, index, index); |
1506 | 1966 ld_ptr(index, 0, dst); |
0 | 1967 } |
1968 | |
1969 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) { | |
1970 assert_not_delayed(); | |
1506 | 1971 sll(index, Interpreter::logStackElementSize, index); |
0 | 1972 sub(Llocals, index, index); |
1506 | 1973 ld(index, 0, dst); |
0 | 1974 // Note: index must hold the effective address--the iinc template uses it |
1975 } | |
1976 | |
1977 | |
1978 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) { | |
1979 assert_not_delayed(); | |
1506 | 1980 sll(index, Interpreter::logStackElementSize, index); |
0 | 1981 sub(Llocals, index, index); |
1982 // First half stored at index n+1 (which grows down from Llocals[n]) | |
1983 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst); | |
1984 } | |
1985 | |
1986 | |
1987 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) { | |
1988 assert_not_delayed(); | |
1506 | 1989 sll(index, Interpreter::logStackElementSize, index); |
0 | 1990 sub(Llocals, index, index); |
1506 | 1991 ldf(FloatRegisterImpl::S, index, 0, dst); |
0 | 1992 } |
1993 | |
1994 | |
1995 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) { | |
1996 assert_not_delayed(); | |
1506 | 1997 sll(index, Interpreter::logStackElementSize, index); |
0 | 1998 sub(Llocals, index, index); |
1999 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst); | |
2000 } | |
2001 | |
2002 | |
2003 #ifdef ASSERT | |
2004 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) { | |
2005 Label L; | |
2006 | |
2007 assert(Rindex != Rscratch, "Registers cannot be same"); | |
2008 assert(Rindex != Rscratch1, "Registers cannot be same"); | |
2009 assert(Rlimit != Rscratch, "Registers cannot be same"); | |
2010 assert(Rlimit != Rscratch1, "Registers cannot be same"); | |
2011 assert(Rscratch1 != Rscratch, "Registers cannot be same"); | |
2012 | |
2013 // untested("reg area corruption"); | |
2014 add(Rindex, offset, Rscratch); | |
2015 add(Rlimit, 64 + STACK_BIAS, Rscratch1); | |
2016 cmp(Rscratch, Rscratch1); | |
2017 brx(Assembler::greaterEqualUnsigned, false, pn, L); | |
2018 delayed()->nop(); | |
2019 stop("regsave area is being clobbered"); | |
2020 bind(L); | |
2021 } | |
2022 #endif // ASSERT | |
2023 | |
2024 | |
2025 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) { | |
2026 assert_not_delayed(); | |
1506 | 2027 sll(index, Interpreter::logStackElementSize, index); |
0 | 2028 sub(Llocals, index, index); |
1506 | 2029 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);) |
2030 st(src, index, 0); | |
0 | 2031 } |
2032 | |
1506 | 2033 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) { |
0 | 2034 assert_not_delayed(); |
1506 | 2035 sll(index, Interpreter::logStackElementSize, index); |
0 | 2036 sub(Llocals, index, index); |
1506 | 2037 #ifdef ASSERT |
2038 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); | |
2039 #endif | |
2040 st_ptr(src, index, 0); | |
0 | 2041 } |
2042 | |
2043 | |
2044 | |
1506 | 2045 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) { |
2046 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n)); | |
0 | 2047 } |
2048 | |
2049 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) { | |
2050 assert_not_delayed(); | |
1506 | 2051 sll(index, Interpreter::logStackElementSize, index); |
0 | 2052 sub(Llocals, index, index); |
1506 | 2053 #ifdef ASSERT |
0 | 2054 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
1506 | 2055 #endif |
0 | 2056 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1 |
2057 } | |
2058 | |
2059 | |
2060 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) { | |
2061 assert_not_delayed(); | |
1506 | 2062 sll(index, Interpreter::logStackElementSize, index); |
0 | 2063 sub(Llocals, index, index); |
1506 | 2064 #ifdef ASSERT |
2065 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); | |
2066 #endif | |
2067 stf(FloatRegisterImpl::S, src, index, 0); | |
0 | 2068 } |
2069 | |
2070 | |
2071 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) { | |
2072 assert_not_delayed(); | |
1506 | 2073 sll(index, Interpreter::logStackElementSize, index); |
0 | 2074 sub(Llocals, index, index); |
1506 | 2075 #ifdef ASSERT |
0 | 2076 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
1506 | 2077 #endif |
0 | 2078 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1)); |
2079 } | |
2080 | |
2081 | |
2082 int InterpreterMacroAssembler::top_most_monitor_byte_offset() { | |
2083 const jint delta = frame::interpreter_frame_monitor_size() * wordSize; | |
2084 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong); | |
2085 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS; | |
2086 } | |
2087 | |
2088 | |
2089 Address InterpreterMacroAssembler::top_most_monitor() { | |
727 | 2090 return Address(FP, top_most_monitor_byte_offset()); |
0 | 2091 } |
2092 | |
2093 | |
2094 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { | |
2095 add( Lesp, wordSize, Rdest ); | |
2096 } | |
2097 | |
2098 #endif /* CC_INTERP */ | |
2099 | |
2100 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { | |
2101 assert(UseCompiler, "incrementing must be useful"); | |
2102 #ifdef CC_INTERP | |
727 | 2103 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + |
2104 InvocationCounter::counter_offset()); | |
2105 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + | |
2106 InvocationCounter::counter_offset()); | |
0 | 2107 #else |
727 | 2108 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + |
2109 InvocationCounter::counter_offset()); | |
2110 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + | |
2111 InvocationCounter::counter_offset()); | |
0 | 2112 #endif /* CC_INTERP */ |
2113 int delta = InvocationCounter::count_increment; | |
2114 | |
2115 // Load each counter in a register | |
2116 ld( inv_counter, Rtmp ); | |
2117 ld( be_counter, Rtmp2 ); | |
2118 | |
2119 assert( is_simm13( delta ), " delta too large."); | |
2120 | |
2121 // Add the delta to the invocation counter and store the result | |
2122 add( Rtmp, delta, Rtmp ); | |
2123 | |
2124 // Mask the backedge counter | |
2125 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2126 | |
2127 // Store value | |
2128 st( Rtmp, inv_counter); | |
2129 | |
2130 // Add invocation counter + backedge counter | |
2131 add( Rtmp, Rtmp2, Rtmp); | |
2132 | |
2133 // Note that this macro must leave the backedge_count + invocation_count in Rtmp! | |
2134 } | |
2135 | |
2136 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { | |
2137 assert(UseCompiler, "incrementing must be useful"); | |
2138 #ifdef CC_INTERP | |
727 | 2139 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + |
2140 InvocationCounter::counter_offset()); | |
2141 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + | |
2142 InvocationCounter::counter_offset()); | |
0 | 2143 #else |
727 | 2144 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + |
2145 InvocationCounter::counter_offset()); | |
2146 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + | |
2147 InvocationCounter::counter_offset()); | |
0 | 2148 #endif /* CC_INTERP */ |
2149 int delta = InvocationCounter::count_increment; | |
2150 // Load each counter in a register | |
2151 ld( be_counter, Rtmp ); | |
2152 ld( inv_counter, Rtmp2 ); | |
2153 | |
2154 // Add the delta to the backedge counter | |
2155 add( Rtmp, delta, Rtmp ); | |
2156 | |
2157 // Mask the invocation counter, add to backedge counter | |
2158 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); | |
2159 | |
2160 // and store the result to memory | |
2161 st( Rtmp, be_counter ); | |
2162 | |
2163 // Add backedge + invocation counter | |
2164 add( Rtmp, Rtmp2, Rtmp ); | |
2165 | |
2166 // Note that this macro must leave backedge_count + invocation_count in Rtmp! | |
2167 } | |
2168 | |
2169 #ifndef CC_INTERP | |
2170 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count, | |
2171 Register branch_bcp, | |
2172 Register Rtmp ) { | |
2173 Label did_not_overflow; | |
2174 Label overflow_with_error; | |
2175 assert_different_registers(backedge_count, Rtmp, branch_bcp); | |
2176 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); | |
2177 | |
727 | 2178 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); |
0 | 2179 load_contents(limit, Rtmp); |
2180 cmp(backedge_count, Rtmp); | |
2181 br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow); | |
2182 delayed()->nop(); | |
2183 | |
2184 // When ProfileInterpreter is on, the backedge_count comes from the | |
2185 // methodDataOop, which value does not get reset on the call to | |
2186 // frequency_counter_overflow(). To avoid excessive calls to the overflow | |
2187 // routine while the method is being compiled, add a second test to make sure | |
2188 // the overflow function is called only once every overflow_frequency. | |
2189 if (ProfileInterpreter) { | |
2190 const int overflow_frequency = 1024; | |
2191 andcc(backedge_count, overflow_frequency-1, Rtmp); | |
2192 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow); | |
2193 delayed()->nop(); | |
2194 } | |
2195 | |
2196 // overflow in loop, pass branch bytecode | |
2197 set(6,Rtmp); | |
2198 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp); | |
2199 | |
2200 // Was an OSR adapter generated? | |
2201 // O0 = osr nmethod | |
2202 tst(O0); | |
2203 brx(Assembler::zero, false, Assembler::pn, overflow_with_error); | |
2204 delayed()->nop(); | |
2205 | |
2206 // Has the nmethod been invalidated already? | |
2207 ld(O0, nmethod::entry_bci_offset(), O2); | |
2208 cmp(O2, InvalidOSREntryBci); | |
2209 br(Assembler::equal, false, Assembler::pn, overflow_with_error); | |
2210 delayed()->nop(); | |
2211 | |
2212 // migrate the interpreter frame off of the stack | |
2213 | |
2214 mov(G2_thread, L7); | |
2215 // save nmethod | |
2216 mov(O0, L6); | |
2217 set_last_Java_frame(SP, noreg); | |
2218 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); | |
2219 reset_last_Java_frame(); | |
2220 mov(L7, G2_thread); | |
2221 | |
2222 // move OSR nmethod to I1 | |
2223 mov(L6, I1); | |
2224 | |
2225 // OSR buffer to I0 | |
2226 mov(O0, I0); | |
2227 | |
2228 // remove the interpreter frame | |
2229 restore(I5_savedSP, 0, SP); | |
2230 | |
2231 // Jump to the osr code. | |
2232 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); | |
2233 jmp(O2, G0); | |
2234 delayed()->nop(); | |
2235 | |
2236 bind(overflow_with_error); | |
2237 | |
2238 bind(did_not_overflow); | |
2239 } | |
2240 | |
2241 | |
2242 | |
2243 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) { | |
2244 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); } | |
2245 } | |
2246 | |
2247 | |
2248 // local helper function for the verify_oop_or_return_address macro | |
2249 static bool verify_return_address(methodOopDesc* m, int bci) { | |
2250 #ifndef PRODUCT | |
2251 address pc = (address)(m->constMethod()) | |
2252 + in_bytes(constMethodOopDesc::codes_offset()) + bci; | |
2253 // assume it is a valid return address if it is inside m and is preceded by a jsr | |
2254 if (!m->contains(pc)) return false; | |
2255 address jsr_pc; | |
2256 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr); | |
2257 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true; | |
2258 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w); | |
2259 if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true; | |
2260 #endif // PRODUCT | |
2261 return false; | |
2262 } | |
2263 | |
2264 | |
2265 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) { | |
2266 if (!VerifyOops) return; | |
2267 // the VM documentation for the astore[_wide] bytecode allows | |
2268 // the TOS to be not only an oop but also a return address | |
2269 Label test; | |
2270 Label skip; | |
2271 // See if it is an address (in the current method): | |
2272 | |
2273 mov(reg, Rtmp); | |
2274 const int log2_bytecode_size_limit = 16; | |
2275 srl(Rtmp, log2_bytecode_size_limit, Rtmp); | |
2276 br_notnull( Rtmp, false, pt, test ); | |
2277 delayed()->nop(); | |
2278 | |
2279 // %%% should use call_VM_leaf here? | |
2280 save_frame_and_mov(0, Lmethod, O0, reg, O1); | |
2281 save_thread(L7_thread_cache); | |
2282 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none); | |
2283 delayed()->nop(); | |
2284 restore_thread(L7_thread_cache); | |
2285 br_notnull( O0, false, pt, skip ); | |
2286 delayed()->restore(); | |
2287 | |
2288 // Perform a more elaborate out-of-line call | |
2289 // Not an address; verify it: | |
2290 bind(test); | |
2291 verify_oop(reg); | |
2292 bind(skip); | |
2293 } | |
2294 | |
2295 | |
2296 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { | |
2297 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth); | |
2298 } | |
2299 #endif /* CC_INTERP */ | |
2300 | |
2301 // Inline assembly for: | |
2302 // | |
2303 // if (thread is in interp_only_mode) { | |
2304 // InterpreterRuntime::post_method_entry(); | |
2305 // } | |
2306 // if (DTraceMethodProbes) { | |
605 | 2307 // SharedRuntime::dtrace_method_entry(method, receiver); |
0 | 2308 // } |
610 | 2309 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2310 // SharedRuntime::rc_trace_method_entry(method, receiver); |
0 | 2311 // } |
2312 | |
2313 void InterpreterMacroAssembler::notify_method_entry() { | |
2314 | |
2315 // C++ interpreter only uses this for native methods. | |
2316 | |
2317 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2318 // entry/exit events are sent for that thread to track stack | |
2319 // depth. If it is possible to enter interp_only_mode we add | |
2320 // the code to check if the event should be sent. | |
2321 if (JvmtiExport::can_post_interpreter_events()) { | |
2322 Label L; | |
2323 Register temp_reg = O5; | |
727 | 2324 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2325 ld(interp_only, temp_reg); |
2326 tst(temp_reg); | |
2327 br(zero, false, pt, L); | |
2328 delayed()->nop(); | |
2329 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); | |
2330 bind(L); | |
2331 } | |
2332 | |
2333 { | |
2334 Register temp_reg = O5; | |
2335 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2336 call_VM_leaf(noreg, | |
2337 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), | |
2338 G2_thread, Lmethod); | |
2339 } | |
610 | 2340 |
2341 // RedefineClasses() tracing support for obsolete method entry |
2342 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
2343 call_VM_leaf(noreg, |
2344 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), |
2345 G2_thread, Lmethod); |
2346 } |
0 | 2347 } |
2348 | |
2349 | |
2350 // Inline assembly for: | |
2351 // | |
2352 // if (thread is in interp_only_mode) { | |
2353 // // save result | |
2354 // InterpreterRuntime::post_method_exit(); | |
2355 // // restore result | |
2356 // } | |
2357 // if (DTraceMethodProbes) { | |
2358 // SharedRuntime::dtrace_method_exit(thread, method); | |
2359 // } | |
2360 // | |
2361 // Native methods have their result stored in d_tmp and l_tmp | |
2362 // Java methods have their result stored in the expression stack | |
2363 | |
2364 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, | |
2365 TosState state, | |
2366 NotifyMethodExitMode mode) { | |
2367 // C++ interpreter only uses this for native methods. | |
2368 | |
2369 // Whenever JVMTI puts a thread in interp_only_mode, method | |
2370 // entry/exit events are sent for that thread to track stack | |
2371 // depth. If it is possible to enter interp_only_mode we add | |
2372 // the code to check if the event should be sent. | |
2373 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { | |
2374 Label L; | |
2375 Register temp_reg = O5; | |
727 | 2376 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
0 | 2377 ld(interp_only, temp_reg); |
2378 tst(temp_reg); | |
2379 br(zero, false, pt, L); | |
2380 delayed()->nop(); | |
2381 | |
2382 // Note: frame::interpreter_frame_result has a dependency on how the | |
2383 // method result is saved across the call to post_method_exit. For | |
2384 // native methods it assumes the result registers are saved to | |
2385 // l_scratch and d_scratch. If this changes then the interpreter_frame_result | |
2386 // implementation will need to be updated too. | |
2387 | |
2388 save_return_value(state, is_native_method); | |
2389 call_VM(noreg, | |
2390 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); | |
2391 restore_return_value(state, is_native_method); | |
2392 bind(L); | |
2393 } | |
2394 | |
2395 { | |
2396 Register temp_reg = O5; | |
2397 // Dtrace notification | |
2398 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); | |
2399 save_return_value(state, is_native_method); | |
2400 call_VM_leaf( | |
2401 noreg, | |
2402 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), | |
2403 G2_thread, Lmethod); | |
2404 restore_return_value(state, is_native_method); | |
2405 } | |
2406 } | |
2407 | |
2408 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { | |
2409 #ifdef CC_INTERP | |
2410 // result potentially in O0/O1: save it across calls | |
2411 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); | |
2412 #ifdef _LP64 | |
2413 stx(O0, STATE(_native_lresult)); | |
2414 #else | |
2415 std(O0, STATE(_native_lresult)); | |
2416 #endif | |
2417 #else // CC_INTERP | |
2418 if (is_native_call) { | |
2419 stf(FloatRegisterImpl::D, F0, d_tmp); | |
2420 #ifdef _LP64 | |
2421 stx(O0, l_tmp); | |
2422 #else | |
2423 std(O0, l_tmp); | |
2424 #endif | |
2425 } else { | |
2426 push(state); | |
2427 } | |
2428 #endif // CC_INTERP | |
2429 } | |
2430 | |
2431 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { | |
2432 #ifdef CC_INTERP | |
2433 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); | |
2434 #ifdef _LP64 | |
2435 ldx(STATE(_native_lresult), O0); | |
2436 #else | |
2437 ldd(STATE(_native_lresult), O0); | |
2438 #endif | |
2439 #else // CC_INTERP | |
2440 if (is_native_call) { | |
2441 ldf(FloatRegisterImpl::D, d_tmp, F0); | |
2442 #ifdef _LP64 | |
2443 ldx(l_tmp, O0); | |
2444 #else | |
2445 ldd(l_tmp, O0); | |
2446 #endif | |
2447 } else { | |
2448 pop(state); | |
2449 } | |
2450 #endif // CC_INTERP | |
2451 } | |
1783 | 2452 |
2453 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. | |
2454 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, | |
2455 int increment, int mask, | |
2456 Register scratch1, Register scratch2, | |
2457 Condition cond, Label *where) { | |
2458 ld(counter_addr, scratch1); | |
2459 add(scratch1, increment, scratch1); | |
2460 if (is_simm13(mask)) { | |
2461 andcc(scratch1, mask, G0); | |
2462 } else { | |
2463 set(mask, scratch2); | |
2464 andcc(scratch1, scratch2, G0); | |
2465 } | |
2466 br(cond, false, Assembler::pn, *where); | |
2467 delayed()->st(scratch1, counter_addr); | |
2468 } |