annotate src/cpu/sparc/vm/templateInterpreter_sparc.cpp @ 693:2c1dbb844832 (Merge)
author:   acorn
date:     Thu, 02 Apr 2009 18:17:03 -0400
parents:  d1605aabd0a1
children: e5b0439ef4ae
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateInterpreter_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}
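
// Typical use of the pair above brackets a call that may clobber the native
// result registers, as in the blocking path of generate_native_entry below:
//
//   save_native_result();
//   __ call_VM_leaf(L7_thread_cache,
//                   CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
//                   G2_thread);
//   restore_native_result();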

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  Address thrower(G3_scratch, Interpreter::throw_exception_entry());
  __ jump_to(thrower);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  address compiled_entry = __ pc();
  Label cont;

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif /* !_LP64 && COMPILER2 */


  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.


  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) +
                    in_bytes(ConstantPoolCacheEntry::flags_offset())), size);
  __ and3(size, 0xFF, size);                   // argument size in words
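  // (The and3 above relies on the cp-cache entry layout: the parameter size,
  // in words, lives in the low byte of the flags word.)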
  __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                    // pop arguments
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));

    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
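    // Note on the T_BOOLEAN case: subcc(G0, O0, G0) computes 0 - O0 and sets
    // the carry flag exactly when O0 != 0; addc(G0, 0, Itos_i) then
    // materializes that carry as 0 or 1.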
  case T_BOOLEAN : __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
  case T_CHAR    : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
  case T_BYTE    : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
  case T_SHORT   : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
  case T_LONG    :
#ifndef _LP64
                   __ mov(O1, Itos_l2);  // move other half of long
#endif // ifdef or no ifdef, fall through to the T_INT case
  case T_INT     : __ mov(O0, Itos_i);                           break;
  case T_VOID    : /* nothing to do */                           break;
  case T_FLOAT   : assert(F0 == Ftos_f, "fix this code");        break;
  case T_DOUBLE  : assert(F0 == Ftos_d, "fix this code");        break;
  case T_OBJECT  :
    __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
    __ verify_oop(Itos_i);
    break;
  default        : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)               // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
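// In effect (a rough pseudo-C sketch of what the generated code checks):
//
//   counter += 1;                                       // invocation counter
//   if (ProfileInterpreter && counter >= InterpreterProfileLimit
//       && there is no method data yet)                 goto profile_method;
//   if (counter >= InterpreterInvocationLimit)          goto overflow;
//
// Both comparisons are unsigned, so once the counter wraps negative it stays
// above the limits: that is the 'sticky' overflow test mentioned above.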
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Update standard invocation counters
  __ increment_invocation_counter(O0, G3_scratch);
  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
    Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
    __ ld(interpreter_invocation_counter, G3_scratch);
    __ inc(G3_scratch);
    __ st(G3_scratch, interpreter_invocation_counter);
  }

  if (ProfileInterpreter && profile_method != NULL) {
    // Test to see if we should create a method data oop
    Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit);
    __ sethi(profile_limit);
    __ ld(profile_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
    __ delayed()->nop();

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(*profile_method);
  }

  Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
  __ sethi(invocation_limit);
  __ ld(invocation_limit, G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
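// For a static method the synchronization object is the class mirror (fetched
// via the method's constant pool holder); for an instance method it is the
// receiver, i.e. local 0.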
void InterpreterGenerator::lock_method(void) {
  const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
  __ ld(access_flags, O0);

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br(Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br(Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr(O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr(O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr(O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());  // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch,
                                                                 Register Rscratch2) {
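  // Roughly, in pseudo-C:
  //
  //   if (frame_size > page_size) {
  //     intptr_t limit = stack_base - stack_size
  //                      + (StackRedPages + StackYellowPages) * page_size
  //                      + frame_size;
  //     if (SP <= limit) throw StackOverflowError;
  //   }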
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, 0,
                             in_bytes(JavaThread::saved_exception_pc_offset()));
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp(Rframe_size, Rscratch);

  __ br(Assembler::lessEqual, false, Assembler::pt, after_frame_check);
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch);
#ifdef ASSERT
  Label base_not_zero;
  __ cmp(Rscratch, G0);
  __ brx(Assembler::notEqual, false, Assembler::pn, base_not_zero);
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert(sizeof(size_t) == sizeof(intptr_t), "wrong load size");
  __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2);
#ifdef ASSERT
  Label size_not_zero;
  __ cmp(Rscratch2, G0);
  __ brx(Assembler::notEqual, false, Assembler::pn, size_not_zero);
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub(Rscratch, Rscratch2, Rscratch);
  __ set((StackRedPages+StackYellowPages) * page_size, Rscratch2);
  __ add(Rscratch, Rscratch2, Rscratch);

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add(Rscratch, Rframe_size, Rscratch);

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp(SP, Rscratch);
  __ brx(Assembler::greater, false, Assembler::pt, after_frame_check);
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind(after_frame_check);
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
  const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.

  __ lduh(size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words(Glocals_size, Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, LogBytesPerWord, Gframe_size);
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize(), Glocals_size);

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh(max_stack, Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize(), Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg(Gframe_size);
  __ save(SP, Gframe_size, SP);

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp);
    __ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp);
  }
  __ mov(G5_method, Lmethod);                   // set Lmethod
  __ get_constant_pool_cache(LcpoolCache);      // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors); // set Lmonitors
#ifdef _LP64
  __ add(Lmonitors, STACK_BIAS, Lmonitors);     // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);        // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);         // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);
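    // i.e., a leaf return straight to the caller, restoring the caller's SP
    // in the delay slot; no interpreter frame is ever built.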

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry)

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  //       parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;

  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if (UseFastAccessorMethods && !UseCompressedOops) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i);  // get local 0
    __ tst(Otos_i);                // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch);
    __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
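    // Net effect: the 16-bit cp-cache index from bytecode bytes 2-3 ends up
    // in G1_scratch, already scaled by the cp-cache entry size in bytes, so
    // it can be added directly to the cache base below.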

    // get constant pool cache
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                  // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS));

  const Address do_not_unlock_if_synchronized(G2_thread, 0,
      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  // At this point in the method invocation the exception handler would try to
  // exit the monitor of a synchronized method which has not been entered yet,
  // so we set the thread-local variable _do_not_unlock_if_synchronized to
  // true. If any exception was thrown by the runtime, the exception handling
  // path (i.e. unlock_if_synchronized_method) will check this thread-local
  // flag.
  // The flag has two effects: it forces an unwind in the topmost interpreter
  // frame, and it suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br(Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);
  // calculate where the mirror handle body is allocated in the interpreter frame:

  Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
  __ add(mirror, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);      // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the args properly since Llocals in
  // the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br(Assembler::zero, false, Assembler::pt, not_static);
    __ delayed()->
      // get native function entry point (O0 is a good temp until the very end)
      ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::constants_offset())), O1);
    __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->
      ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
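    // In effect: execution reaches the blocking call below unless
    // (sync_state == _not_synchronized && suspend_flags == 0).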
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef __LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* __LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further
  // gc until we return, now that we've released the handle it might have been
  // protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));

    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add(__ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1. We can't tell whether we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);        // Shift bits into high G1
  __ srl (O1,  0, O1);        // Zero extend O1
  __ or3 (O1, G1, G1);        // OR 64 bits into G1
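  // i.e., G1 = ((jlong)O0 << 32) | (O1 & 0xffffffff), rebuilding the 64-bit
  // long from its two 32-bit halves.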

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals, so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize(), O2 );
  __ sll( O1, Interpreter::logStackElementSize(), O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
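  // In effect this clears only the non-parameter locals: starting one word
  // above (Llocals - size_of_locals * element_size) and stepping up by
  // wordSize through (Llocals - size_of_parameters * element_size), each slot
  // gets NULL. The parameter slots already hold live values and are left
  // untouched.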

  const Address do_not_unlock_if_synchronized(G2_thread, 0,
      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  // At this point in the method invocation the exception handler would try to
  // exit the monitor of a synchronized method which has not been entered yet,
  // so we set the thread-local variable _do_not_unlock_if_synchronized to
  // true. If any exception was thrown by the runtime, the exception handling
  // path (i.e. unlock_if_synchronized_method) will check this thread-local
  // flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br(Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);

#ifdef ASSERT
      __ tst(O0);
      __ breakpoint_trap(Assembler::notEqual);
#endif

      __ set_method_data_pointer();

      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}
1312 | |
1313 | |
1314 //---------------------------------------------------------------------------------------------------- | |
1315 // Entry points & stack frame layout | |
1316 // | |
1317 // Here we generate the various kind of entries into the interpreter. | |
1318 // The two main entry type are generic bytecode methods and native call method. | |
1319 // These both come in synchronized and non-synchronized versions but the | |
1320 // frame layout they create is very similar. The other method entry | |
1321 // types are really just special purpose entries that are really entry | |
1322 // and interpretation all in one. These are for trivial methods like | |
1323 // accessor, empty, or special math methods. | |
1324 // | |
1325 // When control flow reaches any of the entry types for the interpreter | |
1326 // the following holds -> | |
1327 // | |
1328 // C2 Calling Conventions: | |
1329 // | |
1330 // The entry code below assumes that the following registers are set | |
1331 // when coming in: | |
1332 // G5_method: holds the methodOop of the method to call | |
1333 // Lesp: points to the TOS of the caller's expression stack | |
1334 // after having pushed all the parameters | |
1335 // | |
1336 // The entry code does the following to set up an interpreter frame (a worked example follows below): | |
1337 // pop parameters from the caller's stack by adjusting Lesp | |
1338 // set O0 to Lesp | |
1339 // compute X = (max_locals - num_parameters) | |
1340 // bump SP up by X to accommodate the extra locals | |
1341 // compute X = max_expression_stack | |
1342 // + vm_local_words | |
1343 // + 16 words of register save area | |
1344 // allocate the frame with a 'save sp, -X, sp' (the stack grows towards lower addresses) | |
1345 // set Lbcp, Lmethod, LcpoolCache | |
1346 // set Llocals to i0 | |
1347 // set Lmonitors to FP - rounded_vm_local_words | |
1348 // set Lesp to Lmonitors - 4 | |
1349 // | |
1350 // The frame has now been set up to do the rest of the entry code | |
1351 | |
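// Editorial worked example (hypothetical numbers, not in the original source):
//   num_parameters = 2, max_locals = 5, max_expression_stack = 4
//   X = max_locals - num_parameters    = 5 - 2      = 3   // words of extra locals
//   X = max_expression_stack
//     + vm_local_words (assume 6)
//     + 16 words of register save area = 4 + 6 + 16 = 26  // words for the save
//   save sp, -26*wordSize, sp   // plus whatever rounding the platform requires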
1352 // Try this optimization: Most method entries could live in a | |
1353 // "one size fits all" stack frame without all the dynamic size | |
1354 // calculations. It might be profitable to do all this calculation | |
1355 // statically and approximately for "small enough" methods. | |
1356 | |
1357 //----------------------------------------------------------------------------------------------- | |
1358 | |
1359 // C1 Calling conventions | |
1360 // | |
1361 // Upon method entry, the following registers are setup: | |
1362 // | |
1363 // g2 G2_thread: current thread | |
1364 // g5 G5_method: method to activate | |
1365 // g4 Gargs : pointer to last argument | |
1366 // | |
1367 // | |
1368 // Stack: | |
1369 // | |
1370 // +---------------+ <--- sp | |
1371 // | | | |
1372 // : reg save area : | |
1373 // | | | |
1374 // +---------------+ <--- sp + 0x40 | |
1375 // | | | |
1376 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later) | |
1377 // | | | |
1378 // +---------------+ <--- sp + 0x5c | |
1379 // | | | |
1380 // : free : | |
1381 // | | | |
1382 // +---------------+ <--- Gargs | |
1383 // | | | |
1384 // : arguments : | |
1385 // | | | |
1386 // +---------------+ | |
1387 // | | | |
1388 // | |
1389 // | |
1390 // | |
1391 // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like: | |
1392 // | |
1393 // +---------------+ <--- sp | |
1394 // | | | |
1395 // : reg save area : | |
1396 // | | | |
1397 // +---------------+ <--- sp + 0x40 | |
1398 // | | | |
1399 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later) | |
1400 // | | | |
1401 // +---------------+ <--- sp + 0x5c | |
1402 // | | | |
1403 // : : | |
1404 // | | <--- Lesp | |
1405 // +---------------+ <--- Lmonitors (fp - 0x18) | |
1406 // | VM locals | | |
1407 // +---------------+ <--- fp | |
1408 // | | | |
1409 // : reg save area : | |
1410 // | | | |
1411 // +---------------+ <--- fp + 0x40 | |
1412 // | | | |
1413 // : extra 7 slots : note: these slots are not really needed for the interpreter (fix later) | |
1414 // | | | |
1415 // +---------------+ <--- fp + 0x5c | |
1416 // | | | |
1417 // : free : | |
1418 // | | | |
1419 // +---------------+ | |
1420 // | | | |
1421 // : nonarg locals : | |
1422 // | | | |
1423 // +---------------+ | |
1424 // | | | |
1425 // : arguments : | |
1426 // | | <--- Llocals | |
1427 // +---------------+ <--- Gargs | |
1428 // | | | |
1429 | |
1430 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) { | |
1431 | |
1432 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated | |
1433 // expression stack, the callee will have callee_extra_locals (so we can account for | |
1434 // frame extension) and monitor_size for monitors. Basically we need to calculate | |
1435 // this exactly like generate_fixed_frame/generate_compute_interpreter_state. | |
1436 // | |
1437 // | |
1438 // The big complicating thing here is that we must ensure that the stack stays properly | |
1439 // aligned. (This would be even uglier if the monitor size weren't already a multiple of | |
1440 // what the stack needs to be aligned to.) We are given that the sp (fp) is already aligned | |
1441 // by the caller, so we must ensure that it is properly aligned for our callee. | |
1442 // | |
1443 const int rounded_vm_local_words = | |
1444 round_to(frame::interpreter_frame_vm_local_words,WordsPerLong); | |
1445 // callee_extra_locals and max_stack are slot counts, not sizes in frame words. | |
1446 const int locals_size = | |
1447 round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong); | |
1448 const int max_stack_words = max_stack * Interpreter::stackElementWords(); | |
1449 return (round_to((max_stack_words | |
1450 + rounded_vm_local_words | |
1451 + frame::memory_parameter_word_sp_offset), WordsPerLong) | |
1452 // already rounded | |
1453 + locals_size + monitor_size); | |
1454 } | |
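// Editorial worked example (hypothetical constants, not in the original source):
// assuming WordsPerLong == 2, stackElementWords() == 1,
// interpreter_frame_vm_local_words == 6 and memory_parameter_word_sp_offset == 23,
// a call with callee_extra_locals == 3, max_stack == 5 and monitor_size == 2 gives
//   rounded_vm_local_words = round_to(6, 2)       = 6
//   locals_size            = round_to(3 * 1, 2)   = 4
//   max_stack_words        = 5 * 1                = 5
//   result = round_to(5 + 6 + 23, 2) + 4 + 2      = 34 + 6 = 40 words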
1455 | |
1456 // How much stack a method's top interpreter activation needs, in words. | |
1457 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) { | |
1458 | |
1459 // See call_stub code | |
1460 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset, | |
1461 WordsPerLong); // 7 + register save area | |
1462 | |
1463 // Save space for one monitor to get into the interpreted method in case | |
1464 // the method is synchronized | |
1465 int monitor_size = method->is_synchronized() ? | |
1466 1*frame::interpreter_frame_monitor_size() : 0; | |
1467 return size_activation_helper(method->max_locals(), method->max_stack(), | |
1468 monitor_size) + call_stub_size; | |
1469 } | |
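// Editorial sketch (not part of the original source): the rounding used by the
// sizing code above, assuming the alignment is a power of two.
static inline int sketch_round_to(int count, int align) {
  // round count up to the next multiple of align
  return (count + align - 1) & ~(align - 1);
}
// e.g. sketch_round_to(7 + 23, 2) == 30, the call_stub_size above when
// memory_parameter_word_sp_offset happens to be 23 (a hypothetical value).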
1470 | |
1471 int AbstractInterpreter::layout_activation(methodOop method, | |
1472 int tempcount, | |
1473 int popframe_extra_args, | |
1474 int moncount, | |
1475 int callee_param_count, | |
1476 int callee_local_count, | |
1477 frame* caller, | |
1478 frame* interpreter_frame, | |
1479 bool is_top_frame) { | |
1480 // Note: This calculation must exactly parallel the frame setup | |
1481 // in InterpreterGenerator::generate_fixed_frame. | |
1482 // If interpreter_frame != NULL, set up the following variables: | |
1483 // - Lmethod | |
1484 // - Llocals | |
1485 // - Lmonitors (to the indicated number of monitors) | |
1486 // - Lesp (to the indicated number of temps) | |
1487 // The caller frame on entry is a description of the caller of the frame | |
1488 // we are about to lay out. We are guaranteed that we will be able to fill in a | |
1489 // new interpreter frame as its callee (i.e. the stack space is allocated and | |
1490 // the amount was determined by an earlier call to this method with interpreter_frame == NULL). | |
1491 // On return, interpreter_frame (if not NULL) will describe the interpreter frame we just laid out. | |
1492 | |
1493 int monitor_size = moncount * frame::interpreter_frame_monitor_size(); | |
1494 int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong); | |
1495 | |
1496 assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align"); | |
1497 // | |
1498 // Note: if you look closely this appears to be doing something much different | |
1499 // than generate_fixed_frame. What is happening is this: on sparc we have to do | |
1500 // this dance with interpreter_sp_adjustment because the window save area would | |
1501 // appear just below the bottom (tos) of the caller's java expression stack. Because | |
1502 // the interpreter wants the locals to be completely contiguous, generate_fixed_frame | |
1503 // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size). | |
1504 // Now in generate_fixed_frame the extension of the caller's sp happens in the callee. | |
1505 // In this code the opposite occurs: the caller adjusts its own stack based on the callee. | |
1506 // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest), | |
1507 // because the oldest frame would have adjusted its caller's frame and yet that frame | |
1508 // already exists and isn't part of this array of frames we are unpacking. So at first | |
1509 // glance this would seem to mess up that frame. However, after it has calculated all of | |
1510 // the frames' on_stack_size()'s, Deoptimization::fetch_unroll_info_helper() will figure out | |
1511 // the amount to adjust the caller of the initial (oldest) frame, and the calculation will | |
1512 // all add up. It does seem like it would be simpler to account for the adjustment here (and | |
1513 // remove the callee... parameters). However, this would mean that this routine would have to | |
1514 // take the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment), | |
1515 // and run the calling loop in the reverse order. This would also appear to mean making | |
1516 // this code aware of what the interactions are when that initial caller frame was an osr or | |
1517 // other adapter frame. Deoptimization is complicated enough and hard enough to debug that | |
1518 // there is no sense in messing with working code. | |
1519 // | |
1520 | |
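// Editorial sketch (hypothetical numbers, not in the original source): the
// interpreted-caller adjustment computed below, for size_of_parameters() == 2
// and max_locals() == 5 with stackElementWords() == 1 and WordsPerLong == 2:
//   delta                  = local_words - parm_words = 5 - 2 = 3
//   computed_sp_adjustment = round_to(3, WordsPerLong)        = 4
// so the callee's non-argument locals end up contiguous with the arguments
// already on the caller's expression stack.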
1521 int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong); | |
1522 assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align"); | |
1523 | |
1524 int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(), | |
1525 monitor_size); | |
1526 | |
1527 if (interpreter_frame != NULL) { | |
1528 // The skeleton frame must already look like an interpreter frame | |
1529 // even if not fully filled out. | |
1530 assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame"); | |
1531 | |
1532 intptr_t* fp = interpreter_frame->fp(); | |
1533 | |
1534 JavaThread* thread = JavaThread::current(); | |
1535 RegisterMap map(thread, false); | |
1536 // More verification that skeleton frame is properly walkable | |
1537 assert(fp == caller->sp(), "fp must match"); | |
1538 | |
1539 intptr_t* montop = fp - rounded_vm_local_words; | |
1540 | |
1541 // preallocate monitors (cf. __ add_monitor_to_stack) | |
1542 intptr_t* monitors = montop - monitor_size; | |
1543 | |
1544 // preallocate stack space | |
1545 intptr_t* esp = monitors - 1 - | |
1546 (tempcount * Interpreter::stackElementWords()) - | |
1547 popframe_extra_args; | |
1548 | |
1549 int local_words = method->max_locals() * Interpreter::stackElementWords(); | |
1550 int parm_words = method->size_of_parameters() * Interpreter::stackElementWords(); | |
1551 NEEDS_CLEANUP; | |
1552 intptr_t* locals; | |
1553 if (caller->is_interpreted_frame()) { | |
1554 // Can force the locals area to end up properly overlapping the top of the expression stack. | |
1555 intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1; | |
1556 // Note that this computation means we replace size_of_parameters() values from the caller | |
1557 // interpreter frame's expression stack with our argument locals | |
1558 locals = Lesp_ptr + parm_words; | |
1559 int delta = local_words - parm_words; | |
1560 int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0; | |
1561 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS; | |
1562 } else { | |
1563 assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases"); | |
1564 // Don't have Lesp available; lay out locals block in the caller | |
1565 // adjacent to the register window save area. | |
1566 // | |
1567 // Compiled frames do not allocate a varargs area which is why this if | |
1568 // statement is needed. | |
1569 // | |
1570 if (caller->is_compiled_frame()) { | |
1571 locals = fp + frame::register_save_words + local_words - 1; | |
1572 } else { | |
1573 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1; | |
1574 } | |
1575 if (!caller->is_entry_frame()) { | |
1576 // Caller wants his own SP back | |
1577 int caller_frame_size = caller->cb()->frame_size(); | |
1578 *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS; | |
1579 } | |
1580 } | |
1581 if (TraceDeoptimization) { | |
1582 if (caller->is_entry_frame()) { | |
1583 // make sure I5_savedSP and the entry frame's notion of saved SP | |
1584 // agree. This assertion duplicates a check in entry frame code | |
1585 // but catches the failure earlier. | |
1586 assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP), | |
1587 "would change callers SP"); | |
1588 } | |
1589 if (caller->is_entry_frame()) { | |
1590 tty->print("entry "); | |
1591 } | |
1592 if (caller->is_compiled_frame()) { | |
1593 tty->print("compiled "); | |
1594 if (caller->is_deoptimized_frame()) { | |
1595 tty->print("(deopt) "); | |
1596 } | |
1597 } | |
1598 if (caller->is_interpreted_frame()) { | |
1599 tty->print("interpreted "); | |
1600 } | |
1601 tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp()); | |
1602 tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16); | |
1603 tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16); | |
1604 tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp()); | |
1605 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16); | |
1606 tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16); | |
1607 tty->print_cr("Llocals = 0x%x", locals); | |
1608 tty->print_cr("Lesp = 0x%x", esp); | |
1609 tty->print_cr("Lmonitors = 0x%x", monitors); | |
1610 } | |
1611 | |
1612 if (method->max_locals() > 0) { | |
1613 assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area"); | |
1614 assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area"); | |
1615 assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area"); | |
1616 assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area"); | |
1617 } | |
1618 #ifdef _LP64 | |
1619 assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd"); | |
1620 #endif | |
1621 | |
1622 *interpreter_frame->register_addr(Lmethod) = (intptr_t) method; | |
1623 *interpreter_frame->register_addr(Llocals) = (intptr_t) locals; | |
1624 *interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors; | |
1625 *interpreter_frame->register_addr(Lesp) = (intptr_t) esp; | |
1626 // Llast_SP will be the same as SP, as there is no adapter space | |
1627 *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS; | |
1628 *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache(); | |
1629 #ifdef FAST_DISPATCH | |
1630 *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table(); | |
1631 #endif | |
1632 | |
1633 | |
1634 #ifdef ASSERT | |
1635 BasicObjectLock* mp = (BasicObjectLock*)monitors; | |
1636 | |
1637 assert(interpreter_frame->interpreter_frame_method() == method, "method matches"); | |
1638 assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match"); | |
1639 assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches"); | |
1640 assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches"); | |
1641 assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches"); | |
1642 | |
1643 // check bounds | |
1644 intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1); | |
1645 intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words; | |
1646 assert(lo < monitors && montop <= hi, "monitors in bounds"); | |
1647 assert(lo <= esp && esp < monitors, "esp in bounds"); | |
1648 #endif // ASSERT | |
1649 } | |
1650 | |
1651 return raw_frame_size; | |
1652 } | |
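// Editorial sketch (not part of the original source): deoptimization uses this
// routine in two passes -- first with interpreter_frame == NULL to learn the
// frame size, then again with a skeleton frame to fill in the interpreter state:
//
//   int words = AbstractInterpreter::layout_activation(m, temps, extra_args,
//                   mons, params, locals, &caller, NULL /* size only */, top);
//   /* allocate 'words' of stack and build a walkable skeleton frame ... */
//   AbstractInterpreter::layout_activation(m, temps, extra_args, mons, params,
//                   locals, &caller, &skeleton, top);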
1653 | |
1654 //---------------------------------------------------------------------------------------------------- | |
1655 // Exceptions | |
1656 void TemplateInterpreterGenerator::generate_throw_exception() { | |
1657 | |
1658 // Entry point in previous activation (i.e., if the caller was interpreted) | |
1659 Interpreter::_rethrow_exception_entry = __ pc(); | |
1660 // O0: exception | |
1661 | |
1662 // entry point for exceptions thrown within interpreter code | |
1663 Interpreter::_throw_exception_entry = __ pc(); | |
1664 __ verify_thread(); | |
1665 // expression stack is undefined here | |
1666 // O0: exception, i.e. Oexception | |
1667 // Lbcp: exception bcx | |
1668 __ verify_oop(Oexception); | |
1669 | |
1670 | |
1671 // expression stack must be empty before entering the VM in case of an exception | |
1672 __ empty_expression_stack(); | |
1673 // find exception handler address and preserve exception oop | |
1674 // call C routine to find handler and jump to it | |
1675 __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception); | |
1676 __ push_ptr(O1); // push exception for exception handler bytecodes | |
1677 | |
1678 __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!) | |
1679 __ delayed()->nop(); | |
1680 | |
1681 | |
1682 // if the exception is not handled in the current frame | |
1683 // the frame is removed and the exception is rethrown | |
1684 // (i.e. exception continuation is _rethrow_exception) | |
1685 // | |
1686 // Note: At this point the bci is still that of the instruction which caused | |
1687 // the exception and the expression stack is empty. Thus, for any VM calls | |
1688 // at this point, GC will find a legal oop map (with empty expression stack). | |
1689 | |
1690 // in current activation | |
1691 // tos: exception | |
1692 // Lbcp: exception bcp | |
1693 | |
1694 // | |
1695 // JVMTI PopFrame support | |
1696 // | |
1697 | |
1698 Interpreter::_remove_activation_preserving_args_entry = __ pc(); | |
1699 Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); | |
1700 // Set the popframe_processing bit in popframe_condition indicating that we are | |
1701 // currently handling popframe, so that call_VMs that may happen later do not trigger new | |
1702 // popframe handling cycles. | |
1703 | |
1704 __ ld(popframe_condition_addr, G3_scratch); | |
1705 __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch); | |
1706 __ stw(G3_scratch, popframe_condition_addr); | |
1707 | |
1708 // Empty the expression stack, as in normal exception handling | |
1709 __ empty_expression_stack(); | |
1710 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false); | |
1711 | |
1712 { | |
1713 // Check to see whether we are returning to a deoptimized frame. | |
1714 // (The PopFrame call ensures that the caller of the popped frame is | |
1715 // either interpreted or compiled and deoptimizes it if compiled.) | |
1716 // In this case, we can't call dispatch_next() after the frame is | |
1717 // popped, but instead must save the incoming arguments and restore | |
1718 // them after deoptimization has occurred. | |
1719 // | |
1720 // Note that we don't compare the return PC against the | |
1721 // deoptimization blob's unpack entry because of the presence of | |
1722 // adapter frames in C2. | |
1723 Label caller_not_deoptimized; | |
1724 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7); | |
1725 __ tst(O0); | |
1726 __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized); | |
1727 __ delayed()->nop(); | |
1728 | |
1729 const Register Gtmp1 = G3_scratch; | |
1730 const Register Gtmp2 = G1_scratch; | |
1731 | |
1732 // Compute size of arguments for saving when returning to deoptimized caller | |
1733 __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1); | |
1734 __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1); | |
1735 __ sub(Llocals, Gtmp1, Gtmp2); | |
1736 __ add(Gtmp2, wordSize, Gtmp2); | |
1737 // Save these arguments | |
1738 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2); | |
1739 // Inform deoptimization that it is responsible for restoring these arguments | |
1740 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1); | |
1741 Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); | |
1742 __ st(Gtmp1, popframe_condition_addr); | |
1743 | |
1744 // Return from the current method | |
1745 // The caller's SP was adjusted upon method entry to accommodate | |
1746 // the callee's non-argument locals. Undo that adjustment. | |
1747 __ ret(); | |
1748 __ delayed()->restore(I5_savedSP, G0, SP); | |
1749 | |
1750 __ bind(caller_not_deoptimized); | |
1751 } | |
1752 | |
1753 // Clear the popframe condition flag | |
1754 __ stw(G0 /* popframe_inactive */, popframe_condition_addr); | |
1755 | |
1756 // Get out of the current method (how this is done depends on the particular compiler calling | |
1757 // convention that the interpreter currently follows) | |
1758 // The caller's SP was adjusted upon method entry to accommodate | |
1759 // the callee's non-argument locals. Undo that adjustment. | |
1760 __ restore(I5_savedSP, G0, SP); | |
1761 // The method data pointer was incremented already during | |
1762 // call profiling. We have to restore the mdp for the current bcp. | |
1763 if (ProfileInterpreter) { | |
1764 __ set_method_data_pointer_for_bcp(); | |
1765 } | |
1766 // Resume bytecode interpretation at the current bcp | |
1767 __ dispatch_next(vtos); | |
1768 // end of JVMTI PopFrame support | |
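// Editorial sketch (not part of the original source): the popframe_condition
// transitions driven by the code above, as bit operations.
//
//   condition |= popframe_processing_bit;               // on entry: suppress recursion
//   condition  = popframe_force_deopt_reexecution_bit;  // deoptimized-caller path
//   condition  = popframe_inactive;                     // cleared before redispatch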
1769 | |
1770 Interpreter::_remove_activation_entry = __ pc(); | |
1771 | |
1772 // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here) | |
1773 __ pop_ptr(Oexception); // get exception | |
1774 | |
1775 // Intel has the following comment: | |
1776 //// remove the activation (without doing throws on illegalMonitorExceptions) | |
1777 // They remove the activation without checking for bad monitor state. | |
1778 // %%% We should make sure this is the right semantics before implementing. | |
1779 | |
1780 // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here? | |
1781 __ set_vm_result(Oexception); | |
1782 __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false); | |
1783 | |
1784 __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI); | |
1785 | |
1786 __ get_vm_result(Oexception); | |
1787 __ verify_oop(Oexception); | |
1788 | |
1789 const int return_reg_adjustment = frame::pc_return_offset; | |
1790 Address issuing_pc_addr(I7, 0, return_reg_adjustment); | |
1791 | |
1792 // We are done with this activation frame; find out where to go next. | |
1793 // The continuation point will be an exception handler, which expects | |
1794 // the following registers set up: | |
1795 // | |
1796 // Oexception: exception | |
1797 // Oissuing_pc: the PC of the local call that threw the exception | |
1798 // Other On: garbage | |
1799 // In/Ln: the contents of the caller's register window | |
1800 // | |
1801 // We do the required restore at the last possible moment, because we | |
1802 // need to preserve some state across a runtime call. | |
1803 // (Remember that the caller activation is unknown--it might not be | |
1804 // interpreted, so things like Lscratch are useless in the caller.) | |
1805 | |
1806 // Although the Intel version uses call_C, we can use the more | |
1807 // compact call_VM. (The only real difference on SPARC is a | |
1808 // harmlessly ignored [re]set_last_Java_frame, compared with | |
1809 // the Intel code which lacks this.) | |
1810 __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore | |
1811 __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller | |
1812 __ super_call_VM_leaf(L7_thread_cache, | |
1813 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), | |
1814 Oissuing_pc->after_save()); | |
1815 | |
1816 // The caller's SP was adjusted upon method entry to accommodate | |
1817 // the callee's non-argument locals. Undo that adjustment. | |
1818 __ JMP(O0, 0); // return exception handler in caller | |
1819 __ delayed()->restore(I5_savedSP, G0, SP); | |
1820 | |
1821 // (same old exception object is already in Oexception; see above) | |
1822 // Note that an "issuing PC" is actually the next PC after the call | |
1823 } | |
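// Editorial sketch (not part of the original source): the dispatch generated
// above, as C-like pseudocode.
//
//   throw_exception_entry:
//     empty_expression_stack();
//     handler = InterpreterRuntime::exception_handler_for_exception(exception);
//     push(exception);
//     goto handler;                    // may be remove_activation_entry
//   remove_activation_entry:
//     exception = pop();
//     unlock_if_synchronized_method(); // without throwing monitor exceptions
//     handler_pc = SharedRuntime::exception_handler_for_return_address(issuing_pc);
//     restore(I5_savedSP);             // undo the non-argument locals adjustment
//     goto handler_pc;                 // continue in the caller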
1824 | |
1825 | |
1826 // | |
1827 // JVMTI ForceEarlyReturn support | |
1828 // | |
1829 | |
1830 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { | |
1831 address entry = __ pc(); | |
1832 | |
1833 __ empty_expression_stack(); | |
1834 __ load_earlyret_value(state); | |
1835 | |
1836 __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch); | |
1837 Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())); | |
1838 | |
1839 // Clear the earlyret state | |
1840 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr); | |
1841 | |
1842 __ remove_activation(state, | |
1843 /* throw_monitor_exception */ false, | |
1844 /* install_monitor_exception */ false); | |
1845 | |
1846 // The caller's SP was adjusted upon method entry to accommodate | |
1847 // the callee's non-argument locals. Undo that adjustment. | |
1848 __ ret(); // return to caller | |
1849 __ delayed()->restore(I5_savedSP, G0, SP); | |
1850 | |
1851 return entry; | |
1852 } // end of JVMTI ForceEarlyReturn support | |
1853 | |
1854 | |
1855 //------------------------------------------------------------------------------------------------------------------------ | |
1856 // Helper for vtos entry point generation | |
1857 | |
1858 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) { | |
1859 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); | |
1860 Label L; | |
1861 aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop(); | |
1862 fep = __ pc(); __ push_f(); __ ba(false, L); __ delayed()->nop(); | |
1863 dep = __ pc(); __ push_d(); __ ba(false, L); __ delayed()->nop(); | |
1864 lep = __ pc(); __ push_l(); __ ba(false, L); __ delayed()->nop(); | |
1865 iep = __ pc(); __ push_i(); | |
1866 bep = cep = sep = iep; // there aren't any | |
1867 vep = __ pc(); __ bind(L); // fall through | |
1868 generate_and_dispatch(t); | |
1869 } | |
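// Editorial sketch (not part of the original source): the entries generated
// above, as pseudocode. Each type-specific entry spills the cached TOS value
// to the expression stack and falls into the common vtos entry:
//
//   aep: push_ptr(); goto L;    fep: push_f(); goto L;
//   dep: push_d();   goto L;    lep: push_l(); goto L;
//   iep: push_i();   // bep == cep == sep == iep: subword values live as ints
//   vep: L: generate_and_dispatch(t);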
1870 | |
1871 // -------------------------------------------------------------------------------- | |
1872 | |
1873 | |
1874 InterpreterGenerator::InterpreterGenerator(StubQueue* code) | |
1875 : TemplateInterpreterGenerator(code) { | |
1876 generate_all(); // down here so it can be "virtual" | |
1877 } | |
1878 | |
1879 // -------------------------------------------------------------------------------- | |
1880 | |
1881 // Non-product code | |
1882 #ifndef PRODUCT | |
1883 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { | |
1884 address entry = __ pc(); | |
1885 | |
1886 __ push(state); | |
1887 __ mov(O7, Lscratch); // protect return address within interpreter | |
1888 | |
1889 // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer | |
1890 __ mov( Otos_l2, G3_scratch ); | |
1891 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch); | |
1892 __ mov(Lscratch, O7); // restore return address | |
1893 __ pop(state); | |
1894 __ retl(); | |
1895 __ delayed()->nop(); | |
1896 | |
1897 return entry; | |
1898 } | |
1899 | |
1900 | |
1901 // helpers for generate_and_dispatch | |
1902 | |
1903 void TemplateInterpreterGenerator::count_bytecode() { | |
1904 Address c(G3_scratch, (address)&BytecodeCounter::_counter_value); | |
1905 __ load_contents(c, G4_scratch); | |
1906 __ inc(G4_scratch); | |
1907 __ st(G4_scratch, c); | |
1908 } | |
1909 | |
1910 | |
1911 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { | |
1912 Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] ); | |
1913 __ load_contents(bucket, G4_scratch); | |
1914 __ inc(G4_scratch); | |
1915 __ st(G4_scratch, bucket); | |
1916 } | |
1917 | |
1918 | |
1919 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { | |
1920 address index_addr = (address)&BytecodePairHistogram::_index; | |
1921 Address index(G3_scratch, index_addr); | |
1922 | |
1923 address counters_addr = (address)&BytecodePairHistogram::_counters; | |
1924 Address counters(G3_scratch, counters_addr); | |
1925 | |
1926 // get index, shift out old bytecode, bring in new bytecode, and store it | |
1927 // _index = (_index >> log2_number_of_codes) | | |
1928 // (bytecode << log2_number_of_codes); | |
1929 | |
1930 | |
1931 __ load_contents( index, G4_scratch ); | |
1932 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch ); | |
1933 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch ); | |
1934 __ or3( G3_scratch, G4_scratch, G4_scratch ); | |
1935 __ store_contents( G4_scratch, index ); | |
1936 | |
1937 // bump bucket contents | |
1938 // _counters[_index] ++; | |
1939 | |
1940 __ load_address( counters ); // loads into G3_scratch | |
1941 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address | |
1942 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index | |
1943 __ ld (G3_scratch, 0, G4_scratch); | |
1944 __ inc (G4_scratch); | |
1945 __ st (G4_scratch, 0, G3_scratch); | |
1946 } | |
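// Editorial sketch (not part of the original source): the C equivalent of the
// generated code above.
//
//   int i = BytecodePairHistogram::_index;
//   i = (i >> BytecodePairHistogram::log2_number_of_codes)
//     | ((int)t->bytecode() << BytecodePairHistogram::log2_number_of_codes);
//   BytecodePairHistogram::_index = i;
//   BytecodePairHistogram::_counters[i]++;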
1947 | |
1948 | |
1949 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { | |
1950 // Call a little run-time stub to avoid blow-up for each bytecode. | |
1951 // The run-time stub saves the right registers, depending on | |
1952 // the tosca in-state for the given template. | |
1953 address entry = Interpreter::trace_code(t->tos_in()); | |
1954 guarantee(entry != NULL, "entry must have been generated"); | |
1955 __ call(entry, relocInfo::none); | |
1956 __ delayed()->nop(); | |
1957 } | |
1958 | |
1959 | |
1960 void TemplateInterpreterGenerator::stop_interpreter_at() { | |
1961 Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value); | |
1962 __ load_contents (counter, G3_scratch ); | |
1963 Address stop_at(G4_scratch, (address)&StopInterpreterAt); | |
1964 __ load_ptr_contents(stop_at, G4_scratch); | |
1965 __ cmp(G3_scratch, G4_scratch); | |
1966 __ breakpoint_trap(Assembler::equal); | |
1967 } | |
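// Editorial sketch (not part of the original source): the check above in C.
//
//   if (BytecodeCounter::_counter_value == StopInterpreterAt)
//     breakpoint_trap();  // stop in the debugger at the requested bytecode count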
1968 #endif // not PRODUCT | |
1969 #endif // !CC_INTERP |