comparison: src/cpu/sparc/vm/cppInterpreter_sparc.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author:   duke
date:     Sat, 01 Dec 2007 00:00:00 +0000
children: 9e5a7340635e
/*
 * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_cppInterpreter_sparc.cpp.incl"

#ifdef CC_INTERP

// Routine exists to make tracebacks look decent in debugger
// while "shadow" interpreter frames are on stack. It is also
// used to distinguish interpreter frames.

extern "C" void RecursiveInterpreterActivation(interpreterState istate) {
  ShouldNotReachHere();
}

bool CppInterpreter::contains(address pc) {
  return (_code->contains(pc) ||
          (pc == (CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset)));
}

#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->
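
// (Note: STATE(field) expands to the pair (Lstate, byte offset of 'field'
// within BytecodeInterpreter), which the MacroAssembler load/store helpers
// accept as a register-plus-displacement address; e.g.
// __ ld_ptr(STATE(_method), G5_method) loads the state's _method field.)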

Label frame_manager_entry;
Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized
                                      // C++ interpreter entry point; this holds that entry point's label.

static address unctrap_frame_manager_entry = NULL;

static address interpreter_return_address = NULL;
static address deopt_frame_manager_return_atos = NULL;
static address deopt_frame_manager_return_btos = NULL;
static address deopt_frame_manager_return_itos = NULL;
static address deopt_frame_manager_return_ltos = NULL;
static address deopt_frame_manager_return_ftos = NULL;
static address deopt_frame_manager_return_dtos = NULL;
static address deopt_frame_manager_return_vtos = NULL;

const Register prevState = G1_scratch;

void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  __ stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  __ stx(O0, STATE(_native_lresult));
#else
  __ std(O0, STATE(_native_lresult));
#endif
}

void InterpreterGenerator::restore_native_result(void) {

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  __ ldx(STATE(_native_lresult), O0);
#else
  __ ldd(STATE(_native_lresult), O0);
#endif
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
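//
// (Note on the T_BOOLEAN idiom below: subcc(G0, O0, G0) computes 0 - O0 and
// sets the carry flag exactly when O0 != 0; addc(G0, 0, dst) then adds that
// carry into zero, so dst ends up 1 for any non-zero result and 0 otherwise.)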
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i); break;
    case T_VOID   : /* nothing to do */ break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code"); break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code"); break;
    case T_OBJECT :
      __ ld_ptr(STATE(_oop_temp), Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)               // marker for disassembly
  return entry;
}

// tosca based result to c++ interpreter stack based result.
// Result goes to address in L1_scratch

address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  // A result is in the native abi result register from a native method call.
  // We need to return this result to the interpreter by pushing the result on the interpreter's
  // stack. This is relatively simple: the destination is in L1_scratch,
  // i.e. L1_scratch is the first free element on the stack. If we "push" a return value we must
  // adjust L1_scratch.
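  // (Note: the expression stack grows downward, so each "push" below is a
  // store to the current free slot followed by subtracting wordSize from
  // L1_scratch; two-word values adjust by 2*wordSize.)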
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      // !0 => true; 0 => false
      __ subcc(G0, O0, G0);
      __ addc(G0, 0, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    // cannot use and3, 0xFFFF too big as immediate value!
    case T_CHAR   :
      __ sll(O0, 16, O0);
      __ srl(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_BYTE   :
      __ sll(O0, 24, O0);
      __ sra(O0, 24, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_SHORT  :
      __ sll(O0, 16, O0);
      __ sra(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    case T_LONG   :
#ifndef _LP64
#if !defined(_LP64) && defined(COMPILER2)
      // All return values are where we want them, except for Longs.  C2 returns
      // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
      // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
      // build even if we are returning from interpreted we just do a little
      // stupid shuffling.
      // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
      // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
      // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
      __ stx(G1, L1_scratch, -wordSize);
#else
      // native result is in O0, O1
      __ st(O1, L1_scratch, 0);          // Low order
      __ st(O0, L1_scratch, -wordSize);  // High order
#endif /* !_LP64 && COMPILER2 */
#else
      __ stx(O0, L1_scratch, 0);
      __ breakpoint_trap();
#endif
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;

    case T_INT    :
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_VOID   : /* nothing to do */
      break;

    case T_FLOAT  :
      __ stf(FloatRegisterImpl::S, F0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_DOUBLE :
      // Every stack slot is aligned on 64 bits. However, is this
      // the correct stack slot on 64-bit?? QQQ
      __ stf(FloatRegisterImpl::D, F0, L1_scratch, -wordSize);
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;
    case T_OBJECT :
      __ verify_oop(O0);
      __ st_ptr(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();            // return from interpreter activation
  __ delayed()->nop();  // schedule this better
  NOT_PRODUCT(__ emit_long(0);)  // marker for disassembly
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result on the java expression stack of the caller.
  //
  // The current interpreter activation in Lstate is for the method just returning its
  // result. So we know that the result of this method is on the top of the current
  // execution stack (which is pre-pushed) and will be returned to the top of the caller
  // stack. The top of the caller's stack is the bottom of the locals of the current
  // activation.
  // Because of the way activations are managed by the frame manager the value of esp is
  // below both the stack top of the current activation and naturally the stack top
  // of the calling activation. This enables this routine to leave the return address
  // to the frame manager on the stack and do a vanilla return.
  //
  // On entry: O0 - points to source (callee stack top)
  //           O1 - points to destination (caller stack top [i.e. free location])
  // destroys O2, O3
  //

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
    case T_FLOAT  :
      __ breakpoint_trap(Assembler::zero);
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O2);
      __ st(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    case T_DOUBLE :
    case T_LONG   :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpretState
      // except we allocated one extra word for this interpretState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ breakpoint_trap();
      // Hmm now that longs are in one entry should "_ptr" really be "x"?
      __ ld_ptr(O0, 0, O2);
      __ ld_ptr(O0, wordSize, O3);
      __ st_ptr(O3, O1, 0);
      __ st_ptr(O2, O1, -wordSize);
#else
      __ ld(O0, 0, O2);
      __ ld(O0, wordSize, O3);
      __ st(O3, O1, 0);
      __ st(O2, O1, -wordSize);
#endif
      __ sub(O1, 2*wordSize, O1);
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O2);
      __ verify_oop(O2);  // verify it
      __ st_ptr(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop();  // QQ schedule this better
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result in the native abi that the caller expects.
  // We are in a new frame; registers we set must be in the caller (i.e. call_stub) frame.
  //
  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
  // and so rather than return result onto caller's java expression stack we return the
  // result in the expected location based on the native abi.
  // On entry: O0 - source (stack top)
  // On exit result in expected output register
  // QQQ schedule this better

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
    case T_FLOAT  :
      __ ldf(FloatRegisterImpl::S, O0, 0, F0);
      break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O0->after_save());
      break;
    case T_DOUBLE :
      __ ldf(FloatRegisterImpl::D, O0, 0, F0);
      break;
    case T_LONG   :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpretState
      // except we allocated one extra word for this interpretState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ breakpoint_trap();
      // Hmm now that longs are in one entry should "_ptr" really be "x"?
      __ ld_ptr(O0, 0, O0->after_save());
      __ ld_ptr(O0, wordSize, O1->after_save());
#else
      __ ld(O0, wordSize, O1->after_save());
      __ ld(O0, 0, O0->after_save());
#endif
#if defined(COMPILER2) && !defined(_LP64)
      // C2 expects long results in G1. We can't tell if we're returning to interpreted
      // or compiled code, so just be safe and use G1 and O0/O1.

      // Shift bits into high (msb) of G1
      __ sllx(Otos_l1->after_save(), 32, G1);
      // Zero extend low bits
      __ srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
      __ or3 (Otos_l2->after_save(), G1, G1);
#endif /* COMPILER2 */
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O0->after_save());
      __ verify_oop(O0->after_save());  // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop();
  return entry;
}

address CppInterpreter::return_entry(TosState state, int length) {
  // make it look good in the debugger
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}

//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
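// (Note: the check below compares the counter against
// InvocationCounter::InterpreterInvocationLimit with greaterEqualUnsigned, so
// once the counter passes the limit -- or wraps "negative", which is a huge
// unsigned value -- every later check also takes the overflow branch.)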
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Update standard invocation counters
  __ increment_invocation_counter(O0, G3_scratch);
  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
    __ ld_ptr(STATE(_method), G3_scratch);
    Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
    __ ld(interpreter_invocation_counter, G3_scratch);
    __ inc(G3_scratch);
    __ st(G3_scratch, interpreter_invocation_counter);
  }

  Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
  __ sethi(invocation_limit);
  __ ld(invocation_limit, G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();

}

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);
    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// the vanilla (slow path) entry).

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
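  // (Assumed bytecode layout for the fast path below, inferred from the
  // extraction code: byte 0 = _aload_0, byte 1 = _getfield, bytes 2..3 =
  // constant pool cache index, all read big endian in one 32-bit load.)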
  address entry = __ pc();
  Label slow_path;

  if (UseFastAccessorMethods) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i);  // get local 0
    __ tst(Otos_i);                // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch);
    __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

    // get constant pool cache
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();  // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ ba(false, fast_accessor_slow_entry_path);
    __ delayed()->nop();
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//
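// (Reading aid for the long routine below, a sketch rather than a spec:
// allocate the interpreter state, bump the invocation counter, optionally
// lock, locate/create the signature handler, push a shim frame and shuffle
// the args into the native ABI, transition _thread_in_Java ->
// _thread_in_native, make the call, block for a safepoint on return if
// required, then jump through the result handler in Lscratch, which pops
// the frame.)
//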

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address access_flags(G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));

  Label Lentry;
  __ bind(Lentry);

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(access_flags, Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  __ lduh(size_of_parameters, Gtmp1);
  __ sll(Gtmp1, LogBytesPerWord, Gtmp2);  // parameter size in bytes
  __ add(Gargs, Gtmp2, Gargs);            // points to first local + BytesPerWord
  // NEW
  __ add(Gargs, -wordSize, Gargs);        // points to first local[0]
  // generate the code to allocate the interpreter stack frame
  // NEW FRAME ALLOCATED HERE
  // save callers original sp
  // __ mov(SP, I5_savedSP->after_restore());

  generate_compute_interpreter_state(Lstate, G0, true);

  // At this point Lstate points to new interpreter state
  //

  const Address do_not_unlock_if_synchronized(G2_thread, 0,
      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread-local flag.
  // This flag has two effects: one is to force an unwind in the topmost
  // interpreter frame, and the other is to not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);


  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  Label Lcontinue;
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);
  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check, so method is not locked
  // if counter overflows.

  if (synchronized) {
    lock_method();
    // Don't see how G2_thread is preserved here...
    // __ verify_thread(); QQQ destroys L0,L1 can't use
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld_ptr(STATE(_method), G5_method);
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br(Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  // __ verify_thread(); kills L1,L2 can't use at the moment

  // jvmti/jvmpi support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler

  Label pending_exception_present;

  { Label L;
    __ ld_ptr(STATE(_method), G5_method);
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
    __ ld_ptr(STATE(_method), G5_method);

    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, G3_scratch);
    __ br_notnull(G3_scratch, false, Assembler::pn, pending_exception_present);
    __ delayed()->nop();
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored there.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lstate in particular)

  // Flush the state pointer to the register save area
  // Which is the only register we need for a stack walk.
  __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);

  __ mov(Lstate, O1);  // Need to pass the state pointer across the frame

  // Calculate current frame size
  __ sub(SP, FP, O3);   // Calculate negative of current frame size
  __ save(SP, O3, SP);  // Allocate an identical sized frame

  __ mov(I1, Lstate);   // In the "natural" register.

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).


  // call signature handler
  __ ld_ptr(STATE(_method), Lmethod);
  __ ld_ptr(STATE(_locals), Llocals);

  __ callr(G3_scratch, 0);
  __ delayed()->nop();
  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread (shouldn't be needed)

  { Label not_static;

    __ ld_ptr(STATE(_method), G5_method);
    __ ld(access_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br(Assembler::zero, false, Assembler::pt, not_static);
    __ delayed()->
      // get native function entry point (O0 is a good temp until the very end)
      ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::constants_offset())), O1);
    __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
    __ ld_ptr(O1, mirror_offset, O1);
    // where the mirror handle body is allocated:
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, STATE(_oop_temp));
    __ add(STATE(_oop_temp), O1);  // this is really an LEA not an add
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the java frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code; it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //


  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ callr(O0, 0);
  __ delayed()->
    add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);


    Label L;
    Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->
      ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(noreg,
                    CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans),
                    G2_thread);
    __ ld_ptr(STATE(_thread), G2_thread);  // restore thread
    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);


  // thread state is thread_in_native_trans. Any safepoint blocking has
  // already happened in the trampoline, so we are ready to switch to
  // thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // If we have an oop result, store it where it will be safe for any further GC
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);  // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, STATE(_oop_temp));

    __ bind(no_oop);

  }

  // reset handle block
  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));

    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ bind(pending_exception_present);
    // With the c++ interpreter we just leave it pending; the caller will do the correct thing.
    // However... like x86 we ignore the result of the native call and leave the method locked.
    // It seems wrong to leave things locked.

    __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame

    __ bind(L);
  }

  // jvmdi/jvmpi support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    // Get the initial monitor we allocated
    __ sub(Lstate, entry_size, O1);  // initial monitor
    __ unlock_object(O1);
    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1. We can't tell if we're returning to interpreted
  // or compiled code, so just be safe.

  __ sllx(O0, 32, G1);  // Shift bits into high G1
  __ srl (O1, 0, O1);   // Zero extend O1
  __ or3 (O1, G1, G1);  // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  // Calls result handler which POPS FRAME
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register prev_state,
                                                                 bool native) {

  // On entry
  // G5_method - caller's method
  // Gargs - points to initial parameters (i.e. locals[0])
  // G2_thread - valid? (C1 only??)
  // "prev_state" - contains any previous frame manager state to which we must save a link
  //
  // On return
  // "state" is a pointer to the newly allocated state object. We must allocate and initialize
  // a new interpretState object and the method expression stack.

  assert_different_registers(state, prev_state);
  assert_different_registers(prev_state, G3_scratch);
  const Register Gtmp = G3_scratch;
  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
  const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));

  // slop factor is two extra slots on the expression stack so that
  // we always have room to store a result when returning from a call without parameters
  // that returns a result.

  const int slop_factor = 2*wordSize;

  const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) +  // what is the slop factor?
                         frame::memory_parameter_word_sp_offset +  // register save area + param window
                         (native ? frame::interpreter_frame_extra_outgoing_argument_words : 0);  // JNI, class
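
  // (Note on units: fixed_size is in words -- the shift converts
  // sizeof(BytecodeInterpreter) plus the slop from bytes to words before the
  // register save area / parameter window and, for natives, the extra
  // outgoing argument words are added; the total is scaled back to bytes by
  // the sll(Gtmp, LogBytesPerWord, Gtmp) below.)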

  // XXX G5_method valid

  // Now compute new frame size

  if (native) {
    __ lduh(size_of_parameters, Gtmp);
    __ calc_mem_param_words(Gtmp, Gtmp);  // space for native call parameters passed on the stack in words
  } else {
    __ lduh(max_stack, Gtmp);  // Full size expression stack
  }
  __ add(Gtmp, fixed_size, Gtmp);  // plus the fixed portion

  __ neg(Gtmp);                         // negative space for stack/parameters in words
  __ and3(Gtmp, -WordsPerLong, Gtmp);   // make multiple of 2 (SP must be 2-word aligned)
  __ sll(Gtmp, LogBytesPerWord, Gtmp);  // negative space for frame in bytes

  // Need to do stack size check here before we fault on large frames

  Label stack_ok;

  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                           (StackRedPages+StackYellowPages);


  __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), O0);
  __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), O1);
  // compute stack bottom
  __ sub(O0, O1, O0);

  // Avoid touching the guard pages.
  // Also a fudge for frame size of BytecodeInterpreter::run;
  // it varies from 1k->4k depending on build type
  const int fudge = 6 * K;

  __ set(fudge + (max_pages * os::vm_page_size()), O1);

  __ add(O0, O1, O0);
  __ sub(O0, Gtmp, O0);
  __ cmp(SP, O0);
  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, stack_ok);
  __ delayed()->nop();

  // throw exception; return address becomes throwing pc

  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ stop("never reached");

  __ bind(stack_ok);

  __ save(SP, Gtmp, SP);  // setup new frame and register window

  // New window I7 call_stub or previous activation
  // O6 - register save area, BytecodeInterpreter just below it, args/locals just above that
  //
  __ sub(FP, sizeof(BytecodeInterpreter), state);  // Point to new Interpreter state
  __ add(state, STACK_BIAS, state);                // Account for 64bit bias

#define XXX_STATE(field_name) state, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
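
// (Note: XXX_STATE mirrors the STATE macro above, but addresses fields
// relative to the 'state' register parameter rather than the fixed Lstate.)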

  // Initialize a new Interpreter state
  // orig_sp - caller's original sp
  // G2_thread - thread
  // Gargs - &locals[0] (unbiased?)
  // G5_method - method
  // SP (biased) - accounts for full size java stack, BytecodeInterpreter object, register save area, and register parameter save window


  __ set(0xdead0004, O1);


  __ st_ptr(Gargs, XXX_STATE(_locals));
  __ st_ptr(G0, XXX_STATE(_oop_temp));

  __ st_ptr(state, XXX_STATE(_self_link));                     // point to self
  __ st_ptr(prev_state->after_save(), XXX_STATE(_prev_link));  // Chain interpreter states
  __ st_ptr(G2_thread, XXX_STATE(_thread));                    // Store javathread

  if (native) {
    __ st_ptr(G0, XXX_STATE(_bcp));
  } else {
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::const_offset()), O2);  // get constMethodOop
    __ add(O2, in_bytes(constMethodOopDesc::codes_offset()), O2);       // get bcp
    __ st_ptr(O2, XXX_STATE(_bcp));
  }

  __ st_ptr(G0, XXX_STATE(_mdx));
  __ st_ptr(G5_method, XXX_STATE(_method));

  __ set((int) BytecodeInterpreter::method_entry, O1);
  __ st(O1, XXX_STATE(_msg));

  __ ld_ptr(constants, O3);
  __ ld_ptr(O3, constantPoolOopDesc::cache_offset_in_bytes(), O2);
  __ st_ptr(O2, XXX_STATE(_constants));

  __ st_ptr(G0, XXX_STATE(_result._to_call._callee));

  // Monitor base is just start of BytecodeInterpreter object;
  __ mov(state, O2);
  __ st_ptr(O2, XXX_STATE(_monitor_base));

  // Do we need a monitor for the synchronized method?
  {
    __ ld(access_flags, O1);
    Label done;
    Label got_obj;
    __ btst(JVM_ACC_SYNCHRONIZED, O1);
    __ br(Assembler::zero, false, Assembler::pt, done);

    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ delayed()->btst(JVM_ACC_STATIC, O1);
    __ ld_ptr(XXX_STATE(_locals), O1);
    __ br(Assembler::zero, true, Assembler::pt, got_obj);
    __ delayed()->ld_ptr(O1, 0, O1);  // get receiver for not-static case
    __ ld_ptr(constants, O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    // lock the mirror, not the klassOop
    __ ld_ptr(O1, mirror_offset, O1);

    __ bind(got_obj);

#ifdef ASSERT
    __ tst(O1);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    __ sub(SP, entry_size, SP);  // account for initial monitor
    __ sub(O2, entry_size, O2);  // initial monitor
    __ st_ptr(O1, O2, BasicObjectLock::obj_offset_in_bytes());  // and allocate it for interpreter use
    __ bind(done);
  }

  // Remember initial frame bottom

  __ st_ptr(SP, XXX_STATE(_frame_bottom));

  __ st_ptr(O2, XXX_STATE(_stack_base));

  __ sub(O2, wordSize, O2);          // prepush
  __ st_ptr(O2, XXX_STATE(_stack));  // PREPUSH

  __ lduh(max_stack, O3);  // Full size expression stack
  __ sll(O3, LogBytesPerWord, O3);
  __ sub(O2, O3, O3);
  // __ sub(O3, wordSize, O3);  // so prepush doesn't look out of bounds
  __ st_ptr(O3, XXX_STATE(_stack_limit));

  if (!native) {
    //
    // Code to initialize locals
    //
    Register init_value = noreg;  // will be G0 if we must clear locals
    // Now zero locals
    if (true /* zerolocals */ || ClearInterpreterLocals) {
      // explicitly initialize locals
      init_value = G0;
    } else {
#ifdef ASSERT
      // initialize locals to a garbage pattern for better debugging
      init_value = O3;
      __ set(0x0F0F0F0F, init_value);
#endif // ASSERT
    }
    if (init_value != noreg) {
      Label clear_loop;

      // NOTE: If you change the frame layout, this code will need to
      // be updated!
      __ lduh(size_of_locals, O2);
      __ lduh(size_of_parameters, O1);
      __ sll(O2, LogBytesPerWord, O2);
      __ sll(O1, LogBytesPerWord, O1);
      __ ld_ptr(XXX_STATE(_locals), L2_scratch);
      __ sub(L2_scratch, O2, O2);
      __ sub(L2_scratch, O1, O1);

      __ bind(clear_loop);
      __ inc(O2, wordSize);

      __ cmp(O2, O1);
      __ br(Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop);
      __ delayed()->st_ptr(init_value, O2, 0);
    }
  }
}

// Find preallocated monitor and lock method (C++ interpreter)
//
void InterpreterGenerator::lock_method(void) {
  // Lock the current method.
  // Destroys registers L2_scratch, L3_scratch, O0
  //
  // Find everything relative to Lstate

#ifdef ASSERT
  __ ld_ptr(STATE(_method), L2_scratch);
  __ ld(L2_scratch, in_bytes(methodOopDesc::access_flags_offset()), O0);

  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br(Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // monitor is already allocated at stack base
  // and the lockee is already present
  __ ld_ptr(STATE(_stack_base), L2_scratch);
  __ ld_ptr(L2_scratch, BasicObjectLock::obj_offset_in_bytes(), O0);  // get object
  __ lock_object(L2_scratch, O0);

}

// Generate code for handling resuming a deopted method
void CppInterpreterGenerator::generate_deopt_handling() {

  Label return_from_deopt_common;

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_atos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch);  // Result stub address array index


  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_btos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch);  // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_itos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch);  // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ltos = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  __ srl (G1, 0, O1);
  __ srlx(G1, 32, O0);
#endif /* !_LP64 && COMPILER2 */
  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch);  // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ftos = __ pc();
  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch);  // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_dtos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch);  // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_vtos = __ pc();

  // O0/O1 live
  __ set(AbstractInterpreter::BasicType_as_index(T_VOID), L3_scratch);

  // Deopt return common:
  // an index is present that lets us move any possible result being
  // returned to the interpreter's stack
  //
  __ bind(return_from_deopt_common);

  // Result if any is in native abi result (O0..O1/F0..F1). The java expression
  // stack is in the state that the calling convention left it.
  // Copy the result from native abi result and place it on java expression stack.

  // Current interpreter state is present in Lstate

  // Get current pre-pushed top of interpreter stack
  // Any result (if any) is in native abi
  // result type index is in L3_scratch

  __ ld_ptr(STATE(_stack), L1_scratch);  // get top of java expr stack

  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);  // get typed result converter address
  __ jmpl(Lscratch, G0, O7);                    // and convert it
  __ delayed()->nop();

  // L1_scratch points to top of stack (prepushed)
  __ st_ptr(L1_scratch, STATE(_stack));
}

// Generate the code to handle a more_monitors message from the c++ interpreter
void CppInterpreterGenerator::generate_more_monitors() {

  Label entry, loop;
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  // 1. compute new pointers                              // esp: old expression stack top
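  // (Assumption: the delayed() wrapper on the first load below fills the
  // delay slot of the branch that dispatched to this stub.)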
  __ delayed()->ld_ptr(STATE(_stack_base), L4_scratch);  // current expression stack bottom
  __ sub(L4_scratch, entry_size, L4_scratch);
  __ st_ptr(L4_scratch, STATE(_stack_base));

  __ sub(SP, entry_size, SP);  // Grow stack
  __ st_ptr(SP, STATE(_frame_bottom));

  __ ld_ptr(STATE(_stack_limit), L2_scratch);
  __ sub(L2_scratch, entry_size, L2_scratch);
  __ st_ptr(L2_scratch, STATE(_stack_limit));

  __ ld_ptr(STATE(_stack), L1_scratch);  // Get current stack top
  __ sub(L1_scratch, entry_size, L1_scratch);
  __ st_ptr(L1_scratch, STATE(_stack));
  __ ba(false, entry);
  __ delayed()->add(L1_scratch, wordSize, L1_scratch);  // first real entry (undo prepush)

  // 2. move expression stack

  __ bind(loop);
  __ st_ptr(L3_scratch, Address(L1_scratch, 0));
  __ add(L1_scratch, wordSize, L1_scratch);
  __ bind(entry);
  __ cmp(L1_scratch, L4_scratch);
  __ br(Assembler::notEqual, false, Assembler::pt, loop);
  __ delayed()->ld_ptr(L1_scratch, entry_size, L3_scratch);

  // now zero the slot so we can find it.
  __ st(G0, L4_scratch, BasicObjectLock::obj_offset_in_bytes());

}

// Initial entry to C++ interpreter from the call_stub.
// This entry point is called the frame manager since it handles the generation
// of interpreter activation frames via requests directly from the vm (via call_stub)
// and via requests from the interpreter. The requests from the call_stub happen
// directly through the entry point. Requests from the interpreter happen via returning
// from the interpreter and examining the message the interpreter has returned to
// the frame manager. The frame manager can take the following requests:

// NO_REQUEST - error, should never happen.
// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
//                 allocate a new monitor.
// CALL_METHOD - setup a new activation to call a new method. Very similar to what
//               happens during entry via the call stub.
// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
//
// Arguments:
//
// ebx: methodOop
// ecx: receiver - unused (retrieved from stack as needed)
// esi: previous frame manager state (NULL from the call_stub/c1/c2)
//
//
// Stack layout at entry
//
// [ return address     ] <--- esp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ]
//
//
// We are free to blow any registers we like because the call_stub which brought us here
// initially has preserved the callee save registers already.
//
//
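// (Note: the ebx/ecx/esi names above appear to be carried over from the x86
// version of this comment; on SPARC the corresponding inputs are G5_method,
// the receiver on the stack, and prevState -- see generate_normal_entry below.)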

static address interpreter_frame_manager = NULL;

#ifdef ASSERT
#define VALIDATE_STATE(scratch, marker)                       \
  {                                                           \
    Label skip;                                               \
    __ ld_ptr(STATE(_self_link), scratch);                    \
    __ cmp(Lstate, scratch);                                  \
    __ brx(Assembler::equal, false, Assembler::pt, skip);     \
    __ delayed()->nop();                                      \
    __ breakpoint_trap();                                     \
    __ emit_long(marker);                                     \
    __ bind(skip);                                            \
  }
#else
#define VALIDATE_STATE(scratch, marker)
#endif /* ASSERT */
1433
1434 void CppInterpreterGenerator::adjust_callers_stack(Register args) {
1435 //
1436 // Adjust caller's stack so that all the locals can be contiguous with
1437 // the parameters.
1438 // Worries about stack overflow make this a pain.
1439 //
1440 // Destroys args, G3_scratch, G3_scratch
1441 // In/Out O5_savedSP (sender's original SP)
1442 //
1443 // assert_different_registers(state, prev_state);
1444 const Register Gtmp = G3_scratch;
1445 const Register tmp = O2;
1446 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
1447 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
1448
1449 __ lduh(size_of_parameters, tmp);
1450 __ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes
1451 __ add(args, Gtmp, Gargs); // points to first local + BytesPerWord
1452 // NEW
1453 __ add(Gargs, -wordSize, Gargs); // points to first local[0]
1454 // determine extra space for non-argument locals & adjust caller's SP
1455 // Gtmp1: parameter size in words
1456 __ lduh(size_of_locals, Gtmp);
1457 __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp);
1458
1459 #if 1
1460 // c2i adapters place the final interpreter argument in the register save area for O0/I0
1461 // the call_stub will place the final interpreter argument at
1462 // frame::memory_parameter_word_sp_offset. This is mostly not noticable for either asm
1463 // or c++ interpreter. However with the c++ interpreter when we do a recursive call
1464 // and try to make it look good in the debugger we will store the argument to
1465 // RecursiveInterpreterActivation in the register argument save area. Without allocating
1466 // extra space for the compiler this will overwrite locals in the local array of the
1467 // interpreter.
1468 // QQQ still needed with frameless adapters???
1469
1470 const int c2i_adjust_words = frame::memory_parameter_word_sp_offset - frame::callee_register_argument_save_area_sp_offset;
1471
1472 __ add(Gtmp, c2i_adjust_words*wordSize, Gtmp);
1473 #endif // 1
1474
1475
1476 __ sub(SP, Gtmp, SP); // adjust caller's frame for the additional space we need.
1477 }
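
// In C terms the adjustment above is roughly the following sketch (never compiled;
// round_up and its alignment argument stand in for what
// compute_extra_locals_size_in_bytes really does, and c2i_adjust_words is the
// debugger-friendliness slop described in the comment above):
#if 0
static void adjust_callers_stack_sketch(intptr_t* args) {
  int param_words = method->size_of_parameters();
  int local_words = method->size_of_locals();            // includes the parameters
  Gargs = args + param_words - 1;                        // &locals[0]; non-parameter locals extend below
  intptr_t extra = round_up((local_words - param_words) * wordSize, alignment)
                 + c2i_adjust_words * wordSize;
  SP = SP - extra;                                       // extend the caller's frame for the extra locals
}
#endif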
1478
1479 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1480
1481 // G5_method: methodOop
1482 // G2_thread: thread (unused)
1483 // Gargs: bottom of args (sender_sp)
1484 // O5: sender's sp
1485
1486 // A single frame manager is plenty since we don't specialize for synchronized methods. We
1487 // could, and the code is pretty much ready; we would need to change the test below and, for
1488 // good measure, modify generate_interpreter_state to only do the (pre) sync stuff for
1489 // synchronized routines. Not clear this is worth it yet.
1490
1491 if (interpreter_frame_manager) {
1492 return interpreter_frame_manager;
1493 }
1494
1495 __ bind(frame_manager_entry);
1496
1497 // the following temporary registers are used during frame creation
1498 const Register Gtmp1 = G3_scratch;
1499 // const Register Lmirror = L1; // native mirror (native calls only)
1500
1501 const Address constants (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
1502 const Address access_flags (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
1503 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
1504 const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
1505 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
1506
1507 address entry_point = __ pc();
1508 __ mov(G0, prevState); // no current activation
1509
1510
1511 Label re_dispatch;
1512
1513 __ bind(re_dispatch);
1514
1515 // The interpreter needs to have the locals completely contiguous. In order to do that
1516 // we must adjust the caller's stack pointer for any locals beyond just the
1517 // parameters.
1518 adjust_callers_stack(Gargs);
1519
1520 // O5_savedSP still contains sender's sp
1521
1522 // NEW FRAME
1523
1524 generate_compute_interpreter_state(Lstate, prevState, false);
1525
1526 // At this point a new interpreter frame and state object have been created and initialized.
1527 // Lstate has the pointer to the new activation.
1528 // Any stack banging or limit check should already be done.
1529
1530 Label call_interpreter;
1531
1532 __ bind(call_interpreter);
1533
1534
1535 #if 1
1536 __ set(0xdead001, Lmirror);
1537 __ set(0xdead002, L2_scratch);
1538 __ set(0xdead003, L3_scratch);
1539 __ set(0xdead004, L4_scratch);
1540 __ set(0xdead005, Lscratch);
1541 __ set(0xdead006, Lscratch2);
1542 __ set(0xdead007, L7_scratch);
1543
1544 __ set(0xdeaf002, O2);
1545 __ set(0xdeaf003, O3);
1546 __ set(0xdeaf004, O4);
1547 __ set(0xdeaf005, O5);
1548 #endif
1549
1550 // Call interpreter (stack bang complete); enter here if the message is
1551 // set and we know the stack size is valid
1552
1553 Label call_interpreter_2;
1554
1555 __ bind(call_interpreter_2);
1556
1557 #ifdef ASSERT
1558 {
1559 Label skip;
1560 __ ld_ptr(STATE(_frame_bottom), G3_scratch);
1561 __ cmp(G3_scratch, SP);
1562 __ brx(Assembler::equal, false, Assembler::pt, skip);
1563 __ delayed()->nop();
1564 __ stop("SP not restored to frame bottom");
1565 __ bind(skip);
1566 }
1567 #endif
1568
1569 VALIDATE_STATE(G3_scratch, 4);
1570 __ set_last_Java_frame(SP, noreg);
1571 __ mov(Lstate, O0); // (arg) pointer to current state
1572
1573 __ call(CAST_FROM_FN_PTR(address,
1574 JvmtiExport::can_post_interpreter_events() ?
1575 BytecodeInterpreter::runWithChecks
1576 : BytecodeInterpreter::run),
1577 relocInfo::runtime_call_type);
1578
1579 __ delayed()->nop();
1580
1581 __ ld_ptr(STATE(_thread), G2_thread);
1582 __ reset_last_Java_frame();
1583
1584 // examine msg from interpreter to determine next action
1585 __ ld_ptr(STATE(_thread), G2_thread); // restore G2_thread
1586
1587 __ ld(STATE(_msg), L1_scratch); // Get new message
1588
1589 Label call_method;
1590 Label return_from_interpreted_method;
1591 Label throw_exception;
1592 Label do_OSR;
1593 Label bad_msg;
1594 Label resume_interpreter;
1595
1596 __ cmp(L1_scratch, (int)BytecodeInterpreter::call_method);
1597 __ br(Assembler::equal, false, Assembler::pt, call_method);
1598 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::return_from_method);
1599 __ br(Assembler::equal, false, Assembler::pt, return_from_interpreted_method);
1600 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::throwing_exception);
1601 __ br(Assembler::equal, false, Assembler::pt, throw_exception);
1602 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::do_osr);
1603 __ br(Assembler::equal, false, Assembler::pt, do_OSR);
1604 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::more_monitors);
1605 __ br(Assembler::notEqual, false, Assembler::pt, bad_msg);
     __ delayed()->nop(); // fill the delay slot so the first more-monitors instruction is not executed on the taken branch
1606
1607 // Allocate more monitor space, shuffle expression stack....
1608
1609 generate_more_monitors();
1610
1611 // new monitor slot allocated, resume the interpreter.
1612
1613 __ set((int)BytecodeInterpreter::got_monitors, L1_scratch);
1614 VALIDATE_STATE(G3_scratch, 5);
1615 __ ba(false, call_interpreter);
1616 __ delayed()->st(L1_scratch, STATE(_msg));
1617
1618 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
1619 unctrap_frame_manager_entry = __ pc();
1620
1621 // QQQ what message do we send
1622
1623 __ ba(false, call_interpreter);
1624 __ delayed()->ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame
1625
1626 //=============================================================================
1627 // Returning from a compiled method into a deopted method. The bytecode at the
1628 // bcp has completed. The result of the bytecode is in the native abi (the tosca
1629 // for the template based interpreter). Any stack space that was used by the
1630 // bytecode that has completed has been removed (e.g. parameters for an invoke)
1631 // so all that we have to do is place any pending result on the expression stack
1632 // and resume execution on the next bytecode.
1633
1634 generate_deopt_handling();
1635
1636 // ready to resume the interpreter
1637
1638 __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
1639 __ ba(false, call_interpreter);
1640 __ delayed()->st(L1_scratch, STATE(_msg));
1641
1642 // The current frame has caught an exception that we need to dispatch to the
1643 // handler. We can get here because a native interpreter frame caught an
1644 // exception, in which case there is no handler and we must rethrow. If it
1645 // is a vanilla interpreted frame, then we simply drop into the interpreter
1646 // and let it do the lookup.
1647
1648 Interpreter::_rethrow_exception_entry = __ pc();
1649
1650 Label return_with_exception;
1651 Label unwind_and_forward;
1652
1653 // O0: exception
1654 // O7: throwing pc
1655
1656 // We want exception in the thread no matter what we ultimately decide about frame type.
1657
1658 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
1659 __ verify_thread();
1660 __ st_ptr(O0, exception_addr);
1661
1662 // get the methodOop
1663 __ ld_ptr(STATE(_method), G5_method);
1664
1665 // is the current frame vanilla or native?
1666
1667 __ ld(access_flags, Gtmp1);
1668 __ btst(JVM_ACC_NATIVE, Gtmp1);
1669 __ br(Assembler::zero, false, Assembler::pt, return_with_exception); // vanilla interpreted frame handle directly
1670 __ delayed()->nop();
1671
1672 // We drop through to unwind a native interpreted frame with a pending exception.
1673 // We jump here for the initial interpreter frame with a pending exception.
1674 // We unwind the current activation and forward the exception to our caller.
1675
1676 __ bind(unwind_and_forward);
1677
1678 // Unwind the frame and jump to forward exception. Unwinding will place the throwing pc in O7
1679 // as expected by forward_exception.
1680
1681 __ restore(FP, G0, SP); // unwind interpreter state frame
1682 __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
1683 __ delayed()->mov(I5_savedSP->after_restore(), SP);
1684
1685 // Return point from a call which returns a result in the native abi
1686 // (c1/c2/jni-native). This result must be processed onto the java
1687 // expression stack.
1688 //
1689 // A pending exception may be present, in which case there is no result
1690
1691 address return_from_native_method = __ pc();
1692
1693 VALIDATE_STATE(G3_scratch, 6);
1694
1695 // Result if any is in native abi result (O0..O1/F0..F1). The java expression
1696 // stack is in the state that the calling convention left it.
1697 // Copy the result from native abi result and place it on java expression stack.
1698
1699 // Current interpreter state is present in Lstate
1700
1701 // Exception pending?
1702
1703 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame
1704 __ ld_ptr(exception_addr, Lscratch); // get any pending exception
1705 __ tst(Lscratch); // exception pending?
1706 __ brx(Assembler::notZero, false, Assembler::pt, return_with_exception);
1707 __ delayed()->nop();
1708
1709 // Process the native abi result to java expression stack
1710
1711 __ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method
1712 __ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack
1713 __ lduh(L4_scratch, in_bytes(methodOopDesc::size_of_parameters_offset()), L2_scratch); // get parameter size
1714 __ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes
1715 __ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result
1716 __ ld_ptr(L4_scratch, in_bytes(methodOopDesc::result_index_offset()), L3_scratch); // called method result type index
1717
1718 // tosca is really just native abi
1719 __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
1720 __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
1721 __ ld_ptr(L4_scratch, L3_scratch, Lscratch); // get typed result converter address
1722 __ jmpl(Lscratch, G0, O7); // and convert it
1723 __ delayed()->nop();
1724
1725 // L1_scratch points to top of stack (prepushed)
1726
1727 __ ba(false, resume_interpreter);
1728 __ delayed()->mov(L1_scratch, O1);
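
// In C terms the conversion just performed looks like this sketch (never
// compiled; the converter table _tosca_to_stack is real, the call signature
// and accessor spellings are schematic):
#if 0
static intptr_t* push_native_result_sketch(BytecodeInterpreter* istate) {
  methodOop callee = istate->_result._to_call._callee;
  intptr_t* dest   = istate->_stack + callee->size_of_parameters(); // args already popped by the call
  int result_index = callee->result_index();
  CppInterpreter::_tosca_to_stack[result_index](dest);              // typed converter pushes the result
  return dest;                                                      // new (prepushed) java stack top
}
#endif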
1729
1730 // An exception is being caught on return to a vanilla interpreter frame.
1731 // Empty the stack and resume interpreter
1732
1733 __ bind(return_with_exception);
1734
1735 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame
1736 __ ld_ptr(STATE(_stack_base), O1); // empty java expression stack
1737 __ ba(false, resume_interpreter);
1738 __ delayed()->sub(O1, wordSize, O1); // account for prepush
1739
1740 // Return from an interpreted method: we return the result as appropriate to the caller
1741 // (i.e. "recursive" interpreter call, or native) and unwind this interpreter activation.
1742 // All monitors should be unlocked.
1743
1744 __ bind(return_from_interpreted_method);
1745
1746 VALIDATE_STATE(G3_scratch, 7);
1747
1748 Label return_to_initial_caller;
1749
1750 // The interpreted result is on the top of the completed activation's expression stack.
1751 // We must return it to the top of the caller's stack if the caller was interpreted;
1752 // otherwise we convert it to a native abi result and return to call_stub/c1/c2.
1753 // The caller's expression stack was truncated by the call, but the current activation
1754 // has enough on the stack that we have usable space there no matter what. The other
1755 // thing that makes this easy is that the top of the caller's stack is stored in
1756 // STATE(_locals) for the current activation.
1757
1758 __ ld_ptr(STATE(_prev_link), L1_scratch);
1759 __ ld_ptr(STATE(_method), L2_scratch); // get method just executed
1760 __ ld_ptr(L2_scratch, in_bytes(methodOopDesc::result_index_offset()), L2_scratch);
1761 __ tst(L1_scratch);
1762 __ brx(Assembler::zero, false, Assembler::pt, return_to_initial_caller);
1763 __ delayed()->sll(L2_scratch, LogBytesPerWord, L2_scratch);
1764
1765 // Copy result to the caller's java stack
1766
1767 __ set((intptr_t)CppInterpreter::_stack_to_stack, L4_scratch);
1768 __ ld_ptr(L4_scratch, L2_scratch, Lscratch); // get typed result converter address
1769 __ ld_ptr(STATE(_stack), O0); // current top (prepushed)
1770 __ ld_ptr(STATE(_locals), O1); // stack destination
1771
1772 // O0 - will be source, O1 - will be destination (preserved)
1773 __ jmpl(Lscratch, G0, O7); // and convert it
1774 __ delayed()->add(O0, wordSize, O0); // get source (top of current expr stack)
1775
1776 // O1 == &locals[0]
1777
1778 // Result is now on caller's stack. Just unwind current activation and resume
1779
1780 Label unwind_recursive_activation;
1781
1782
1783 __ bind(unwind_recursive_activation);
1784
1785 // O1 == &locals[0] (really the caller's stack top) for the activation now returning.
1786 // We are returning to an interpreter method from a "recursive" interpreter call.
1787 // The result converter left O1 pointing to the top of the (prepushed) java stack for
1788 // the method we are returning to. Now all we must do is unwind the completed call's state.
1789
1790 // Must restore stack
1791 VALIDATE_STATE(G3_scratch, 8);
1792
1793 // Return to interpreter method after a method call (interpreted/native/c1/c2) has completed.
1794 // The result, if any, is already on the caller's stack. All we must do now is remove the
1795 // now-dead frame and tell the interpreter to resume.
1796
1797
1798 __ mov(O1, I1); // pass back new stack top across activation
1799 // POP FRAME HERE ==================================
1800 __ restore(FP, G0, SP); // unwind interpreter state frame
1801 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame
1802
1803
1804 // Resume the interpreter. The current frame contains the current interpreter
1805 // state object.
1806 //
1807 // O1 == new java stack pointer
1808
1809 __ bind(resume_interpreter);
1810 VALIDATE_STATE(G3_scratch, 10);
1811
1812 // A frame we have already used before, so there is no need to bang the stack; use the call_interpreter_2 entry
1813
1814 __ set((int)BytecodeInterpreter::method_resume, L1_scratch);
1815 __ st(L1_scratch, STATE(_msg));
1816 __ ba(false, call_interpreter_2);
1817 __ delayed()->st_ptr(O1, STATE(_stack));
1818
1819
1820 // Fast accessor methods share this entry point.
1821 // This works because the frame manager is in the same codelet.
1822 // This can be either an entry via call_stub/c1/c2 or a recursive interpreter call;
1823 // we need to do a little register fixup here once we distinguish the two of them.
1824 if (UseFastAccessorMethods && !synchronized) {
1825 // call_stub return address is still in O7
1826 __ bind(fast_accessor_slow_entry_path);
1827 __ set((intptr_t)return_from_native_method - 8, Gtmp1);
1828 __ cmp(Gtmp1, O7); // returning to interpreter?
1829 __ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
1830 __ delayed()->nop();
1831 __ ba(false, re_dispatch);
1832 __ delayed()->mov(G0, prevState); // initial entry
1833
1834 }
1835
1836 // interpreter returning to native code (call_stub/c1/c2)
1837 // convert result and unwind initial activation
1838 // L2_scratch - scaled result type index
1839
1840 __ bind(return_to_initial_caller);
1841
1842 __ set((intptr_t)CppInterpreter::_stack_to_native_abi, L4_scratch);
1843 __ ld_ptr(L4_scratch, L2_scratch, Lscratch); // get typed result converter address
1844 __ ld_ptr(STATE(_stack), O0); // current top (prepushed)
1845 __ jmpl(Lscratch, G0, O7); // and convert it
1846 __ delayed()->add(O0, wordSize, O0); // get source (top of current expr stack)
1847
1848 Label unwind_initial_activation;
1849 __ bind(unwind_initial_activation);
1850
1851 // RETURN TO CALL_STUB/C1/C2 code (result, if any, is in I0..I1 / F0..F1).
1852 // We can return here with an exception that wasn't handled by interpreted code.
1853 // How does c1/c2 see it on return?
1854
1855 // compute resulting sp before/after args popped depending upon calling convention
1856 // __ ld_ptr(STATE(_saved_sp), Gtmp1);
1857 //
1858 // POP FRAME HERE ==================================
1859 __ restore(FP, G0, SP);
1860 __ retl();
1861 __ delayed()->mov(I5_savedSP->after_restore(), SP);
1862
1863 // OSR request: unwind the current frame and transfer to the OSR entry,
1864 // then enter the OSR nmethod
1865
1866 __ bind(do_OSR);
1867 Label remove_initial_frame;
1868 __ ld_ptr(STATE(_prev_link), L1_scratch);
1869 __ ld_ptr(STATE(_result._osr._osr_buf), G1_scratch);
1870
1871 // We are going to pop this frame. Is there another interpreter frame underneath
1872 // it, or is it call_stub/compiled?
1873
1874 __ tst(L1_scratch);
1875 __ brx(Assembler::zero, false, Assembler::pt, remove_initial_frame);
1876 __ delayed()->ld_ptr(STATE(_result._osr._osr_entry), G3_scratch);
1877
1878 // The frame underneath is an interpreter frame; simply unwind
1879 // POP FRAME HERE ==================================
1880 __ restore(FP, G0, SP); // unwind interpreter state frame
1881 __ mov(I5_savedSP->after_restore(), SP);
1882
1883 // Since we are now calling native code we need to change our "return address" from the
1884 // dummy RecursiveInterpreterActivation to a return from native
1885
1886 __ set((intptr_t)return_from_native_method - 8, O7);
1887
1888 __ jmpl(G3_scratch, G0, G0);
1889 __ delayed()->mov(G1_scratch, O0);
1890
1891 __ bind(remove_initial_frame);
1892
1893 // POP FRAME HERE ==================================
1894 __ restore(FP, G0, SP);
1895 __ mov(I5_savedSP->after_restore(), SP);
1896 __ jmpl(G3_scratch, G0, G0);
1897 __ delayed()->mov(G1_scratch, O0);
1898
1899 // Call a new method. All we do is (temporarily) trim the expression stack,
1900 // push a return address to bring us back here, and leap to the new entry.
1901 // At this point we have a topmost frame that was allocated by the frame manager
1902 // which contains the current method interpreted state. We trim this frame
1903 // of excess java expression stack entries and then recurse.
1904
1905 __ bind(call_method);
1906
1907 // The stack points to the next free location, not the top element of the expression stack;
1908 // the method expects sp to point to the topmost element
1909
1910 __ ld_ptr(STATE(_thread), G2_thread);
1911 __ ld_ptr(STATE(_result._to_call._callee), G5_method);
1912
1913
1914 // SP already takes into account the 2 extra words we use for slop
1915 // when we call a "static long no_params()" method. So if we trim back
1916 // SP by the amount of unused java expression stack, the 2 extra
1917 // words we need will automagically be there.
1918 // We also have to worry about keeping SP aligned.
1919
1920 __ ld_ptr(STATE(_stack), Gargs);
1921 __ ld_ptr(STATE(_stack_limit), L1_scratch);
1922
1923 // compute the unused java stack size
1924 __ sub(Gargs, L1_scratch, L2_scratch); // compute unused space
1925
1926 // Round down the unused space so that the stack stays aligned
1927 // by making the unused space a multiple of the size of a long.
1928
1929 __ and3(L2_scratch, -BytesPerLong, L2_scratch);
1930
1931 // Now trim the stack
1932 __ add(SP, L2_scratch, SP);
1933
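// In C terms the trim just performed (sketch only, never compiled; pointer
// arithmetic shown in bytes to match the generated code):
#if 0
intptr_t unused = (intptr_t)istate->_stack - (intptr_t)istate->_stack_limit; // unused expression stack
unused &= -BytesPerLong;   // round down so SP stays long-aligned
SP += unused;              // give the unused space back (the stack grows down)
#endif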
1934
1935 // Now point to the final argument (account for prepush)
1936 __ add(Gargs, wordSize, Gargs);
1937 #ifdef ASSERT
1938 // Make sure we have space for the window
1939 __ sub(Gargs, SP, L1_scratch);
1940 __ cmp(L1_scratch, 16*wordSize);
1941 {
1942 Label skip;
1943 __ brx(Assembler::greaterEqual, false, Assembler::pt, skip);
1944 __ delayed()->nop();
1945 __ stop("killed stack");
1946 __ bind(skip);
1947 }
1948 #endif // ASSERT
1949
1950 // Create a new frame where we can store values that make it look like the interpreter
1951 // really recursed.
1952
1953 // prepare to recurse or call specialized entry
1954
1955 // First link the registers we need
1956
1957 // make the pc look good in debugger
1958 __ set(CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation), O7);
1959 // argument too
1960 __ mov(Lstate, I0);
1961
1962 // Record our sending SP
1963 __ mov(SP, O5_savedSP);
1964
1965 __ ld_ptr(STATE(_result._to_call._callee_entry_point), L2_scratch);
1966 __ set((intptr_t) entry_point, L1_scratch);
1967 __ cmp(L1_scratch, L2_scratch);
1968 __ brx(Assembler::equal, false, Assembler::pt, re_dispatch);
1969 __ delayed()->mov(Lstate, prevState); // link activations
1970
1971 // The method uses a specialized entry; push a return address so we look like the call stub setup.
1972 // This path will handle the fact that the result is returned in registers and not
1973 // on the java stack.
1974
1975 __ set((intptr_t)return_from_native_method - 8, O7);
1976 __ jmpl(L2_scratch, G0, G0); // Do specialized entry
1977 __ delayed()->nop();
1978
1979 //
1980 // Bad Message from interpreter
1981 //
1982 __ bind(bad_msg);
1983 __ stop("Bad message from interpreter");
1984
1985 // Interpreted method "returned" with an exception; pass it on...
1986 // Pass result, unwind activation and continue/return to interpreter/call_stub
1987 // We handle result (if any) differently based on return to interpreter or call_stub
1988
1989 __ bind(throw_exception);
1990 __ ld_ptr(STATE(_prev_link), L1_scratch);
1991 __ tst(L1_scratch);
1992 __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward);
1993 __ delayed()->nop();
1994
1995 __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args
1996 __ ba(false, unwind_recursive_activation);
1997 __ delayed()->nop();
1998
1999 interpreter_frame_manager = entry_point;
2000 return entry_point;
2001 }
2002
2003 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2004 : CppInterpreterGenerator(code) {
2005 generate_all(); // down here so it can be "virtual"
2006 }
2007
2008
2009 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
2010
2011 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
2012 // expression stack, the callee will have callee_extra_locals (so we can account for
2013 // frame extension) and monitor_size for monitors. Basically we need to calculate
2014 // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
2015 //
2016 //
2017 // The big complicating thing here is that we must ensure that the stack stays properly
2018 // aligned. This would be even uglier if monitor size wasn't a multiple of what the stack
2019 // needs to be aligned for. We are given that the sp (fp) is already aligned by
2020 // the caller so we must ensure that it is properly aligned for our callee.
2021 //
2022 // The c++ interpreter always makes sure that we have enough extra space on the
2023 // stack at all times to deal with the "static long no_params()" method issue. This
2024 // is "slop_factor" here.
2025 const int slop_factor = 2;
2026
2027 const int fixed_size = sizeof(BytecodeInterpreter)/wordSize + // interpreter state object
2028 frame::memory_parameter_word_sp_offset; // register save area + param window
2029 return (round_to(max_stack +
2030 slop_factor +
2031 fixed_size +
2032 monitor_size +
2033 (callee_extra_locals * Interpreter::stackElementWords()), WordsPerLong));
2034
2035 }
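
// In symbols, the helper computes (restated from the code above):
//
//   frame_words = round_to(max_stack + slop_factor + fixed_size + monitor_size
//                          + callee_extra_locals * stackElementWords,
//                          WordsPerLong)
//
// i.e. expression stack + slop + state object/register save area + monitors
// + any frame extension the callee needs, rounded to keep long alignment.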
2036
2037 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
2038
2039 // See call_stub code
2040 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
2041 WordsPerLong); // 7 + register save area
2042
2043 // Save space for one monitor to get into the interpreted method in case
2044 // the method is synchronized
2045 int monitor_size = method->is_synchronized() ?
2046 1*frame::interpreter_frame_monitor_size() : 0;
2047 return size_activation_helper(method->max_locals(), method->max_stack(),
2048 monitor_size) + call_stub_size;
2049 }
2050
2051 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
2052 frame* caller,
2053 frame* current,
2054 methodOop method,
2055 intptr_t* locals,
2056 intptr_t* stack,
2057 intptr_t* stack_base,
2058 intptr_t* monitor_base,
2059 intptr_t* frame_bottom,
2060 bool is_top_frame
2061 )
2062 {
2063 // What about any vtable?
2064 //
2065 to_fill->_thread = JavaThread::current();
2066 // This gets filled in later but make it something recognizable for now
2067 to_fill->_bcp = method->code_base();
2068 to_fill->_locals = locals;
2069 to_fill->_constants = method->constants()->cache();
2070 to_fill->_method = method;
2071 to_fill->_mdx = NULL;
2072 to_fill->_stack = stack;
2073 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
2074 to_fill->_msg = deopt_resume2;
2075 } else {
2076 to_fill->_msg = method_resume;
2077 }
2078 to_fill->_result._to_call._bcp_advance = 0;
2079 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
2080 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
2081 to_fill->_prev_link = NULL;
2082
2083 // Fill in the registers for the frame
2084
2085 // Need to install _sender_sp. Actually not too hard in C++!
2086 // When the skeletal frames are laid out we fill in a value
2087 // for _sender_sp. That value is only correct for the oldest
2088 // skeletal frame constructed (because there is only a single
2089 // entry for "caller_adjustment"). While the skeletal frames
2090 // exist that is good enough. We correct that calculation
2091 // here and get all the frames correct.
2092
2093 // to_fill->_sender_sp = locals - (method->size_of_parameters() - 1);
2094
2095 *current->register_addr(Lstate) = (intptr_t) to_fill;
2096 // The skeletal frame already places a useful value here, and this doesn't account
2097 // for alignment, so don't bother.
2098 // *current->register_addr(I5_savedSP) = (intptr_t) locals - (method->size_of_parameters() - 1);
2099
2100 if (caller->is_interpreted_frame()) {
2101 interpreterState prev = caller->get_interpreterState();
2102 to_fill->_prev_link = prev;
2103 // Make the prev callee look proper
2104 prev->_result._to_call._callee = method;
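// The caller's bcp must be advanced over the invoke bytecode when it resumes:
// invokeinterface is 5 bytes long (opcode, 2-byte index, count, zero pad) while
// invokevirtual/invokespecial/invokestatic are 3 bytes (opcode, 2-byte index).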
2105 if (*prev->_bcp == Bytecodes::_invokeinterface) {
2106 prev->_result._to_call._bcp_advance = 5;
2107 } else {
2108 prev->_result._to_call._bcp_advance = 3;
2109 }
2110 }
2111 to_fill->_oop_temp = NULL;
2112 to_fill->_stack_base = stack_base;
2113 // Need +1 here because stack_base points to the word just above the first expr stack entry
2114 // and stack_limit is supposed to point to the word just below the last expr stack entry.
2115 // See generate_compute_interpreter_state.
2116 to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
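// For example, with max_stack == 2 the entries live at stack_base[-1] and
// stack_base[-2], so _stack_limit == stack_base - 3: the word just below them.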
2117 to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
2118
2119 // sparc specific
2120 to_fill->_frame_bottom = frame_bottom;
2121 to_fill->_self_link = to_fill;
2122 #ifdef ASSERT
2123 to_fill->_native_fresult = 123456.789;
2124 to_fill->_native_lresult = CONST64(0xdeadcafedeafcafe);
2125 #endif
2126 }
2127
2128 void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) {
2129 istate->_last_Java_pc = (intptr_t*) last_Java_pc;
2130 }
2131
2132
2133 int AbstractInterpreter::layout_activation(methodOop method,
2134 int tempcount, // Number of slots on java expression stack in use
2135 int popframe_extra_args,
2136 int moncount, // Number of active monitors
2137 int callee_param_size,
2138 int callee_locals_size,
2139 frame* caller,
2140 frame* interpreter_frame,
2141 bool is_top_frame) {
2142
2143 assert(popframe_extra_args == 0, "NEED TO FIX");
2144 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
2145 // does as far as allocating an interpreter frame.
2146 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
2147 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
2148 // as determined by a previous call to this method.
2149 // It is also guaranteed to be walkable even though it is in a skeletal state
2150 // NOTE: return size is in words not bytes
2151 // NOTE: tempcount is the current size of the java expression stack. For topmost
2152 // frames we will allocate a full-sized expression stack and not the cut-back
2153 // version that non-top frames have.
2154
2155 // Calculate the amount our frame will be adjusted by the callee. For the top frame
2156 // this is zero.
2157
2158 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
2159 // calculates the extra locals based on itself, not on what the callee does
2160 // to it, so it ignores the last_frame_adjust value. That seems suspicious as
2161 // far as getting sender_sp correct.
2162
2163 int extra_locals_size = callee_locals_size - callee_param_size;
2164 int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
2165 int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
2166 int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size); // currently sized the same as a full frame
2167 int frame_words = is_top_frame ? full_frame_words : short_frame_words;
2168
2169
2170 /*
2171 If we actually have a frame to lay out we must now fill in all the pieces. This means both
2172 the interpreterState and the registers.
2173 */
2174 if (interpreter_frame != NULL) {
2175
2176 // MUCHO HACK
2177
2178 intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
2179
2180 /* Now fill in the interpreterState object */
2181
2182 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
2183
2184
2185 intptr_t* locals;
2186
2187 // Calculate the position of locals[0]. This is painful because of
2188 // stack alignment (same as ia64). The problem is that we cannot
2189 // compute the location of locals from fp(): fp() will account
2190 // for the extra locals, but it also accounts for aligning the stack,
2191 // so we can't tell from fp() alone whether locals[0] itself was
2192 // misaligned. Instead we calculate the position of locals from the
2193 // caller's state below; fp already accounts for the extra locals.
2194 // (+2 for the static long no_params() issue.)
2195
2196 if (caller->is_interpreted_frame()) {
2197 // locals must agree with the caller because it will be used to set the
2198 // caller's tos when we return.
2199 interpreterState prev = caller->get_interpreterState();
2200 // stack() is prepushed.
2201 locals = prev->stack() + method->size_of_parameters();
2202 } else {
2203 // Lay out locals block in the caller adjacent to the register window save area.
2204 //
2205 // Compiled frames do not allocate a varargs area which is why this if
2206 // statement is needed.
2207 //
2208 intptr_t* fp = interpreter_frame->fp();
2209 int local_words = method->max_locals() * Interpreter::stackElementWords();
2210
2211 if (caller->is_compiled_frame()) {
2212 locals = fp + frame::register_save_words + local_words - 1;
2213 } else {
2214 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
2215 }
2216
2217 }
2218 // END MUCHO HACK
2219
2220 intptr_t* monitor_base = (intptr_t*) cur_state;
2221 intptr_t* stack_base = monitor_base - monitor_size;
2222 /* +1 because stack is always prepushed */
2223 intptr_t* stack = stack_base - (tempcount + 1);
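
// Layout being assembled above, from high to low addresses (sketch):
//
//   [ interpreterState (cur_state) ] <--- fp - sizeof(BytecodeInterpreter)
//   [ monitors                     ] <--- monitor_base (== cur_state), monitor_size words
//   [ expression stack entries     ] <--- stack_base
//   [ prepush slot                 ] <--- stack (tempcount + 1 words below stack_base)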
2224
2225
2226 BytecodeInterpreter::layout_interpreterState(cur_state,
2227 caller,
2228 interpreter_frame,
2229 method,
2230 locals,
2231 stack,
2232 stack_base,
2233 monitor_base,
2234 frame_bottom,
2235 is_top_frame);
2236
2237 BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
2238
2239 }
2240 return frame_words;
2241 }
2242
2243 #endif // CC_INTERP