annotate src/cpu/sparc/vm/cppInterpreter_sparc.cpp @ 1661:01b172b8cd7c (Merge)

author   | never
date     | Fri, 16 Jul 2010 08:29:42 -0700
parents  | c18cbe5936b8
children | f95d63e2154a

rev | line source
0 | 1 /* |
1552 | 2 * Copyright (c) 2007, 2009, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552 | 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
1552 | 20 * or visit www.oracle.com if you need additional information or have any |
1552 | 21 * questions. |
0 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_cppInterpreter_sparc.cpp.incl" | |
27 | |
28 #ifdef CC_INTERP | |
29 | |
30 // Routine exists to make tracebacks look decent in debugger | |
31 // while "shadow" interpreter frames are on stack. It is also | |
32 // used to distinguish interpreter frames. | |
33 | |
34 extern "C" void RecursiveInterpreterActivation(interpreterState istate) { | |
35 ShouldNotReachHere(); | |
36 } | |
37 | |
38 bool CppInterpreter::contains(address pc) { | |
39 return ( _code->contains(pc) || | |
40 ( pc == (CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset))); | |
41 } | |
42 | |
43 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name)) | |
44 #define __ _masm-> | |
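// Illustrative usage note (not part of the generated code): STATE(_method) expands to the
// register/offset pair "Lstate, offset of BytecodeInterpreter::_method", so for example
//   __ ld_ptr(STATE(_method), G5_method);
// loads istate->_method into G5_method, and "__" simply forwards to _masm->.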
45 | |
46 Label frame_manager_entry; | |
47 Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized | |
48 // c++ interpreter entry point; this holds that entry point label. | |
49 | |
50 static address unctrap_frame_manager_entry = NULL; | |
51 | |
52 static address interpreter_return_address = NULL; | |
53 static address deopt_frame_manager_return_atos = NULL; | |
54 static address deopt_frame_manager_return_btos = NULL; | |
55 static address deopt_frame_manager_return_itos = NULL; | |
56 static address deopt_frame_manager_return_ltos = NULL; | |
57 static address deopt_frame_manager_return_ftos = NULL; | |
58 static address deopt_frame_manager_return_dtos = NULL; | |
59 static address deopt_frame_manager_return_vtos = NULL; | |
60 | |
61 const Register prevState = G1_scratch; | |
62 | |
63 void InterpreterGenerator::save_native_result(void) { | |
64 // result potentially in O0/O1: save it across calls | |
65 __ stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); | |
66 #ifdef _LP64 | |
67 __ stx(O0, STATE(_native_lresult)); | |
68 #else | |
69 __ std(O0, STATE(_native_lresult)); | |
70 #endif | |
71 } | |
72 | |
73 void InterpreterGenerator::restore_native_result(void) { | |
74 | |
75 // Restore any method result value | |
76 __ ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); | |
77 #ifdef _LP64 | |
78 __ ldx(STATE(_native_lresult), O0); | |
79 #else | |
80 __ ldd(STATE(_native_lresult), O0); | |
81 #endif | |
82 } | |
83 | |
84 // A result handler converts/unboxes a native call result into | |
85 // a java interpreter/compiler result. The current frame is an | |
86 // interpreter frame. The activation frame unwind code must be | |
87 // consistent with that of TemplateTable::_return(...). In the | |
88 // case of native methods, the caller's SP was not modified. | |
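// For illustration (a description of the T_BOOLEAN case below, not extra generated code):
//   __ subcc(G0, O0, G0);    // compute 0 - O0; the carry (borrow) is set iff O0 != 0
//   __ addc (G0, 0, Itos_i); // 0 + 0 + carry, i.e. Itos_i = (O0 != 0) ? 1 : 0
// normalizes any non-zero native boolean result to Java's canonical 1/0.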
89 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) { | |
90 address entry = __ pc(); | |
91 Register Itos_i = Otos_i ->after_save(); | |
92 Register Itos_l = Otos_l ->after_save(); | |
93 Register Itos_l1 = Otos_l1->after_save(); | |
94 Register Itos_l2 = Otos_l2->after_save(); | |
95 switch (type) { | |
96 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false | |
97 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value! | |
98 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break; | |
99 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break; | |
100 case T_LONG : | |
101 #ifndef _LP64 | |
102 __ mov(O1, Itos_l2); // move other half of long | |
103 #endif // ifdef or no ifdef, fall through to the T_INT case | |
104 case T_INT : __ mov(O0, Itos_i); break; | |
105 case T_VOID : /* nothing to do */ break; | |
106 case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break; | |
107 case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break; | |
108 case T_OBJECT : | |
109 __ ld_ptr(STATE(_oop_temp), Itos_i); | |
110 __ verify_oop(Itos_i); | |
111 break; | |
112 default : ShouldNotReachHere(); | |
113 } | |
114 __ ret(); // return from interpreter activation | |
115 __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame | |
116 NOT_PRODUCT(__ emit_long(0);) // marker for disassembly | |
117 return entry; | |
118 } | |
119 | |
120 // tosca based result to c++ interpreter stack based result. | |
121 // Result goes to address in L1_scratch | |
122 | |
123 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) { | |
124 // A result is in the native abi result register from a native method call. | |
125 // We need to return this result to the interpreter by pushing the result on the interpreter's | |
126 // stack. This is relatively simple: the destination is in L1_scratch, | |
127 // i.e. L1_scratch is the first free element on the stack. If we "push" a return value we must | |
128 // adjust L1_scratch | |
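// The expression stack grows toward lower addresses and L1_scratch is kept pre-pushed,
// so each one-word "push" below follows the pattern
//   __ st (O0, L1_scratch, 0);                 // store into the current free slot
//   __ sub(L1_scratch, wordSize, L1_scratch);  // expose the next free slot
// (two-word results store at -wordSize/0 and adjust by 2*wordSize).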
129 address entry = __ pc(); | |
130 switch (type) { | |
131 case T_BOOLEAN: | |
132 // !0 => true; 0 => false | |
133 __ subcc(G0, O0, G0); | |
134 __ addc(G0, 0, O0); | |
135 __ st(O0, L1_scratch, 0); | |
136 __ sub(L1_scratch, wordSize, L1_scratch); | |
137 break; | |
138 | |
139 // cannot use and3, 0xFFFF too big as immediate value! | |
140 case T_CHAR : | |
141 __ sll(O0, 16, O0); | |
142 __ srl(O0, 16, O0); | |
143 __ st(O0, L1_scratch, 0); | |
144 __ sub(L1_scratch, wordSize, L1_scratch); | |
145 break; | |
146 | |
147 case T_BYTE : | |
148 __ sll(O0, 24, O0); | |
149 __ sra(O0, 24, O0); | |
150 __ st(O0, L1_scratch, 0); | |
151 __ sub(L1_scratch, wordSize, L1_scratch); | |
152 break; | |
153 | |
154 case T_SHORT : | |
155 __ sll(O0, 16, O0); | |
156 __ sra(O0, 16, O0); | |
157 __ st(O0, L1_scratch, 0); | |
158 __ sub(L1_scratch, wordSize, L1_scratch); | |
159 break; | |
160 case T_LONG : | |
161 #ifndef _LP64 | |
123 | 162 #if defined(COMPILER2) |
0 | 163 // All return values are where we want them, except for Longs. C2 returns |
164 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. | |
165 // Since the interpreter will return longs in G1 and O0/O1 in the 32bit | |
166 // build even if we are returning from interpreted we just do a little | |
167 // stupid shuffling. | |
168 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to | |
169 // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node | |
170 // first which would move g1 -> O0/O1 and destroy the exception we were throwing. | |
171 __ stx(G1, L1_scratch, -wordSize); | |
172 #else | |
173 // native result is in O0, O1 | |
174 __ st(O1, L1_scratch, 0); // Low order | |
175 __ st(O0, L1_scratch, -wordSize); // High order | |
123 | 176 #endif /* COMPILER2 */ |
0 | 177 #else |
123 | 178 __ stx(O0, L1_scratch, -wordSize); |
0 | 179 #endif |
180 __ sub(L1_scratch, 2*wordSize, L1_scratch); | |
181 break; | |
182 | |
183 case T_INT : | |
184 __ st(O0, L1_scratch, 0); | |
185 __ sub(L1_scratch, wordSize, L1_scratch); | |
186 break; | |
187 | |
188 case T_VOID : /* nothing to do */ | |
189 break; | |
190 | |
191 case T_FLOAT : | |
192 __ stf(FloatRegisterImpl::S, F0, L1_scratch, 0); | |
193 __ sub(L1_scratch, wordSize, L1_scratch); | |
194 break; | |
195 | |
196 case T_DOUBLE : | |
197 // Every stack slot is aligned on 64 bits. However, is this | |
198 // the correct stack slot on 64 bit?? QQQ | |
199 __ stf(FloatRegisterImpl::D, F0, L1_scratch, -wordSize); | |
200 __ sub(L1_scratch, 2*wordSize, L1_scratch); | |
201 break; | |
202 case T_OBJECT : | |
203 __ verify_oop(O0); | |
204 __ st_ptr(O0, L1_scratch, 0); | |
205 __ sub(L1_scratch, wordSize, L1_scratch); | |
206 break; | |
207 default : ShouldNotReachHere(); | |
208 } | |
209 __ retl(); // return from interpreter activation | |
210 __ delayed()->nop(); // schedule this better | |
211 NOT_PRODUCT(__ emit_long(0);) // marker for disassembly | |
212 return entry; | |
213 } | |
214 | |
215 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) { | |
216 // A result is in the java expression stack of the interpreted method that has just | |
217 // returned. Place this result on the java expression stack of the caller. | |
218 // | |
219 // The current interpreter activation in Lstate is for the method just returning its | |
220 // result. So we know that the result of this method is on the top of the current | |
221 // execution stack (which is pre-pushed) and will be returned to the top of the caller | |
222 // stack. The top of the callers stack is the bottom of the locals of the current | |
223 // activation. | |
224 // Because of the way activations are managed by the frame manager, the value of esp is | |
225 // below both the stack top of the current activation and naturally the stack top | |
226 // of the calling activation. This enables this routine to leave the return address | |
227 // to the frame manager on the stack and do a vanilla return. | |
228 // | |
229 // On entry: O0 - points to source (callee stack top) | |
230 // O1 - points to destination (caller stack top [i.e. free location]) | |
231 // destroys O2, O3 | |
232 // | |
233 | |
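// Roughly, for the one-word cases below (illustrative C, not generated code):
//   *(intptr_t*)dst = *(intptr_t*)src;   // copy the callee's top-of-stack word
//   dst -= wordSize;                     // the caller's stack stays pre-pushed
// where src is O0 and dst is O1; long/double copy two slots and adjust by 2*wordSize.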
234 address entry = __ pc(); | |
235 switch (type) { | |
236 case T_VOID: break; | |
237 break; | |
238 case T_FLOAT : | |
239 case T_BOOLEAN: | |
240 case T_CHAR : | |
241 case T_BYTE : | |
242 case T_SHORT : | |
243 case T_INT : | |
244 // 1 word result | |
245 __ ld(O0, 0, O2); | |
246 __ st(O2, O1, 0); | |
247 __ sub(O1, wordSize, O1); | |
248 break; | |
249 case T_DOUBLE : | |
250 case T_LONG : | |
251 // return top two words on current expression stack to caller's expression stack | |
252 // The caller's expression stack is adjacent to the current frame manager's interpretState | |
253 // except we allocated one extra word for this interpretState so we won't overwrite it | |
254 // when we return a two word result. | |
255 #ifdef _LP64 | |
256 __ ld_ptr(O0, 0, O2); | |
257 __ st_ptr(O2, O1, -wordSize); | |
258 #else | |
259 __ ld(O0, 0, O2); | |
260 __ ld(O0, wordSize, O3); | |
261 __ st(O3, O1, 0); | |
262 __ st(O2, O1, -wordSize); | |
263 #endif | |
264 __ sub(O1, 2*wordSize, O1); | |
265 break; | |
266 case T_OBJECT : | |
267 __ ld_ptr(O0, 0, O2); | |
268 __ verify_oop(O2); // verify it | |
269 __ st_ptr(O2, O1, 0); | |
270 __ sub(O1, wordSize, O1); | |
271 break; | |
272 default : ShouldNotReachHere(); | |
273 } | |
274 __ retl(); | |
275 __ delayed()->nop(); // QQ schedule this better | |
276 return entry; | |
277 } | |
278 | |
279 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) { | |
280 // A result is in the java expression stack of the interpreted method that has just | |
281 // returned. Place this result in the native abi that the caller expects. | |
282 // We are in a new frame; registers we set must be in the caller (i.e. callstub) frame. | |
283 // | |
284 // Similar to generate_stack_to_stack_converter above. Called at a similar time from the | |
285 // frame manager except in this situation the caller is native code (c1/c2/call_stub) | |
286 // and so rather than return result onto caller's java expression stack we return the | |
287 // result in the expected location based on the native abi. | |
288 // On entry: O0 - source (stack top) | |
289 // On exit result in expected output register | |
290 // QQQ schedule this better | |
291 | |
292 address entry = __ pc(); | |
293 switch (type) { | |
294 case T_VOID: break; | |
295 break; | |
296 case T_FLOAT : | |
297 __ ldf(FloatRegisterImpl::S, O0, 0, F0); | |
298 break; | |
299 case T_BOOLEAN: | |
300 case T_CHAR : | |
301 case T_BYTE : | |
302 case T_SHORT : | |
303 case T_INT : | |
304 // 1 word result | |
305 __ ld(O0, 0, O0->after_save()); | |
306 break; | |
307 case T_DOUBLE : | |
308 __ ldf(FloatRegisterImpl::D, O0, 0, F0); | |
309 break; | |
310 case T_LONG : | |
311 // return top two words on current expression stack to caller's expression stack | |
312 // The caller's expression stack is adjacent to the current frame manager's interpretState | |
313 // except we allocated one extra word for this interpretState so we won't overwrite it | |
314 // when we return a two word result. | |
315 #ifdef _LP64 | |
316 __ ld_ptr(O0, 0, O0->after_save()); | |
317 #else | |
318 __ ld(O0, wordSize, O1->after_save()); | |
319 __ ld(O0, 0, O0->after_save()); | |
320 #endif | |
321 #if defined(COMPILER2) && !defined(_LP64) | |
322 // C2 expects long results in G1 we can't tell if we're returning to interpreted | |
323 // or compiled so just be safe use G1 and O0/O1 | |
324 | |
325 // Shift bits into high (msb) of G1 | |
326 __ sllx(Otos_l1->after_save(), 32, G1); | |
327 // Zero extend low bits | |
328 __ srl (Otos_l2->after_save(), 0, Otos_l2->after_save()); | |
329 __ or3 (Otos_l2->after_save(), G1, G1); | |
330 #endif /* COMPILER2 */ | |
331 break; | |
332 case T_OBJECT : | |
333 __ ld_ptr(O0, 0, O0->after_save()); | |
334 __ verify_oop(O0->after_save()); // verify it | |
335 break; | |
336 default : ShouldNotReachHere(); | |
337 } | |
338 __ retl(); | |
339 __ delayed()->nop(); | |
340 return entry; | |
341 } | |
342 | |
343 address CppInterpreter::return_entry(TosState state, int length) { | |
344 // make it look good in the debugger | |
345 return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset; | |
346 } | |
347 | |
348 address CppInterpreter::deopt_entry(TosState state, int length) { | |
349 address ret = NULL; | |
350 if (length != 0) { | |
351 switch (state) { | |
352 case atos: ret = deopt_frame_manager_return_atos; break; | |
353 case btos: ret = deopt_frame_manager_return_btos; break; | |
354 case ctos: | |
355 case stos: | |
356 case itos: ret = deopt_frame_manager_return_itos; break; | |
357 case ltos: ret = deopt_frame_manager_return_ltos; break; | |
358 case ftos: ret = deopt_frame_manager_return_ftos; break; | |
359 case dtos: ret = deopt_frame_manager_return_dtos; break; | |
360 case vtos: ret = deopt_frame_manager_return_vtos; break; | |
361 } | |
362 } else { | |
363 ret = unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap) | |
364 } | |
365 assert(ret != NULL, "Not initialized"); | |
366 return ret; | |
367 } | |
368 | |
369 // | |
370 // Helpers for commoning out cases in the various types of method entries. | |
371 // | |
372 | |
373 // increment invocation count & check for overflow | |
374 // | |
375 // Note: checking for negative value instead of overflow | |
376 // so we have a 'sticky' overflow test | |
377 // | |
378 // Lmethod: method | |
379 // ??: invocation counter | |
380 // | |
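// Below, O0 holds the updated invocation count and the branch to *overflow is taken once it
// is unsigned-greater-or-equal to InterpreterInvocationLimit; because the compare is unsigned,
// a count that has wrapped negative still compares as "huge", which is what makes the
// overflow test sticky.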
381 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { | |
382 // Update standard invocation counters | |
383 __ increment_invocation_counter(O0, G3_scratch); | |
384 if (ProfileInterpreter) { // %%% Merge this into methodDataOop | |
385 __ ld_ptr(STATE(_method), G3_scratch); | |
386 Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset())); | |
387 __ ld(interpreter_invocation_counter, G3_scratch); | |
388 __ inc(G3_scratch); | |
389 __ st(G3_scratch, interpreter_invocation_counter); | |
390 } | |
391 | |
392 Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); | |
393 __ sethi(invocation_limit); | |
394 __ ld(invocation_limit, G3_scratch); | |
395 __ cmp(O0, G3_scratch); | |
396 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); | |
397 __ delayed()->nop(); | |
398 | |
399 } | |
400 | |
401 address InterpreterGenerator::generate_empty_entry(void) { | |
402 | |
403 // A method that does nothing but return... | |
404 | |
405 address entry = __ pc(); | |
406 Label slow_path; | |
407 | |
408 __ verify_oop(G5_method); | |
409 | |
410 // do nothing for empty methods (do not even increment invocation counter) | |
411 if ( UseFastEmptyMethods) { | |
412 // If we need a safepoint check, generate full interpreter entry. | |
413 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); | |
414 __ load_contents(sync_state, G3_scratch); | |
415 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); | |
416 __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry); | |
417 __ delayed()->nop(); | |
418 | |
419 // Code: _return | |
420 __ retl(); | |
421 __ delayed()->mov(O5_savedSP, SP); | |
422 return entry; | |
423 } | |
424 return NULL; | |
425 } | |
426 | |
427 // Call an accessor method (assuming it is resolved; otherwise drop into | |
428 // the vanilla (slow path) entry) | |
429 | |
430 // Generates code to elide accessor methods | |
431 // Uses G3_scratch and G1_scratch as scratch | |
432 address InterpreterGenerator::generate_accessor_entry(void) { | |
433 | |
434 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; | |
435 // parameter size = 1 | |
436 // Note: We can only use this code if the getfield has been resolved | |
437 // and if we don't have a null-pointer exception => check for | |
438 // these conditions first and use slow path if necessary. | |
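// For illustration: a typical accessor such as
//   int getX() { return x; }
// compiles to exactly aload_0; getfield #n; ireturn (parameter size 1), which is the
// bytecode shape this fast path recognizes.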
439 address entry = __ pc(); | |
440 Label slow_path; | |
441 | |
442 if ( UseFastAccessorMethods) { | |
443 // Check if we need to reach a safepoint and generate full interpreter | |
444 // frame if so. | |
445 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); | |
446 __ load_contents(sync_state, G3_scratch); | |
447 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); | |
448 __ br(Assembler::notEqual, false, Assembler::pn, slow_path); | |
449 __ delayed()->nop(); | |
450 | |
451 // Check if local 0 != NULL | |
452 __ ld_ptr(Gargs, G0, Otos_i ); // get local 0 | |
453 __ tst(Otos_i); // check if local 0 == NULL and go the slow path | |
454 __ brx(Assembler::zero, false, Assembler::pn, slow_path); | |
455 __ delayed()->nop(); | |
456 | |
457 | |
458 // read first instruction word and extract bytecode @ 1 and index @ 2 | |
459 // get first 4 bytes of the bytecodes (big endian!) | |
460 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); | |
461 __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); | |
462 | |
463 // move index @ 2 far left then to the right most two bytes. | |
464 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); | |
465 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words( | |
466 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); | |
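// Net effect of the two shifts above (sketch): the 16-bit index held in bytes 2..3 of the
// instruction word is isolated and simultaneously scaled by the size of a
// ConstantPoolCacheEntry, leaving a byte offset into the constant pool cache in G1_scratch.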
467 | |
468 // get constant pool cache | |
469 __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch); | |
470 __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch); | |
471 | |
472 // get specific constant pool cache entry | |
473 __ add(G3_scratch, G1_scratch, G3_scratch); | |
474 | |
475 // Check the constant Pool cache entry to see if it has been resolved. | |
476 // If not, need the slow path. | |
477 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
478 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch); | |
479 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch); | |
480 __ and3(G1_scratch, 0xFF, G1_scratch); | |
481 __ cmp(G1_scratch, Bytecodes::_getfield); | |
482 __ br(Assembler::notEqual, false, Assembler::pn, slow_path); | |
483 __ delayed()->nop(); | |
484 | |
485 // Get the type and return field offset from the constant pool cache | |
486 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); | |
487 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); | |
488 | |
489 Label xreturn_path; | |
490 // Need to differentiate between igetfield, agetfield, bgetfield etc. | |
491 // because they are different sizes. | |
492 // Get the type from the constant pool cache | |
493 __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); | |
494 // Make sure we don't need to mask G1_scratch for tosBits after the above shift | |
495 ConstantPoolCacheEntry::verify_tosBits(); | |
496 __ cmp(G1_scratch, atos ); | |
497 __ br(Assembler::equal, true, Assembler::pt, xreturn_path); | |
498 __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); | |
499 __ cmp(G1_scratch, itos); | |
500 __ br(Assembler::equal, true, Assembler::pt, xreturn_path); | |
501 __ delayed()->ld(Otos_i, G3_scratch, Otos_i); | |
502 __ cmp(G1_scratch, stos); | |
503 __ br(Assembler::equal, true, Assembler::pt, xreturn_path); | |
504 __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i); | |
505 __ cmp(G1_scratch, ctos); | |
506 __ br(Assembler::equal, true, Assembler::pt, xreturn_path); | |
507 __ delayed()->lduh(Otos_i, G3_scratch, Otos_i); | |
508 #ifdef ASSERT | |
509 __ cmp(G1_scratch, btos); | |
510 __ br(Assembler::equal, true, Assembler::pt, xreturn_path); | |
511 __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i); | |
512 __ should_not_reach_here(); | |
513 #endif | |
514 __ ldsb(Otos_i, G3_scratch, Otos_i); | |
515 __ bind(xreturn_path); | |
516 | |
517 // _ireturn/_areturn | |
518 __ retl(); // return from leaf routine | |
519 __ delayed()->mov(O5_savedSP, SP); | |
520 | |
521 // Generate regular method entry | |
522 __ bind(slow_path); | |
523 __ ba(false, fast_accessor_slow_entry_path); | |
524 __ delayed()->nop(); | |
525 return entry; | |
526 } | |
527 return NULL; | |
528 } | |
529 | |
530 // | |
531 // Interpreter stub for calling a native method. (C++ interpreter) | |
532 // This sets up a somewhat different looking stack for calling the native method | |
533 // than the typical interpreter frame setup. | |
534 // | |
535 | |
536 address InterpreterGenerator::generate_native_entry(bool synchronized) { | |
537 address entry = __ pc(); | |
538 | |
539 // the following temporary registers are used during frame creation | |
540 const Register Gtmp1 = G3_scratch ; | |
541 const Register Gtmp2 = G1_scratch; | |
542 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); | |
543 | |
544 bool inc_counter = UseCompiler || CountCompiledCalls; | |
545 | |
546 // make sure registers are different! | |
547 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); | |
548 | |
549 const Address access_flags (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset())); | |
550 | |
551 Label Lentry; | |
552 __ bind(Lentry); | |
553 | |
554 __ verify_oop(G5_method); | |
555 | |
556 const Register Glocals_size = G3; | |
557 assert_different_registers(Glocals_size, G4_scratch, Gframe_size); | |
558 | |
559 // make sure method is native & not abstract | |
560 // rethink these assertions - they can be simplified and shared (gri 2/25/2000) | |
561 #ifdef ASSERT | |
562 __ ld(access_flags, Gtmp1); | |
563 { | |
564 Label L; | |
565 __ btst(JVM_ACC_NATIVE, Gtmp1); | |
566 __ br(Assembler::notZero, false, Assembler::pt, L); | |
567 __ delayed()->nop(); | |
568 __ stop("tried to execute non-native method as native"); | |
569 __ bind(L); | |
570 } | |
571 { Label L; | |
572 __ btst(JVM_ACC_ABSTRACT, Gtmp1); | |
573 __ br(Assembler::zero, false, Assembler::pt, L); | |
574 __ delayed()->nop(); | |
575 __ stop("tried to execute abstract method as non-abstract"); | |
576 __ bind(L); | |
577 } | |
578 #endif // ASSERT | |
579 | |
580 __ lduh(size_of_parameters, Gtmp1); | |
581 __ sll(Gtmp1, LogBytesPerWord, Gtmp2); // parameter size in bytes | |
582 __ add(Gargs, Gtmp2, Gargs); // points to first local + BytesPerWord | |
583 // NEW | |
584 __ add(Gargs, -wordSize, Gargs); // points to first local[0] | |
585 // generate the code to allocate the interpreter stack frame | |
586 // NEW FRAME ALLOCATED HERE | |
587 // save callers original sp | |
588 // __ mov(SP, I5_savedSP->after_restore()); | |
589 | |
590 generate_compute_interpreter_state(Lstate, G0, true); | |
591 | |
592 // At this point Lstate points to new interpreter state | |
593 // | |
594 | |
595 const Address do_not_unlock_if_synchronized(G2_thread, 0, | |
596 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); | |
597 // Since at this point in the method invocation the exception handler | |
598 // would try to exit the monitor of a synchronized method which hasn't | |
599 // been entered yet, we set the thread local variable | |
600 // _do_not_unlock_if_synchronized to true. If any exception was thrown by | |
601 // runtime, exception handling i.e. unlock_if_synchronized_method will | |
602 // check this thread local flag. | |
603 // This flag has two effects, one is to force an unwind in the topmost | |
604 // interpreter frame and not perform an unlock while doing so. | |
605 | |
606 __ movbool(true, G3_scratch); | |
607 __ stbool(G3_scratch, do_not_unlock_if_synchronized); | |
608 | |
609 | |
610 // increment invocation counter and check for overflow | |
611 // | |
612 // Note: checking for negative value instead of overflow | |
613 // so we have a 'sticky' overflow test (may be of | |
614 // importance as soon as we have true MT/MP) | |
615 Label invocation_counter_overflow; | |
616 if (inc_counter) { | |
617 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); | |
618 } | |
619 Label Lcontinue; | |
620 __ bind(Lcontinue); | |
621 | |
622 bang_stack_shadow_pages(true); | |
623 // reset the _do_not_unlock_if_synchronized flag | |
624 __ stbool(G0, do_not_unlock_if_synchronized); | |
625 | |
626 // check for synchronized methods | |
627 // Must happen AFTER invocation_counter check, so method is not locked | |
628 // if counter overflows. | |
629 | |
630 if (synchronized) { | |
631 lock_method(); | |
632 // Don't see how G2_thread is preserved here... | |
633 // __ verify_thread(); QQQ destroys L0,L1 can't use | |
634 } else { | |
635 #ifdef ASSERT | |
636 { Label ok; | |
637 __ ld_ptr(STATE(_method), G5_method); | |
638 __ ld(access_flags, O0); | |
639 __ btst(JVM_ACC_SYNCHRONIZED, O0); | |
640 __ br( Assembler::zero, false, Assembler::pt, ok); | |
641 __ delayed()->nop(); | |
642 __ stop("method needs synchronization"); | |
643 __ bind(ok); | |
644 } | |
645 #endif // ASSERT | |
646 } | |
647 | |
648 // start execution | |
649 | |
650 // __ verify_thread(); kills L1,L2 can't use at the moment | |
651 | |
652 // jvmti/jvmpi support | |
653 __ notify_method_entry(); | |
654 | |
655 // native call | |
656 | |
657 // (note that O0 is never an oop--at most it is a handle) | |
658 // It is important not to smash any handles created by this call, | |
659 // until any oop handle in O0 is dereferenced. | |
660 | |
661 // (note that the space for outgoing params is preallocated) | |
662 | |
663 // get signature handler | |
664 | |
665 Label pending_exception_present; | |
666 | |
667 { Label L; | |
668 __ ld_ptr(STATE(_method), G5_method); | |
669 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); | |
670 __ tst(G3_scratch); | |
671 __ brx(Assembler::notZero, false, Assembler::pt, L); | |
672 __ delayed()->nop(); | |
673 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false); | |
674 __ ld_ptr(STATE(_method), G5_method); | |
675 | |
676 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); | |
677 __ ld_ptr(exception_addr, G3_scratch); | |
678 __ br_notnull(G3_scratch, false, Assembler::pn, pending_exception_present); | |
679 __ delayed()->nop(); | |
680 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); | |
681 __ bind(L); | |
682 } | |
683 | |
684 // Push a new frame so that the args will really be stored in it. | |
685 // Copy a few locals across so the new frame has the variables | |
686 // we need but these values will be dead at the jni call and | |
687 // therefore not gc volatile like the values in the current | |
688 // frame (Lstate in particular) | |
689 | |
690 // Flush the state pointer to the register save area | |
691 // Which is the only register we need for a stack walk. | |
692 __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS); | |
693 | |
694 __ mov(Lstate, O1); // Need to pass the state pointer across the frame | |
695 | |
696 // Calculate current frame size | |
697 __ sub(SP, FP, O3); // Calculate negative of current frame size | |
698 __ save(SP, O3, SP); // Allocate an identical sized frame | |
699 | |
700 __ mov(I1, Lstate); // In the "natural" register. | |
701 | |
702 // Note I7 has leftover trash. Slow signature handler will fill it in | |
703 // should we get there. Normal jni call will set reasonable last_Java_pc | |
704 // below (and fix I7 so the stack trace doesn't have a meaningless frame | |
705 // in it). | |
706 | |
707 | |
708 // call signature handler | |
709 __ ld_ptr(STATE(_method), Lmethod); | |
710 __ ld_ptr(STATE(_locals), Llocals); | |
711 | |
712 __ callr(G3_scratch, 0); | |
713 __ delayed()->nop(); | |
714 __ ld_ptr(STATE(_thread), G2_thread); // restore thread (shouldn't be needed) | |
715 | |
716 { Label not_static; | |
717 | |
718 __ ld_ptr(STATE(_method), G5_method); | |
719 __ ld(access_flags, O0); | |
720 __ btst(JVM_ACC_STATIC, O0); | |
721 __ br( Assembler::zero, false, Assembler::pt, not_static); | |
722 __ delayed()-> | |
723 // get native function entry point(O0 is a good temp until the very end) | |
724 ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0); | |
725 // for static methods insert the mirror argument | |
726 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); | |
727 | |
728 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1); | |
729 __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); | |
730 __ ld_ptr(O1, mirror_offset, O1); | |
731 // where the mirror handle body is allocated: | |
732 #ifdef ASSERT | |
733 if (!PrintSignatureHandlers) // do not dirty the output with this | |
734 { Label L; | |
735 __ tst(O1); | |
736 __ brx(Assembler::notZero, false, Assembler::pt, L); | |
737 __ delayed()->nop(); | |
738 __ stop("mirror is missing"); | |
739 __ bind(L); | |
740 } | |
741 #endif // ASSERT | |
742 __ st_ptr(O1, STATE(_oop_temp)); | |
743 __ add(STATE(_oop_temp), O1); // this is really an LEA not an add | |
744 __ bind(not_static); | |
745 } | |
746 | |
747 // At this point, arguments have been copied off of stack into | |
748 // their JNI positions, which are O1..O5 and SP[68..]. | |
749 // Oops are boxed in-place on the stack, with handles copied to arguments. | |
750 // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*. | |
751 | |
752 #ifdef ASSERT | |
753 { Label L; | |
754 __ tst(O0); | |
755 __ brx(Assembler::notZero, false, Assembler::pt, L); | |
756 __ delayed()->nop(); | |
757 __ stop("native entry point is missing"); | |
758 __ bind(L); | |
759 } | |
760 #endif // ASSERT | |
761 | |
762 // | |
763 // setup the java frame anchor | |
764 // | |
765 // The scavenge function only needs to know that the PC of this frame is | |
766 // in the interpreter method entry code, it doesn't need to know the exact | |
767 // PC and hence we can use O7 which points to the return address from the | |
768 // previous call in the code stream (signature handler function) | |
769 // | |
770 // The other trick is we set last_Java_sp to FP instead of the usual SP because | |
771 // we have pushed the extra frame in order to protect the volatile register(s) | |
772 // in that frame when we return from the jni call | |
773 // | |
774 | |
775 | |
776 __ set_last_Java_frame(FP, O7); | |
777 __ mov(O7, I7); // make dummy interpreter frame look like one above, | |
778 // not meaningless information that'll confuse me. | |
779 | |
780 // flush the windows now. We don't care about the current (protection) frame | |
781 // only the outer frames | |
782 | |
783 __ flush_windows(); | |
784 | |
785 // mark windows as flushed | |
786 Address flags(G2_thread, | |
787 0, | |
788 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); | |
789 __ set(JavaFrameAnchor::flushed, G3_scratch); | |
790 __ st(G3_scratch, flags); | |
791 | |
792 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready. | |
793 | |
794 Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset())); | |
795 #ifdef ASSERT | |
796 { Label L; | |
797 __ ld(thread_state, G3_scratch); | |
798 __ cmp(G3_scratch, _thread_in_Java); | |
799 __ br(Assembler::equal, false, Assembler::pt, L); | |
800 __ delayed()->nop(); | |
801 __ stop("Wrong thread state in native stub"); | |
802 __ bind(L); | |
803 } | |
804 #endif // ASSERT | |
805 __ set(_thread_in_native, G3_scratch); | |
806 __ st(G3_scratch, thread_state); | |
807 | |
808 // Call the jni method, using the delay slot to set the JNIEnv* argument. | |
809 __ callr(O0, 0); | |
810 __ delayed()-> | |
811 add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0); | |
812 __ ld_ptr(STATE(_thread), G2_thread); // restore thread | |
813 | |
814 // must we block? | |
815 | |
816 // Block, if necessary, before resuming in _thread_in_Java state. | |
817 // In order for GC to work, don't clear the last_Java_sp until after blocking. | |
818 { Label no_block; | |
819 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); | |
820 | |
821 // Switch thread to "native transition" state before reading the synchronization state. | |
822 // This additional state is necessary because reading and testing the synchronization | |
823 // state is not atomic w.r.t. GC, as this scenario demonstrates: | |
824 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. | |
825 // VM thread changes sync state to synchronizing and suspends threads for GC. | |
826 // Thread A is resumed to finish this native method, but doesn't block here since it | |
827 // didn't see any synchronization in progress, and escapes. | |
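// Sketch of the resulting state protocol for this thread:
//   _thread_in_native -> _thread_in_native_trans   (stores just below, plus serialization)
//   if a safepoint is in progress or suspend flags are set, block in the VM
//   _thread_in_native_trans -> _thread_in_Java     (after the no_block label)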
828 __ set(_thread_in_native_trans, G3_scratch); | |
829 __ st(G3_scratch, thread_state); | |
830 if(os::is_MP()) { | |
831 // Write serialization page so VM thread can do a pseudo remote membar. | |
832 // We use the current thread pointer to calculate a thread specific | |
833 // offset to write to within the page. This minimizes bus traffic | |
834 // due to cache line collision. | |
835 __ serialize_memory(G2_thread, G1_scratch, G3_scratch); | |
836 } | |
837 __ load_contents(sync_state, G3_scratch); | |
838 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); | |
839 | |
840 | |
841 Label L; | |
842 Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); | |
843 __ br(Assembler::notEqual, false, Assembler::pn, L); | |
844 __ delayed()-> | |
845 ld(suspend_state, G3_scratch); | |
846 __ cmp(G3_scratch, 0); | |
847 __ br(Assembler::equal, false, Assembler::pt, no_block); | |
848 __ delayed()->nop(); | |
849 __ bind(L); | |
850 | |
851 // Block. Save any potential method result value before the operation and | |
852 // use a leaf call to leave the last_Java_frame setup undisturbed. | |
853 save_native_result(); | |
854 __ call_VM_leaf(noreg, | |
855 CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans), | |
856 G2_thread); | |
857 __ ld_ptr(STATE(_thread), G2_thread); // restore thread | |
858 // Restore any method result value | |
859 restore_native_result(); | |
860 __ bind(no_block); | |
861 } | |
862 | |
863 // Clear the frame anchor now | |
864 | |
865 __ reset_last_Java_frame(); | |
866 | |
867 // Move the result handler address | |
868 __ mov(Lscratch, G3_scratch); | |
869 // return possible result to the outer frame | |
870 #ifndef __LP64 | |
871 __ mov(O0, I0); | |
872 __ restore(O1, G0, O1); | |
873 #else | |
874 __ restore(O0, G0, O0); | |
875 #endif /* __LP64 */ | |
876 | |
877 // Move result handler to expected register | |
878 __ mov(G3_scratch, Lscratch); | |
879 | |
880 | |
881 // thread state is thread_in_native_trans. Any safepoint blocking has | |
882 // happened in the trampoline; we are ready to switch to thread_in_Java. | |
883 | |
884 __ set(_thread_in_Java, G3_scratch); | |
885 __ st(G3_scratch, thread_state); | |
886 | |
887 // If we have an oop result, store it where it will be safe for any further gc | |
888 // until we return, now that we've released the handle it might be protected by. | |
889 | |
890 { | |
891 Label no_oop, store_result; | |
892 | |
893 __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch); | |
894 __ cmp(G3_scratch, Lscratch); | |
895 __ brx(Assembler::notEqual, false, Assembler::pt, no_oop); | |
896 __ delayed()->nop(); | |
897 __ addcc(G0, O0, O0); | |
898 __ brx(Assembler::notZero, true, Assembler::pt, store_result); // if result is not NULL: | |
899 __ delayed()->ld_ptr(O0, 0, O0); // unbox it | |
900 __ mov(G0, O0); | |
901 | |
902 __ bind(store_result); | |
903 // Store it where gc will look for it and result handler expects it. | |
904 __ st_ptr(O0, STATE(_oop_temp)); | |
905 | |
906 __ bind(no_oop); | |
907 | |
908 } | |
909 | |
910 // reset handle block | |
911 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch); | |
912 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes()); | |
913 | |
914 | |
915 // handle exceptions (exception handling will handle unlocking!) | |
916 { Label L; | |
917 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); | |
918 | |
919 __ ld_ptr(exception_addr, Gtemp); | |
920 __ tst(Gtemp); | |
921 __ brx(Assembler::equal, false, Assembler::pt, L); | |
922 __ delayed()->nop(); | |
923 __ bind(pending_exception_present); | |
924 // With the c++ interpreter we just leave it pending; the caller will do the correct thing. However... | |
925 // Like x86 we ignore the result of the native call and leave the method locked. This | |
926 // seems wrong to leave things locked. | |
927 | |
928 __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); | |
929 __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame | |
930 | |
931 __ bind(L); | |
932 } | |
933 | |
934 // jvmdi/jvmpi support (preserves thread register) | |
935 __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI); | |
936 | |
937 if (synchronized) { | |
938 // save and restore any potential method result value around the unlocking operation | |
939 save_native_result(); | |
940 | |
941 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
942 // Get the initial monitor we allocated | |
943 __ sub(Lstate, entry_size, O1); // initial monitor | |
944 __ unlock_object(O1); | |
945 restore_native_result(); | |
946 } | |
947 | |
948 #if defined(COMPILER2) && !defined(_LP64) | |
949 | |
950 // C2 expects long results in G1 we can't tell if we're returning to interpreted | |
951 // or compiled so just be safe. | |
952 | |
953 __ sllx(O0, 32, G1); // Shift bits into high G1 | |
954 __ srl (O1, 0, O1); // Zero extend O1 | |
955 __ or3 (O1, G1, G1); // OR 64 bits into G1 | |
956 | |
957 #endif /* COMPILER2 && !_LP64 */ | |
958 | |
959 #ifdef ASSERT | |
960 { | |
961 Label ok; | |
962 __ cmp(I5_savedSP, FP); | |
963 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok); | |
964 __ delayed()->nop(); | |
965 __ stop("bad I5_savedSP value"); | |
966 __ should_not_reach_here(); | |
967 __ bind(ok); | |
968 } | |
969 #endif | |
970 // Calls result handler which POPS FRAME | |
971 if (TraceJumps) { | |
972 // Move target to register that is recordable | |
973 __ mov(Lscratch, G3_scratch); | |
974 __ JMP(G3_scratch, 0); | |
975 } else { | |
976 __ jmp(Lscratch, 0); | |
977 } | |
978 __ delayed()->nop(); | |
979 | |
980 if (inc_counter) { | |
981 // handle invocation counter overflow | |
982 __ bind(invocation_counter_overflow); | |
983 generate_counter_overflow(Lcontinue); | |
984 } | |
985 | |
986 | |
987 return entry; | |
988 } | |
989 | |
990 void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state, | |
991 const Register prev_state, | |
992 bool native) { | |
993 | |
994 // On entry | |
995 // G5_method - caller's method | |
996 // Gargs - points to initial parameters (i.e. locals[0]) | |
997 // G2_thread - valid? (C1 only??) | |
998 // "prev_state" - contains any previous frame manager state which we must save a link | |
999 // | |
1000 // On return | |
1001 // "state" is a pointer to the newly allocated state object. We must allocate and initialize | |
1002 // a new interpretState object and the method expression stack. | |
1003 | |
1004 assert_different_registers(state, prev_state); | |
1005 assert_different_registers(prev_state, G3_scratch); | |
1006 const Register Gtmp = G3_scratch; | |
1007 const Address constants (G5_method, 0, in_bytes(methodOopDesc::constants_offset())); | |
1008 const Address access_flags (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset())); | |
1009 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); | |
1010 const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); | |
1011 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); | |
1012 | |
1013 // slop factor is two extra slots on the expression stack so that | |
1014 // we always have room to store a result when returning from a call without parameters | |
1015 // that returns a result. | |
1016 | |
1017 const int slop_factor = 2*wordSize; | |
1018 | |
1019 const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor? | |
710 | 1020 //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters |
0 | 1021 frame::memory_parameter_word_sp_offset + // register save area + param window |
1022 (native ? frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class | |
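// In other words fixed_size is a word count: the BytecodeInterpreter struct plus the two-slot
// slop, the register save area / parameter window, and (for natives) the extra
// outgoing-argument words; the variable part (max_stack for Java methods, or the native
// parameter area) is added to it just below.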
1023 | |
1024 // XXX G5_method valid | |
1025 | |
1026 // Now compute new frame size | |
1027 | |
1028 if (native) { | |
1029 __ lduh( size_of_parameters, Gtmp ); | |
1030 __ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words | |
1031 } else { | |
1032 __ lduh(max_stack, Gtmp); // Full size expression stack | |
1033 } | |
1034 __ add(Gtmp, fixed_size, Gtmp); // plus the fixed portion | |
1035 | |
1036 __ neg(Gtmp); // negative space for stack/parameters in words | |
1037 __ and3(Gtmp, -WordsPerLong, Gtmp); // make multiple of 2 (SP must be 2-word aligned) | |
1038 __ sll(Gtmp, LogBytesPerWord, Gtmp); // negative space for frame in bytes | |
1039 | |
1040 // Need to do stack size check here before we fault on large frames | |
1041 | |
1042 Label stack_ok; | |
1043 | |
1044 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : | |
1045 (StackRedPages+StackYellowPages); | |
1046 | |
1047 | |
1048 __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), O0); | |
1049 __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), O1); | |
1050 // compute stack bottom | |
1051 __ sub(O0, O1, O0); | |
1052 | |
1053 // Avoid touching the guard pages | |
1054 // Also a fudge for frame size of BytecodeInterpreter::run | |
1055 // It varies from 1k->4k depending on build type | |
1056 const int fudge = 6 * K; | |
1057 | |
1058 __ set(fudge + (max_pages * os::vm_page_size()), O1); | |
1059 | |
1060 __ add(O0, O1, O0); | |
1061 __ sub(O0, Gtmp, O0); | |
1062 __ cmp(SP, O0); | |
1063 __ brx(Assembler::greaterUnsigned, false, Assembler::pt, stack_ok); | |
1064 __ delayed()->nop(); | |
1065 | |
1066 // throw exception return address becomes throwing pc | |
1067 | |
1068 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); | |
1069 __ stop("never reached"); | |
1070 | |
1071 __ bind(stack_ok); | |
1072 | |
1073 __ save(SP, Gtmp, SP); // setup new frame and register window | |
1074 | |
1075 // New window I7 call_stub or previous activation | |
1076 // O6 - register save area, BytecodeInterpreter just below it, args/locals just above that | |
1077 // | |
1078 __ sub(FP, sizeof(BytecodeInterpreter), state); // Point to new Interpreter state | |
1079 __ add(state, STACK_BIAS, state ); // Account for 64bit bias | |
1080 | |
1081 #define XXX_STATE(field_name) state, in_bytes(byte_offset_of(BytecodeInterpreter, field_name)) | |
1082 | |
1083 // Initialize a new Interpreter state | |
1084 // orig_sp - caller's original sp | |
1085 // G2_thread - thread | |
1086 // Gargs - &locals[0] (unbiased?) | |
1087 // G5_method - method | |
1088 // SP (biased) - accounts for full size java stack, BytecodeInterpreter object, register save area, and register parameter save window | |
1089 | |
1090 | |
1091 __ set(0xdead0004, O1); | |
1092 | |
1093 | |
1094 __ st_ptr(Gargs, XXX_STATE(_locals)); | |
1095 __ st_ptr(G0, XXX_STATE(_oop_temp)); | |
1096 | |
1097 __ st_ptr(state, XXX_STATE(_self_link)); // point to self | |
1098 __ st_ptr(prev_state->after_save(), XXX_STATE(_prev_link)); // Chain interpreter states | |
1099 __ st_ptr(G2_thread, XXX_STATE(_thread)); // Store javathread | |
1100 | |
1101 if (native) { | |
1102 __ st_ptr(G0, XXX_STATE(_bcp)); | |
1103 } else { | |
1104 __ ld_ptr(G5_method, in_bytes(methodOopDesc::const_offset()), O2); // get constMethodOop | |
1105 __ add(O2, in_bytes(constMethodOopDesc::codes_offset()), O2); // get bcp | |
1106 __ st_ptr(O2, XXX_STATE(_bcp)); | |
1107 } | |
1108 | |
1109 __ st_ptr(G0, XXX_STATE(_mdx)); | |
1110 __ st_ptr(G5_method, XXX_STATE(_method)); | |
1111 | |
1112 __ set((int) BytecodeInterpreter::method_entry, O1); | |
1113 __ st(O1, XXX_STATE(_msg)); | |
1114 | |
1115 __ ld_ptr(constants, O3); | |
1116 __ ld_ptr(O3, constantPoolOopDesc::cache_offset_in_bytes(), O2); | |
1117 __ st_ptr(O2, XXX_STATE(_constants)); | |
1118 | |
1119 __ st_ptr(G0, XXX_STATE(_result._to_call._callee)); | |
1120 | |
1121 // Monitor base is just start of BytecodeInterpreter object; | |
1122 __ mov(state, O2); | |
1123 __ st_ptr(O2, XXX_STATE(_monitor_base)); | |
1124 | |
1125 // Do we need a monitor for a synchronized method? | |
1126 { | |
1127 __ ld(access_flags, O1); | |
1128 Label done; | |
1129 Label got_obj; | |
1130 __ btst(JVM_ACC_SYNCHRONIZED, O1); | |
1131 __ br( Assembler::zero, false, Assembler::pt, done); | |
1132 | |
1133 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); | |
1134 __ delayed()->btst(JVM_ACC_STATIC, O1); | |
1135 __ ld_ptr(XXX_STATE(_locals), O1); | |
1136 __ br( Assembler::zero, true, Assembler::pt, got_obj); | |
1137 __ delayed()->ld_ptr(O1, 0, O1); // get receiver for not-static case | |
1138 __ ld_ptr(constants, O1); | |
1139 __ ld_ptr( O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1); | |
1140 // lock the mirror, not the klassOop | |
1141 __ ld_ptr( O1, mirror_offset, O1); | |
1142 | |
1143 __ bind(got_obj); | |
1144 | |
1145 #ifdef ASSERT | |
1146 __ tst(O1); | |
1147 __ breakpoint_trap(Assembler::zero); | |
1148 #endif // ASSERT | |
1149 | |
1150 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
1151 __ sub(SP, entry_size, SP); // account for initial monitor | |
1152 __ sub(O2, entry_size, O2); // initial monitor | |
1153 __ st_ptr(O1, O2, BasicObjectLock::obj_offset_in_bytes()); // and allocate it for interpreter use | |
1154 __ bind(done); | |
1155 } | |
1156 | |
1157 // Remember initial frame bottom | |
1158 | |
1159 __ st_ptr(SP, XXX_STATE(_frame_bottom)); | |
1160 | |
1161 __ st_ptr(O2, XXX_STATE(_stack_base)); | |
1162 | |
1163 __ sub(O2, wordSize, O2); // prepush | |
1164 __ st_ptr(O2, XXX_STATE(_stack)); // PREPUSH | |
1165 | |
1166 __ lduh(max_stack, O3); // Full size expression stack | |
710 | 1167 guarantee(!EnableMethodHandles, "no support yet for java.dyn.MethodHandle"); //6815692 |
1168 //6815692//if (EnableMethodHandles) | |
1169 //6815692// __ inc(O3, methodOopDesc::extra_stack_entries()); | |
0 | 1170 __ sll(O3, LogBytesPerWord, O3); |
1171 __ sub(O2, O3, O3); | |
1172 // __ sub(O3, wordSize, O3); // so prepush doesn't look out of bounds | |
1173 __ st_ptr(O3, XXX_STATE(_stack_limit)); | |
1174 | |
1175 if (!native) { | |
1176 // | |
1177 // Code to initialize locals | |
1178 // | |
1179 Register init_value = noreg; // will be G0 if we must clear locals | |
1180 // Now zero locals | |
1181 if (true /* zerolocals */ || ClearInterpreterLocals) { | |
1182 // explicitly initialize locals | |
1183 init_value = G0; | |
1184 } else { | |
1185 #ifdef ASSERT | |
1186 // initialize locals to a garbage pattern for better debugging | |
1187 init_value = O3; | |
1188 __ set( 0x0F0F0F0F, init_value ); | |
1189 #endif // ASSERT | |
1190 } | |
1191 if (init_value != noreg) { | |
1192 Label clear_loop; | |
1193 | |
1194 // NOTE: If you change the frame layout, this code will need to | |
1195 // be updated! | |
1196 __ lduh( size_of_locals, O2 ); | |
1197 __ lduh( size_of_parameters, O1 ); | |
1198 __ sll( O2, LogBytesPerWord, O2); | |
1199 __ sll( O1, LogBytesPerWord, O1 ); | |
1200 __ ld_ptr(XXX_STATE(_locals), L2_scratch); | |
1201 __ sub( L2_scratch, O2, O2 ); | |
1202 __ sub( L2_scratch, O1, O1 ); | |
1203 | |
1204 __ bind( clear_loop ); | |
1205 __ inc( O2, wordSize ); | |
1206 | |
1207 __ cmp( O2, O1 ); | |
1208 __ br( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop ); | |
1209 __ delayed()->st_ptr( init_value, O2, 0 ); | |
1210 } | |
1211 } | |
1212 } | |
1213 // Find preallocated monitor and lock method (C++ interpreter) | |
1214 // | |
1215 void InterpreterGenerator::lock_method(void) { | |
1216 // Lock the current method. | |
1217 // Destroys registers L2_scratch, L3_scratch, O0 | |
1218 // | |
1219 // Find everything relative to Lstate | |
1220 | |
1221 #ifdef ASSERT | |
1222 __ ld_ptr(STATE(_method), L2_scratch); | |
1223 __ ld(L2_scratch, in_bytes(methodOopDesc::access_flags_offset()), O0); | |
1224 | |
1225 { Label ok; | |
1226 __ btst(JVM_ACC_SYNCHRONIZED, O0); | |
1227 __ br( Assembler::notZero, false, Assembler::pt, ok); | |
1228 __ delayed()->nop(); | |
1229 __ stop("method doesn't need synchronization"); | |
1230 __ bind(ok); | |
1231 } | |
1232 #endif // ASSERT | |
1233 | |
1234 // monitor is already allocated at stack base | |
1235 // and the lockee is already present | |
1236 __ ld_ptr(STATE(_stack_base), L2_scratch); | |
1237 __ ld_ptr(L2_scratch, BasicObjectLock::obj_offset_in_bytes(), O0); // get object | |
1238 __ lock_object(L2_scratch, O0); | |
1239 | |
1240 } | |
1241 | |
1242 // Generate code for handling resuming a deopted method | |
1243 void CppInterpreterGenerator::generate_deopt_handling() { | |
1244 | |
1245 Label return_from_deopt_common; | |
1246 | |
1247 // deopt needs to jump to here to enter the interpreter (return a result) | |
1248 deopt_frame_manager_return_atos = __ pc(); | |
1249 | |
1250 // O0/O1 live | |
1251 __ ba(false, return_from_deopt_common); | |
1252 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch); // Result stub address array index | |
1253 | |
1254 | |
1255 // deopt needs to jump to here to enter the interpreter (return a result) | |
1256 deopt_frame_manager_return_btos = __ pc(); | |
1257 | |
1258 // O0/O1 live | |
1259 __ ba(false, return_from_deopt_common); | |
1260 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch); // Result stub address array index | |
1261 | |
1262 // deopt needs to jump to here to enter the interpreter (return a result) | |
1263 deopt_frame_manager_return_itos = __ pc(); | |
1264 | |
1265 // O0/O1 live | |
1266 __ ba(false, return_from_deopt_common); | |
1267 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch); // Result stub address array index | |
1268 | |
1269 // deopt needs to jump to here to enter the interpreter (return a result) | |
1270 | |
1271 deopt_frame_manager_return_ltos = __ pc(); | |
1272 #if !defined(_LP64) && defined(COMPILER2) | |
1273 // All return values are where we want them, except for Longs. C2 returns | |
1274 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. | |
1275 // Since the interpreter will return longs in G1 and O0/O1 in the 32bit | |
1276 // build even if we are returning from interpreted we just do a little | |
1277 // stupid shuffling. | |
1278 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to | |
1279 // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node | |
1280 // first which would move g1 -> O0/O1 and destroy the exception we were throwing. | |
1281 | |
1282 __ srl (G1, 0,O1); | |
1283 __ srlx(G1,32,O0); | |
1284 #endif /* !_LP64 && COMPILER2 */ | |
1285 // O0/O1 live | |
1286 __ ba(false, return_from_deopt_common); | |
1287 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch); // Result stub address array index | |
1288 | |
1289 // deopt needs to jump to here to enter the interpreter (return a result) | |
1290 | |
1291 deopt_frame_manager_return_ftos = __ pc(); | |
1292 // O0/O1 live | |
1293 __ ba(false, return_from_deopt_common); | |
1294 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch); // Result stub address array index | |
1295 | |
1296 // deopt needs to jump to here to enter the interpreter (return a result) | |
1297 deopt_frame_manager_return_dtos = __ pc(); | |
1298 | |
1299 // O0/O1 live | |
1300 __ ba(false, return_from_deopt_common); | |
1301 __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch); // Result stub address array index | |
1302 | |
1303 // deopt needs to jump to here to enter the interpreter (return a result) | |
1304 deopt_frame_manager_return_vtos = __ pc(); | |
1305 | |
1306 // O0/O1 live | |
1307 __ set(AbstractInterpreter::BasicType_as_index(T_VOID), L3_scratch); | |
1308 | |
1309 // Deopt return common | |
1310 // an index is present that lets us move any possible result being | |
1311 // returned to the interpreter's stack | |
1312 // | |
1313 __ bind(return_from_deopt_common); | |
1314 | |
1315 // Result if any is in native abi result (O0..O1/F0..F1). The java expression | |
1316 // stack is in the state that the calling convention left it. | |
1317 // Copy the result from native abi result and place it on java expression stack. | |
1318 | |
1319 // Current interpreter state is present in Lstate | |
1320 | |
1321 // Get current pre-pushed top of interpreter stack | |
1322 // Any result (if any) is in native abi | |
1323 // result type index is in L3_scratch | |
1324 | |
1325 __ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack | |
1326 | |
1327 __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch); | |
1328 __ sll(L3_scratch, LogBytesPerWord, L3_scratch); | |
1329 __ ld_ptr(L4_scratch, L3_scratch, Lscratch); // get typed result converter address | |
1330 __ jmpl(Lscratch, G0, O7); // and convert it | |
1331 __ delayed()->nop(); | |
1332 | |
1333 // L1_scratch points to top of stack (prepushed) | |
1334 __ st_ptr(L1_scratch, STATE(_stack)); | |
1335 } | |
1336 | |
1337 // Generate the code to handle a more_monitors message from the c++ interpreter | |
1338 void CppInterpreterGenerator::generate_more_monitors() { | |
1339 | |
1340 Label entry, loop; | |
1341 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
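// In outline: the expression stack lives between _stack_base (high) and
// _stack_limit (low) and grows downward, with the monitor area sitting just
// above _stack_base. To make room for one more BasicObjectLock we lower
// _stack_base, _stack_limit, _stack and SP by entry_size, slide every live
// expression stack word down by entry_size, and finally zero the obj field of
// the newly exposed monitor slot so it can be recognized as free.
// Note also that the delayed()->ld_ptr below fills the delay slot of the
// message-dispatch branch emitted by our caller immediately before
// generate_more_monitors() is invoked.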
1342 // 1. compute new pointers // esp: old expression stack top | |
1343 __ delayed()->ld_ptr(STATE(_stack_base), L4_scratch); // current expression stack bottom | |
1344 __ sub(L4_scratch, entry_size, L4_scratch); | |
1345 __ st_ptr(L4_scratch, STATE(_stack_base)); | |
1346 | |
1347 __ sub(SP, entry_size, SP); // Grow stack | |
1348 __ st_ptr(SP, STATE(_frame_bottom)); | |
1349 | |
1350 __ ld_ptr(STATE(_stack_limit), L2_scratch); | |
1351 __ sub(L2_scratch, entry_size, L2_scratch); | |
1352 __ st_ptr(L2_scratch, STATE(_stack_limit)); | |
1353 | |
1354 __ ld_ptr(STATE(_stack), L1_scratch); // Get current stack top | |
1355 __ sub(L1_scratch, entry_size, L1_scratch); | |
1356 __ st_ptr(L1_scratch, STATE(_stack)); | |
1357 __ ba(false, entry); | |
1358 __ delayed()->add(L1_scratch, wordSize, L1_scratch); // first real entry (undo prepush) | |
1359 | |
1360 // 2. move expression stack | |
1361 | |
1362 __ bind(loop); | |
1363 __ st_ptr(L3_scratch, Address(L1_scratch, 0)); | |
1364 __ add(L1_scratch, wordSize, L1_scratch); | |
1365 __ bind(entry); | |
1366 __ cmp(L1_scratch, L4_scratch); | |
1367 __ br(Assembler::notEqual, false, Assembler::pt, loop); | |
1368 __ delayed()->ld_ptr(L1_scratch, entry_size, L3_scratch); | |
1369 | |
1370 // now zero the slot so we can find it. | |
123 | 1371 __ st_ptr(G0, L4_scratch, BasicObjectLock::obj_offset_in_bytes()); |
0 | 1372 |
1373 } | |
1374 | |
1375 // Initial entry to C++ interpreter from the call_stub. | |
1376 // This entry point is called the frame manager since it handles the generation | |
1377 // of interpreter activation frames via requests directly from the vm (via call_stub) | |
1378 // and via requests from the interpreter. The requests from the call_stub happen | |
1379 // directly through the entry point. Requests from the interpreter happen via returning | |
1380 // from the interpreter and examining the message the interpreter has returned to | |
1381 // the frame manager. The frame manager can take the following requests: | |
1382 | |
1383 // NO_REQUEST - error, should never happen. | |
1384 // MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and | |
1385 // allocate a new monitor. | |
1386 // CALL_METHOD - setup a new activation to call a new method. Very similar to what | |
1387 // happens during entry via the call_stub. | |
1388 // RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub. | |
1389 // | |
1390 // Arguments: | |
1391 // | |
1392 // G5_method: methodOop | |
1393 // receiver: unused (retrieved from stack as needed) | |
1394 // prevState: previous frame manager state (NULL from the call_stub/c1/c2) | |
1395 // | |
1396 // | |
1397 // Stack layout at entry | |
1398 // | |
1399 // [ return address ] <--- esp | |
1400 // [ parameter n ] | |
1401 // ... | |
1402 // [ parameter 1 ] | |
1403 // [ expression stack ] | |
1404 // | |
1405 // | |
1406 // We are free to blow any registers we like because the call_stub which brought us here | |
1407 // initially has preserved the callee save registers already. | |
1408 // | |
1409 // | |
1410 | |
1411 static address interpreter_frame_manager = NULL; | |
1412 | |
1413 #ifdef ASSERT | |
1414 #define VALIDATE_STATE(scratch, marker) \ | |
1415 { \ | |
1416 Label skip; \ | |
1417 __ ld_ptr(STATE(_self_link), scratch); \ | |
1418 __ cmp(Lstate, scratch); \ | |
1419 __ brx(Assembler::equal, false, Assembler::pt, skip); \ | |
1420 __ delayed()->nop(); \ | |
1421 __ breakpoint_trap(); \ | |
1422 __ emit_long(marker); \ | |
1423 __ bind(skip); \ | |
1424 } | |
1425 #else | |
1426 #define VALIDATE_STATE(scratch, marker) | |
1427 #endif /* ASSERT */ | |
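// _self_link is initialized to point back at the state object itself (see
// layout_interpreterState below), so VALIDATE_STATE is a cheap sanity check
// that Lstate still points at a live BytecodeInterpreter; the marker emitted
// after the breakpoint trap identifies which of the checks fired.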
1428 | |
1429 void CppInterpreterGenerator::adjust_callers_stack(Register args) { | |
1430 // | |
1431 // Adjust caller's stack so that all the locals can be contiguous with | |
1432 // the parameters. | |
1433 // Worries about stack overflow make this a pain. | |
1434 // | |
1435 // Destroys args, G3_scratch, O2 | |
1436 // In/Out O5_savedSP (sender's original SP) | |
1437 // | |
1438 // assert_different_registers(state, prev_state); | |
1439 const Register Gtmp = G3_scratch; | |
1440 const Register tmp = O2; | |
1441 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); | |
1442 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); | |
1443 | |
1444 __ lduh(size_of_parameters, tmp); | |
1445 __ sll(tmp, LogBytesPerWord, Gtmp); // parameter size in bytes | |
1446 __ add(args, Gtmp, Gargs); // points to first local + BytesPerWord | |
1447 // NEW | |
1448 __ add(Gargs, -wordSize, Gargs); // points to first local[0] | |
1449 // determine extra space for non-argument locals & adjust caller's SP | |
1450 // Gtmp1: parameter size in words | |
1451 __ lduh(size_of_locals, Gtmp); | |
1452 __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp); | |
1453 | |
1454 #if 1 | |
1455 // c2i adapters place the final interpreter argument in the register save area for O0/I0 | |
1456 // the call_stub will place the final interpreter argument at | |
1457 // frame::memory_parameter_word_sp_offset. This is mostly not noticeable for either asm | |
1458 // or c++ interpreter. However with the c++ interpreter when we do a recursive call | |
1459 // and try to make it look good in the debugger we will store the argument to | |
1460 // RecursiveInterpreterActivation in the register argument save area. Without allocating | |
1461 // extra space for the compiler this will overwrite locals in the local array of the | |
1462 // interpreter. | |
1463 // QQQ still needed with frameless adapters??? | |
1464 | |
1465 const int c2i_adjust_words = frame::memory_parameter_word_sp_offset - frame::callee_register_argument_save_area_sp_offset; | |
1466 | |
1467 __ add(Gtmp, c2i_adjust_words*wordSize, Gtmp); | |
1468 #endif // 1 | |
1469 | |
1470 | |
1471 __ sub(SP, Gtmp, SP); // adjust caller's frame for the additional space we need. | |
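// Net effect: Gargs now points at locals[0] (args + parameter size in bytes,
// less one word) and the caller's frame has been grown by the space needed for
// the non-argument locals plus the c2i register-save slop computed above.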
1472 } | |
1473 | |
1474 address InterpreterGenerator::generate_normal_entry(bool synchronized) { | |
1475 | |
1476 // G5_method: methodOop | |
1477 // G2_thread: thread (unused) | |
1478 // Gargs: bottom of args (sender_sp) | |
1479 // O5: sender's sp | |
1480 | |
1481 // A single frame manager is plenty as we don't specialize for synchronized. We could and | |
1482 // the code is pretty much ready. Would need to change the test below and for good measure | |
1483 // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized | |
1484 // routines. Not clear this is worth it yet. | |
1485 | |
1486 if (interpreter_frame_manager) { | |
1487 return interpreter_frame_manager; | |
1488 } | |
1489 | |
1490 __ bind(frame_manager_entry); | |
1491 | |
1492 // the following temporary registers are used during frame creation | |
1493 const Register Gtmp1 = G3_scratch; | |
1494 // const Register Lmirror = L1; // native mirror (native calls only) | |
1495 | |
1496 const Address constants (G5_method, 0, in_bytes(methodOopDesc::constants_offset())); | |
1497 const Address access_flags (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset())); | |
1498 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); | |
1499 const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); | |
1500 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); | |
1501 | |
1502 address entry_point = __ pc(); | |
1503 __ mov(G0, prevState); // no current activation | |
1504 | |
1505 | |
1506 Label re_dispatch; | |
1507 | |
1508 __ bind(re_dispatch); | |
1509 | |
1510 // The interpreter needs to have the locals completely contiguous. In order to do that | |
1511 // we must adjust the caller's stack pointer for any locals beyond just the | |
1512 // parameters. | |
1513 adjust_callers_stack(Gargs); | |
1514 | |
1515 // O5_savedSP still contains sender's sp | |
1516 | |
1517 // NEW FRAME | |
1518 | |
1519 generate_compute_interpreter_state(Lstate, prevState, false); | |
1520 | |
1521 // At this point a new interpreter frame and state object are created and initialized | |
1522 // Lstate has the pointer to the new activation | |
1523 // Any stack banging or limit check should already be done. | |
1524 | |
1525 Label call_interpreter; | |
1526 | |
1527 __ bind(call_interpreter); | |
1528 | |
1529 | |
1530 #if 1 | |
1531 __ set(0xdead002, Lmirror); | |
1532 __ set(0xdead002, L2_scratch); | |
1533 __ set(0xdead003, L3_scratch); | |
1534 __ set(0xdead004, L4_scratch); | |
1535 __ set(0xdead005, Lscratch); | |
1536 __ set(0xdead006, Lscratch2); | |
1537 __ set(0xdead007, L7_scratch); | |
1538 | |
1539 __ set(0xdeaf002, O2); | |
1540 __ set(0xdeaf003, O3); | |
1541 __ set(0xdeaf004, O4); | |
1542 __ set(0xdeaf005, O5); | |
1543 #endif | |
1544 | |
1545 // Call interpreter (stack bang complete); enter here if the message is | |
1546 // set and we know the stack size is valid. | |
1547 | |
1548 Label call_interpreter_2; | |
1549 | |
1550 __ bind(call_interpreter_2); | |
1551 | |
1552 #ifdef ASSERT | |
1553 { | |
1554 Label skip; | |
1555 __ ld_ptr(STATE(_frame_bottom), G3_scratch); | |
1556 __ cmp(G3_scratch, SP); | |
1557 __ brx(Assembler::equal, false, Assembler::pt, skip); | |
1558 __ delayed()->nop(); | |
1559 __ stop("SP not restored to frame bottom"); | |
1560 __ bind(skip); | |
1561 } | |
1562 #endif | |
1563 | |
1564 VALIDATE_STATE(G3_scratch, 4); | |
1565 __ set_last_Java_frame(SP, noreg); | |
1566 __ mov(Lstate, O0); // (arg) pointer to current state | |
1567 | |
1568 __ call(CAST_FROM_FN_PTR(address, | |
1569 JvmtiExport::can_post_interpreter_events() ? | |
1570 BytecodeInterpreter::runWithChecks | |
1571 : BytecodeInterpreter::run), | |
1572 relocInfo::runtime_call_type); | |
1573 | |
1574 __ delayed()->nop(); | |
1575 | |
1576 __ ld_ptr(STATE(_thread), G2_thread); | |
1577 __ reset_last_Java_frame(); | |
1578 | |
1579 // examine msg from interpreter to determine next action | |
1580 __ ld_ptr(STATE(_thread), G2_thread); // restore G2_thread | |
1581 | |
1582 __ ld(STATE(_msg), L1_scratch); // Get new message | |
1583 | |
1584 Label call_method; | |
1585 Label return_from_interpreted_method; | |
1586 Label throw_exception; | |
1587 Label do_OSR; | |
1588 Label bad_msg; | |
1589 Label resume_interpreter; | |
1590 | |
1591 __ cmp(L1_scratch, (int)BytecodeInterpreter::call_method); | |
1592 __ br(Assembler::equal, false, Assembler::pt, call_method); | |
1593 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::return_from_method); | |
1594 __ br(Assembler::equal, false, Assembler::pt, return_from_interpreted_method); | |
1595 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::throwing_exception); | |
1596 __ br(Assembler::equal, false, Assembler::pt, throw_exception); | |
1597 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::do_osr); | |
1598 __ br(Assembler::equal, false, Assembler::pt, do_OSR); | |
1599 __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::more_monitors); | |
1600 __ br(Assembler::notEqual, false, Assembler::pt, bad_msg); | |
1601 | |
1602 // Allocate more monitor space, shuffle expression stack.... | |
1603 | |
1604 generate_more_monitors(); | |
1605 | |
1606 // new monitor slot allocated, resume the interpreter. | |
1607 | |
1608 __ set((int)BytecodeInterpreter::got_monitors, L1_scratch); | |
1609 VALIDATE_STATE(G3_scratch, 5); | |
1610 __ ba(false, call_interpreter); | |
1611 __ delayed()->st(L1_scratch, STATE(_msg)); | |
1612 | |
1613 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode) | |
1614 unctrap_frame_manager_entry = __ pc(); | |
1615 | |
1616 // QQQ what message do we send | |
1617 | |
1618 __ ba(false, call_interpreter); | |
1619 __ delayed()->ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame | |
1620 | |
1621 //============================================================================= | |
1622 // Returning from a compiled method into a deopted method. The bytecode at the | |
1623 // bcp has completed. The result of the bytecode is in the native abi (the tosca | |
1624 // for the template based interpreter). Any stack space that was used by the | |
1625 // bytecode that has completed has been removed (e.g. parameters for an invoke) | |
1626 // so all that we have to do is place any pending result on the expression stack | |
1627 // and resume execution on the next bytecode. | |
1628 | |
1629 generate_deopt_handling(); | |
1630 | |
1631 // ready to resume the interpreter | |
1632 | |
1633 __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch); | |
1634 __ ba(false, call_interpreter); | |
1635 __ delayed()->st(L1_scratch, STATE(_msg)); | |
1636 | |
1637 // The current frame has caught an exception that we need to dispatch to the | |
1638 // handler. We can get here because a native interpreter frame caught | |
1639 // an exception, in which case there is no handler and we must rethrow. | |
1640 // If it is a vanilla interpreted frame we simply drop into the | |
1641 // interpreter and let it do the lookup. | |
1642 | |
1643 Interpreter::_rethrow_exception_entry = __ pc(); | |
1644 | |
1645 Label return_with_exception; | |
1646 Label unwind_and_forward; | |
1647 | |
1648 // O0: exception | |
1649 // O7: throwing pc | |
1650 | |
1651 // We want exception in the thread no matter what we ultimately decide about frame type. | |
1652 | |
1653 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); | |
1654 __ verify_thread(); | |
1655 __ st_ptr(O0, exception_addr); | |
1656 | |
1657 // get the methodOop | |
1658 __ ld_ptr(STATE(_method), G5_method); | |
1659 | |
1660 // is this current frame vanilla or native? | |
1661 | |
1662 __ ld(access_flags, Gtmp1); | |
1663 __ btst(JVM_ACC_NATIVE, Gtmp1); | |
1664 __ br(Assembler::zero, false, Assembler::pt, return_with_exception); // vanilla interpreted frame handle directly | |
1665 __ delayed()->nop(); | |
1666 | |
1667 // We drop through to unwind a native interpreted frame with a pending exception. | |
1668 // We jump here for the initial interpreter frame with an exception pending. | |
1669 // We unwind the current activation and forward it to our caller. | |
1670 | |
1671 __ bind(unwind_and_forward); | |
1672 | |
1673 // Unwind the frame and jump to forward exception. Unwinding will place the throwing pc in O7 | |
1674 // as expected by forward_exception. | |
1675 | |
1676 __ restore(FP, G0, SP); // unwind interpreter state frame | |
1677 __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); | |
1678 __ delayed()->mov(I5_savedSP->after_restore(), SP); | |
1679 | |
1680 // Return point from a call which returns a result in the native abi | |
1681 // (c1/c2/jni-native). This result must be processed onto the java | |
1682 // expression stack. | |
1683 // | |
1684 // A pending exception may be present, in which case there is no result. | |
1685 | |
1686 address return_from_native_method = __ pc(); | |
1687 | |
1688 VALIDATE_STATE(G3_scratch, 6); | |
1689 | |
1690 // The result, if any, is in the native abi result registers (O0..O1/F0..F1). The java expression | |
1691 // stack is in the state that the calling convention left it. | |
1692 // Copy the result from the native abi registers and place it on the java expression stack. | |
1693 | |
1694 // Current interpreter state is present in Lstate | |
1695 | |
1696 // Exception pending? | |
1697 | |
1698 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame | |
1699 __ ld_ptr(exception_addr, Lscratch); // get any pending exception | |
1700 __ tst(Lscratch); // exception pending? | |
1701 __ brx(Assembler::notZero, false, Assembler::pt, return_with_exception); | |
1702 __ delayed()->nop(); | |
1703 | |
1704 // Process the native abi result to java expression stack | |
1705 | |
1706 __ ld_ptr(STATE(_result._to_call._callee), L4_scratch); // called method | |
1707 __ ld_ptr(STATE(_stack), L1_scratch); // get top of java expr stack | |
1708 __ lduh(L4_scratch, in_bytes(methodOopDesc::size_of_parameters_offset()), L2_scratch); // get parameter size | |
1709 __ sll(L2_scratch, LogBytesPerWord, L2_scratch ); // parameter size in bytes | |
1710 __ add(L1_scratch, L2_scratch, L1_scratch); // stack destination for result | |
123 | 1711 __ ld(L4_scratch, in_bytes(methodOopDesc::result_index_offset()), L3_scratch); // called method result type index |
0 | 1712 |
1713 // tosca is really just native abi | |
1714 __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch); | |
1715 __ sll(L3_scratch, LogBytesPerWord, L3_scratch); | |
1716 __ ld_ptr(L4_scratch, L3_scratch, Lscratch); // get typed result converter address | |
1717 __ jmpl(Lscratch, G0, O7); // and convert it | |
1718 __ delayed()->nop(); | |
1719 | |
1720 // L1_scratch points to top of stack (prepushed) | |
1721 | |
1722 __ ba(false, resume_interpreter); | |
1723 __ delayed()->mov(L1_scratch, O1); | |
1724 | |
1725 // An exception is being caught on return to a vanilla interpreter frame. | |
1726 // Empty the stack and resume interpreter | |
1727 | |
1728 __ bind(return_with_exception); | |
1729 | |
1730 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame | |
1731 __ ld_ptr(STATE(_stack_base), O1); // empty java expression stack | |
1732 __ ba(false, resume_interpreter); | |
1733 __ delayed()->sub(O1, wordSize, O1); // account for prepush | |
1734 | |
1735 // Return from an interpreted method: we return the result appropriate to the caller (i.e. "recursive" | |
1736 // interpreter call, or native) and unwind this interpreter activation. | |
1737 // All monitors should be unlocked. | |
1738 | |
1739 __ bind(return_from_interpreted_method); | |
1740 | |
1741 VALIDATE_STATE(G3_scratch, 7); | |
1742 | |
1743 Label return_to_initial_caller; | |
1744 | |
1745 // The interpreted result is on the top of the completed activation's expression stack. | |
1746 // We must return it to the top of the caller's stack if the caller was interpreted; | |
1747 // otherwise we convert it to a native abi result and return to the call_stub/c1/c2. | |
1748 // The caller's expression stack was truncated by the call; however, the current activation | |
1749 // has enough stuff on the stack that we have usable space there no matter what. The | |
1750 // other thing that makes it easy is that the top of the caller's stack is stored in STATE(_locals) | |
1751 // for the current activation. | |
1752 | |
1753 __ ld_ptr(STATE(_prev_link), L1_scratch); | |
1754 __ ld_ptr(STATE(_method), L2_scratch); // get method just executed | |
123 | 1755 __ ld(L2_scratch, in_bytes(methodOopDesc::result_index_offset()), L2_scratch); |
0 | 1756 __ tst(L1_scratch); |
1757 __ brx(Assembler::zero, false, Assembler::pt, return_to_initial_caller); | |
1758 __ delayed()->sll(L2_scratch, LogBytesPerWord, L2_scratch); | |
1759 | |
1760 // Copy result to callers java stack | |
1761 | |
1762 __ set((intptr_t)CppInterpreter::_stack_to_stack, L4_scratch); | |
1763 __ ld_ptr(L4_scratch, L2_scratch, Lscratch); // get typed result converter address | |
1764 __ ld_ptr(STATE(_stack), O0); // current top (prepushed) | |
1765 __ ld_ptr(STATE(_locals), O1); // stack destination | |
1766 | |
1767 // O0 - will be source, O1 - will be destination (preserved) | |
1768 __ jmpl(Lscratch, G0, O7); // and convert it | |
1769 __ delayed()->add(O0, wordSize, O0); // get source (top of current expr stack) | |
1770 | |
1771 // O1 == &locals[0] | |
1772 | |
1773 // Result is now on caller's stack. Just unwind current activation and resume | |
1774 | |
1775 Label unwind_recursive_activation; | |
1776 | |
1777 | |
1778 __ bind(unwind_recursive_activation); | |
1779 | |
1780 // O1 == &locals[0] (really the caller's stack top) for the activation now returning. | |
1781 // We are returning to an interpreter method from a "recursive" interpreter call. | |
1782 // The result converter left O1 pointing to the top of the (prepushed) java stack for the method we are returning | |
1783 // to. Now all we must do is unwind the state from the completed call. | |
1784 | |
1785 // Must restore stack | |
1786 VALIDATE_STATE(G3_scratch, 8); | |
1787 | |
1788 // Return to interpreter method after a method call (interpreted/native/c1/c2) has completed. | |
1789 // Result if any is already on the caller's stack. All we must do now is remove the now dead | |
1790 // frame and tell interpreter to resume. | |
1791 | |
1792 | |
1793 __ mov(O1, I1); // pass back new stack top across activation | |
1794 // POP FRAME HERE ================================== | |
1795 __ restore(FP, G0, SP); // unwind interpreter state frame | |
1796 __ ld_ptr(STATE(_frame_bottom), SP); // restore to full stack frame | |
1797 | |
1798 | |
1799 // Resume the interpreter. The current frame contains the current interpreter | |
1800 // state object. | |
1801 // | |
1802 // O1 == new java stack pointer | |
1803 | |
1804 __ bind(resume_interpreter); | |
1805 VALIDATE_STATE(G3_scratch, 10); | |
1806 | |
1807 // A frame we have already used before, so there is no need to bang the stack; use the call_interpreter_2 entry. | |
1808 | |
1809 __ set((int)BytecodeInterpreter::method_resume, L1_scratch); | |
1810 __ st(L1_scratch, STATE(_msg)); | |
1811 __ ba(false, call_interpreter_2); | |
1812 __ delayed()->st_ptr(O1, STATE(_stack)); | |
1813 | |
1814 | |
1815 // Fast accessor methods share this entry point. | |
1816 // This works because the frame manager is in the same codelet. | |
1817 // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call; | |
1818 // we need to do a little register fixup here once we distinguish the two of them. | |
1819 if (UseFastAccessorMethods && !synchronized) { | |
1820 // Call stub_return address still in O7 | |
1821 __ bind(fast_accessor_slow_entry_path); | |
1822 __ set((intptr_t)return_from_native_method - 8, Gtmp1); | |
1823 __ cmp(Gtmp1, O7); // returning to interpreter? | |
1824 __ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep | |
1825 __ delayed()->nop(); | |
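// The test above relies on the convention used by the frame manager below:
// before calling out to a specialized entry it sets O7 to
// return_from_native_method - 8, and a SPARC return lands at %o7 + 8.
// So if O7 still holds that value we were called from the frame manager
// itself (prevState is already linked); otherwise this is an initial entry
// from call_stub/c1/c2 and prevState must be cleared.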
1826 __ ba(false, re_dispatch); | |
1827 __ delayed()->mov(G0, prevState); // initial entry | |
1828 | |
1829 } | |
1830 | |
1831 // interpreter returning to native code (call_stub/c1/c2) | |
1832 // convert result and unwind initial activation | |
1833 // L2_scratch - scaled result type index | |
1834 | |
1835 __ bind(return_to_initial_caller); | |
1836 | |
1837 __ set((intptr_t)CppInterpreter::_stack_to_native_abi, L4_scratch); | |
1838 __ ld_ptr(L4_scratch, L2_scratch, Lscratch); // get typed result converter address | |
1839 __ ld_ptr(STATE(_stack), O0); // current top (prepushed) | |
1840 __ jmpl(Lscratch, G0, O7); // and convert it | |
1841 __ delayed()->add(O0, wordSize, O0); // get source (top of current expr stack) | |
1842 | |
1843 Label unwind_initial_activation; | |
1844 __ bind(unwind_initial_activation); | |
1845 | |
1846 // RETURN TO CALL_STUB/C1/C2 code (result, if any, in I0..I1 / F0..F1) | |
1847 // we can return here with an exception that wasn't handled by interpreted code | |
1848 // how does c1/c2 see it on return? | |
1849 | |
1850 // compute resulting sp before/after args popped depending upon calling convention | |
1851 // __ ld_ptr(STATE(_saved_sp), Gtmp1); | |
1852 // | |
1853 // POP FRAME HERE ================================== | |
1854 __ restore(FP, G0, SP); | |
1855 __ retl(); | |
1856 __ delayed()->mov(I5_savedSP->after_restore(), SP); | |
1857 | |
1858 // OSR request, unwind the current frame and transfer to the OSR entry | |
1859 // and enter OSR nmethod | |
1860 | |
1861 __ bind(do_OSR); | |
1862 Label remove_initial_frame; | |
1863 __ ld_ptr(STATE(_prev_link), L1_scratch); | |
1864 __ ld_ptr(STATE(_result._osr._osr_buf), G1_scratch); | |
1865 | |
1866 // We are going to pop this frame. Is there another interpreter frame underneath | |
1867 // it or is it callstub/compiled? | |
1868 | |
1869 __ tst(L1_scratch); | |
1870 __ brx(Assembler::zero, false, Assembler::pt, remove_initial_frame); | |
1871 __ delayed()->ld_ptr(STATE(_result._osr._osr_entry), G3_scratch); | |
1872 | |
1873 // The frame underneath is an interpreter frame; simply unwind it. | |
1874 // POP FRAME HERE ================================== | |
1875 __ restore(FP, G0, SP); // unwind interpreter state frame | |
1876 __ mov(I5_savedSP->after_restore(), SP); | |
1877 | |
1878 // Since we are now calling native code we need to change our "return address" from the | |
1879 // dummy RecursiveInterpreterActivation to a return from native. | |
1880 | |
1881 __ set((intptr_t)return_from_native_method - 8, O7); | |
1882 | |
1883 __ jmpl(G3_scratch, G0, G0); | |
1884 __ delayed()->mov(G1_scratch, O0); | |
1885 | |
1886 __ bind(remove_initial_frame); | |
1887 | |
1888 // POP FRAME HERE ================================== | |
1889 __ restore(FP, G0, SP); | |
1890 __ mov(I5_savedSP->after_restore(), SP); | |
1891 __ jmpl(G3_scratch, G0, G0); | |
1892 __ delayed()->mov(G1_scratch, O0); | |
1893 | |
1894 // Call a new method. All we do is (temporarily) trim the expression stack, | |
1895 // push a return address to bring us back here, and leap to the new entry. | |
1896 // At this point we have a topmost frame that was allocated by the frame manager | |
1897 // which contains the current method interpreted state. We trim this frame | |
1898 // of excess java expression stack entries and then recurse. | |
1899 | |
1900 __ bind(call_method); | |
1901 | |
1902 // stack points to next free location and not top element on expression stack | |
1903 // method expects sp to be pointing to topmost element | |
1904 | |
1905 __ ld_ptr(STATE(_thread), G2_thread); | |
1906 __ ld_ptr(STATE(_result._to_call._callee), G5_method); | |
1907 | |
1908 | |
1909 // SP already takes into account the 2 extra words we use for slop | |
1910 // when we call a "static long no_params()" method. So if | |
1911 // we trim back sp by the amount of unused java expression stack, | |
1912 // the 2 extra words we need will automagically be there. | |
1913 // We also have to worry about keeping SP aligned. | |
1914 | |
1915 __ ld_ptr(STATE(_stack), Gargs); | |
1916 __ ld_ptr(STATE(_stack_limit), L1_scratch); | |
1917 | |
1918 // compute the unused java stack size | |
1919 __ sub(Gargs, L1_scratch, L2_scratch); // compute unused space | |
1920 | |
123 | 1921 // Round down the unused space so that the stack is always 16-byte aligned |
1922 // by making the unused space a multiple of the size of two longs. | |
0 | 1923 |
123 | 1924 __ and3(L2_scratch, -2*BytesPerLong, L2_scratch); |
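// and3 with -2*BytesPerLong (i.e. -16) simply clears the low four bits, e.g. an
// unused space of 0x3c bytes rounds down to 0x30, so the add below keeps SP
// 16-byte aligned.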
0 | 1925 |
1926 // Now trim the stack | |
1927 __ add(SP, L2_scratch, SP); | |
1928 | |
1929 | |
1930 // Now point to the final argument (account for prepush) | |
1931 __ add(Gargs, wordSize, Gargs); | |
1932 #ifdef ASSERT | |
1933 // Make sure we have space for the window | |
1934 __ sub(Gargs, SP, L1_scratch); | |
1935 __ cmp(L1_scratch, 16*wordSize); | |
1936 { | |
1937 Label skip; | |
1938 __ brx(Assembler::greaterEqual, false, Assembler::pt, skip); | |
1939 __ delayed()->nop(); | |
1940 __ stop("killed stack"); | |
1941 __ bind(skip); | |
1942 } | |
1943 #endif // ASSERT | |
1944 | |
1945 // Create a new frame where we can store values that make it look like the interpreter | |
1946 // really recursed. | |
1947 | |
1948 // prepare to recurse or call specialized entry | |
1949 | |
1950 // First link the registers we need | |
1951 | |
1952 // make the pc look good in debugger | |
1953 __ set(CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation), O7); | |
1954 // argument too | |
1955 __ mov(Lstate, I0); | |
1956 | |
1957 // Record our sending SP | |
1958 __ mov(SP, O5_savedSP); | |
1959 | |
1960 __ ld_ptr(STATE(_result._to_call._callee_entry_point), L2_scratch); | |
1961 __ set((intptr_t) entry_point, L1_scratch); | |
1962 __ cmp(L1_scratch, L2_scratch); | |
1963 __ brx(Assembler::equal, false, Assembler::pt, re_dispatch); | |
1964 __ delayed()->mov(Lstate, prevState); // link activations | |
1965 | |
1966 // The method uses a specialized entry; push a return so we look like a call stub setup. | |
1967 // This path will handle the fact that the result is returned in registers and not | |
1968 // on the java stack. | |
1969 | |
1970 __ set((intptr_t)return_from_native_method - 8, O7); | |
1971 __ jmpl(L2_scratch, G0, G0); // Do specialized entry | |
1972 __ delayed()->nop(); | |
1973 | |
1974 // | |
1975 // Bad Message from interpreter | |
1976 // | |
1977 __ bind(bad_msg); | |
1978 __ stop("Bad message from interpreter"); | |
1979 | |
1980 // The interpreted method "returned" with an exception; pass it on... | |
1981 // Pass result, unwind activation and continue/return to interpreter/call_stub | |
1982 // We handle result (if any) differently based on return to interpreter or call_stub | |
1983 | |
1984 __ bind(throw_exception); | |
1985 __ ld_ptr(STATE(_prev_link), L1_scratch); | |
1986 __ tst(L1_scratch); | |
1987 __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward); | |
1988 __ delayed()->nop(); | |
1989 | |
1990 __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args | |
1991 __ ba(false, unwind_recursive_activation); | |
1992 __ delayed()->nop(); | |
1993 | |
1994 interpreter_frame_manager = entry_point; | |
1995 return entry_point; | |
1996 } | |
1997 | |
1998 InterpreterGenerator::InterpreterGenerator(StubQueue* code) | |
1999 : CppInterpreterGenerator(code) { | |
2000 generate_all(); // down here so it can be "virtual" | |
2001 } | |
2002 | |
2003 | |
2004 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) { | |
2005 | |
2006 // Figure out the size of an interpreter frame (in words) given that we have a fully allocated | |
2007 // expression stack, the callee will have callee_extra_locals (so we can account for | |
2008 // frame extension) and monitor_size for monitors. Basically we need to calculate | |
2009 // this exactly like generate_fixed_frame/generate_compute_interpreter_state. | |
2010 // | |
2011 // | |
2012 // The big complicating thing here is that we must ensure that the stack stays properly | |
2013 // aligned. This would be even uglier if monitor size wasn't modulo what the stack | |
2014 // needs to be aligned for. We are given that the sp (fp) is already aligned by | |
2015 // the caller so we must ensure that it is properly aligned for our callee. | |
2016 // | |
2017 // The c++ interpreter always makes sure that we have enough extra space on the | |
2018 // stack at all times to deal with the "static long no_params()" method issue. This | |
2019 // is "slop_factor" here. | |
2020 const int slop_factor = 2; | |
2021 | |
2022 const int fixed_size = sizeof(BytecodeInterpreter)/wordSize + // interpreter state object | |
2023 frame::memory_parameter_word_sp_offset; // register save area + param window | |
710 | 2024 const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); |
0 | 2025 return (round_to(max_stack + |
710 | 2026 extra_stack + |
0 | 2027 slop_factor + |
2028 fixed_size + | |
2029 monitor_size + | |
2030 (callee_extra_locals * Interpreter::stackElementWords()), WordsPerLong)); | |
2031 | |
2032 } | |
2033 | |
2034 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) { | |
2035 | |
2036 // See call_stub code | |
2037 int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset, | |
2038 WordsPerLong); // 7 + register save area | |
2039 | |
2040 // Save space for one monitor to get into the interpreted method in case | |
2041 // the method is synchronized | |
2042 int monitor_size = method->is_synchronized() ? | |
2043 1*frame::interpreter_frame_monitor_size() : 0; | |
2044 return size_activation_helper(method->max_locals(), method->max_stack(), | |
2045 monitor_size) + call_stub_size; | |
2046 } | |
2047 | |
2048 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill, | |
2049 frame* caller, | |
2050 frame* current, | |
2051 methodOop method, | |
2052 intptr_t* locals, | |
2053 intptr_t* stack, | |
2054 intptr_t* stack_base, | |
2055 intptr_t* monitor_base, | |
2056 intptr_t* frame_bottom, | |
2057 bool is_top_frame | |
2058 ) | |
2059 { | |
2060 // What about any vtable? | |
2061 // | |
2062 to_fill->_thread = JavaThread::current(); | |
2063 // This gets filled in later but make it something recognizable for now | |
2064 to_fill->_bcp = method->code_base(); | |
2065 to_fill->_locals = locals; | |
2066 to_fill->_constants = method->constants()->cache(); | |
2067 to_fill->_method = method; | |
2068 to_fill->_mdx = NULL; | |
2069 to_fill->_stack = stack; | |
2070 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) { | |
2071 to_fill->_msg = deopt_resume2; | |
2072 } else { | |
2073 to_fill->_msg = method_resume; | |
2074 } | |
2075 to_fill->_result._to_call._bcp_advance = 0; | |
2076 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone | |
2077 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone | |
2078 to_fill->_prev_link = NULL; | |
2079 | |
2080 // Fill in the registers for the frame | |
2081 | |
2082 // Need to install _sender_sp. Actually not too hard in C++! | |
2083 // When the skeletal frames are laid out we fill in a value | |
2084 // for _sender_sp. That value is only correct for the oldest | |
2085 // skeletal frame constructed (because there is only a single | |
2086 // entry for "caller_adjustment"). While the skeletal frames | |
2087 // exist that is good enough. We correct that calculation | |
2088 // here and get all the frames correct. | |
2089 | |
2090 // to_fill->_sender_sp = locals - (method->size_of_parameters() - 1); | |
2091 | |
2092 *current->register_addr(Lstate) = (intptr_t) to_fill; | |
2093 // skeletal already places a useful value here and this doesn't account | |
2094 // for alignment so don't bother. | |
2095 // *current->register_addr(I5_savedSP) = (intptr_t) locals - (method->size_of_parameters() - 1); | |
2096 | |
2097 if (caller->is_interpreted_frame()) { | |
2098 interpreterState prev = caller->get_interpreterState(); | |
2099 to_fill->_prev_link = prev; | |
2100 // Make the prev callee look proper | |
2101 prev->_result._to_call._callee = method; | |
2102 if (*prev->_bcp == Bytecodes::_invokeinterface) { | |
2103 prev->_result._to_call._bcp_advance = 5; | |
2104 } else { | |
2105 prev->_result._to_call._bcp_advance = 3; | |
2106 } | |
2107 } | |
2108 to_fill->_oop_temp = NULL; | |
2109 to_fill->_stack_base = stack_base; | |
2110 // Need +1 here because stack_base points to the word just above the first expr stack entry | |
2111 // and stack_limit is supposed to point to the word just below the last expr stack entry. | |
2112 // See generate_compute_interpreter_state. | |
710 | 2113 int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); |
2114 to_fill->_stack_limit = stack_base - (method->max_stack() + 1 + extra_stack); | |
0 | 2115 to_fill->_monitor_base = (BasicObjectLock*) monitor_base; |
2116 | |
2117 // sparc specific | |
2118 to_fill->_frame_bottom = frame_bottom; | |
2119 to_fill->_self_link = to_fill; | |
2120 #ifdef ASSERT | |
2121 to_fill->_native_fresult = 123456.789; | |
2122 to_fill->_native_lresult = CONST64(0xdeadcafedeafcafe); | |
2123 #endif | |
2124 } | |
2125 | |
2126 void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) { | |
2127 istate->_last_Java_pc = (intptr_t*) last_Java_pc; | |
2128 } | |
2129 | |
2130 | |
2131 int AbstractInterpreter::layout_activation(methodOop method, | |
2132 int tempcount, // Number of slots on java expression stack in use | |
2133 int popframe_extra_args, | |
2134 int moncount, // Number of active monitors | |
2135 int callee_param_size, | |
2136 int callee_locals_size, | |
2137 frame* caller, | |
2138 frame* interpreter_frame, | |
2139 bool is_top_frame) { | |
2140 | |
2141 assert(popframe_extra_args == 0, "NEED TO FIX"); | |
2142 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state() | |
2143 // does as far as allocating an interpreter frame. | |
2144 // If interpreter_frame!=NULL, set up the method, locals, and monitors. | |
2145 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size, | |
2146 // as determined by a previous call to this method. | |
2147 // It is also guaranteed to be walkable even though it is in a skeletal state | |
2148 // NOTE: return size is in words not bytes | |
2149 // NOTE: tempcount is the current size of the java expression stack. For topmost | |
2150 // frames we will allocate a full sized expression stack and not the cut-back | |
2151 // version that non-top frames have. | |
2152 | |
2153 // Calculate the amount our frame will be adjusted by the callee. For the top frame | |
2154 // this is zero. | |
2155 | |
2156 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it | |
2157 // calculates the extra locals based on itself. Not what the callee does | |
2158 // to it. So it ignores last_frame_adjust value. Seems suspicious as far | |
2159 // as getting sender_sp correct. | |
2160 | |
2161 int extra_locals_size = callee_locals_size - callee_param_size; | |
2162 int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize; | |
2163 int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size); | |
2164 int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size); | |
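// Note that, as written, full_frame_words and short_frame_words are computed
// from identical arguments (both use method->max_stack()), so frame_words
// currently comes out the same whether or not this is the top frame.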
2165 int frame_words = is_top_frame ? full_frame_words : short_frame_words; | |
2166 | |
2167 | |
2168 /* | |
2169 if we actually have a frame to layout we must now fill in all the pieces. This means both | |
2170 the interpreterState and the registers. | |
2171 */ | |
2172 if (interpreter_frame != NULL) { | |
2173 | |
2174 // MUCHO HACK | |
2175 | |
2176 intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words); | |
123 | 2177 // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode. |
2178 assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation"); | |
2179 frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS); | |
0 | 2180 |
2181 /* Now fill in the interpreterState object */ | |
2182 | |
2183 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter)); | |
2184 | |
2185 | |
2186 intptr_t* locals; | |
2187 | |
2188 // Calculate the position of locals[0]. This is painful because of | |
2189 // stack alignment (same as ia64). The problem is that we cannot | |
2190 // compute the location of locals from fp(). fp() will account | |
2191 // for the extra locals but it also accounts for aligning the stack | |
2192 // and we can't determine if locals[0] was misaligned but max_locals | |
2193 // was enough to have the | |
2194 // calculate position of locals. fp already accounts for extra locals. | |
2195 // +2 for the static long no_params() issue. | |
2196 | |
2197 if (caller->is_interpreted_frame()) { | |
2198 // locals must agree with the caller because it will be used to set the | |
2199 // caller's tos when we return. | |
2200 interpreterState prev = caller->get_interpreterState(); | |
2201 // stack() is prepushed. | |
2202 locals = prev->stack() + method->size_of_parameters(); | |
2203 } else { | |
2204 // Lay out locals block in the caller adjacent to the register window save area. | |
2205 // | |
2206 // Compiled frames do not allocate a varargs area which is why this if | |
2207 // statement is needed. | |
2208 // | |
2209 intptr_t* fp = interpreter_frame->fp(); | |
2210 int local_words = method->max_locals() * Interpreter::stackElementWords(); | |
2211 | |
2212 if (caller->is_compiled_frame()) { | |
2213 locals = fp + frame::register_save_words + local_words - 1; | |
2214 } else { | |
2215 locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1; | |
2216 } | |
2217 | |
2218 } | |
2219 // END MUCHO HACK | |
2220 | |
2221 intptr_t* monitor_base = (intptr_t*) cur_state; | |
2222 intptr_t* stack_base = monitor_base - monitor_size; | |
2223 /* +1 because stack is always prepushed */ | |
2224 intptr_t* stack = stack_base - (tempcount + 1); | |
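// Resulting layout inside the skeletal frame (addresses decrease downward):
//
//   interpreter_frame->fp()
//   [ BytecodeInterpreter state ]     <-- cur_state
//   [ monitors, monitor_size words ]  <-- monitor_base == (intptr_t*) cur_state
//   [ expression stack ]              <-- stack_base; stack = stack_base - (tempcount + 1)
//   ...
//   frame_bottom (this frame's sp)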
2225 | |
2226 | |
2227 BytecodeInterpreter::layout_interpreterState(cur_state, | |
2228 caller, | |
2229 interpreter_frame, | |
2230 method, | |
2231 locals, | |
2232 stack, | |
2233 stack_base, | |
2234 monitor_base, | |
2235 frame_bottom, | |
2236 is_top_frame); | |
2237 | |
2238 BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp()); | |
2239 | |
2240 } | |
2241 return frame_words; | |
2242 } | |
2243 | |
2244 #endif // CC_INTERP |