comparison src/cpu/ppc/vm/interpreter_ppc.cpp @ 14408:ec28f9c041ff

8019972: PPC64 (part 9): platform files for interpreter only VM. Summary: With this change the HotSpot core build works on Linux/PPC64. The VM successfully executes simple test programs. Reviewed-by: kvn
author goetz
date Fri, 02 Aug 2013 16:46:45 +0200
parents
children 67fa91961822
comparison
equal deleted inserted replaced
14407:94c202aa2646 14408:ec28f9c041ff
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2013 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "interpreter/bytecodeHistogram.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "interpreter/interpreterGenerator.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "interpreter/templateTable.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/methodData.hpp"
36 #include "oops/method.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "prims/jvmtiThreadState.hpp"
40 #include "prims/methodHandles.hpp"
41 #include "runtime/arguments.hpp"
42 #include "runtime/deoptimization.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "runtime/synchronizer.hpp"
47 #include "runtime/timer.hpp"
48 #include "runtime/vframeArray.hpp"
49 #include "utilities/debug.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_Runtime1.hpp"
52 #endif
53
54 #ifndef CC_INTERP
55 #error "CC_INTERP must be defined on PPC"
56 #endif
57
58 #define __ _masm->
59
60 #ifdef PRODUCT
61 #define BLOCK_COMMENT(str) // nothing
62 #else
63 #define BLOCK_COMMENT(str) __ block_comment(str)
64 #endif
65
66 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
67
// Map a BasicType to the interpreter's result-handler table index.
// T_OBJECT and T_ARRAY deliberately share slot 9: both are returned as
// oops and use the same result handler.
68 int AbstractInterpreter::BasicType_as_index(BasicType type) {
69 int i = 0;
70 switch (type) {
71 case T_BOOLEAN: i = 0; break;
72 case T_CHAR : i = 1; break;
73 case T_BYTE : i = 2; break;
74 case T_SHORT : i = 3; break;
75 case T_INT : i = 4; break;
76 case T_LONG : i = 5; break;
77 case T_VOID : i = 6; break;
78 case T_FLOAT : i = 7; break;
79 case T_DOUBLE : i = 8; break;
80 case T_OBJECT : i = 9; break;
81 case T_ARRAY : i = 9; break;
82 default : ShouldNotReachHere();
83 }
// Guard against the table index running past the handler array.
84 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
85 return i;
86 }
87
88 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
89 // Slow_signature handler that respects the PPC C calling conventions.
90 //
91 // We get called by the native entry code with our output register
92 // area == 8. First we call InterpreterRuntime::get_result_handler
93 // to copy the pointer to the signature string temporarily to the
94 // first C-argument and to return the result_handler in
95 // R3_RET. Since native_entry will copy the jni-pointer to the
96 // first C-argument slot later on, it is OK to occupy this slot
97 // temporarily. Then we copy the argument list on the java
98 // expression stack into native varargs format on the native stack
99 // and load arguments into argument registers. Integer arguments in
100 // the varargs vector will be sign-extended to 8 bytes.
101 //
102 // On entry:
103 // R3_ARG1 - intptr_t* Address of java argument list in memory.
104 // R15_prev_state - BytecodeInterpreter* Address of interpreter state for
105 // this method
106 // R19_method
107 //
108 // On exit (just before return instruction):
109 // R3_RET - contains the address of the result_handler.
110 // R4_ARG2 - is not updated for static methods and contains "this" otherwise.
111 // R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
112 // ARGi contains this argument. Otherwise, ARGi is not updated.
113 // F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
114
// Shift amount for the computed branch into the register-move tables below:
// each table case is exactly 2 instructions = 8 bytes, and 1 << 3 == 8.
115 const int LogSizeOfTwoInstructions = 3;
116
117 // FIXME: use Argument:: GL: Argument names different numbers!
118 const int max_fp_register_arguments = 13;
119 const int max_int_register_arguments = 6; // first 2 are reserved
120
// Non-volatile temp registers: they must survive the two call_VM_leaf
// calls below, so volatile scratch registers cannot be used here.
121 const Register arg_java = R21_tmp1;
122 const Register arg_c = R22_tmp2;
123 const Register signature = R23_tmp3; // is string
124 const Register sig_byte = R24_tmp4;
125 const Register fpcnt = R25_tmp5;
126 const Register argcnt = R26_tmp6;
127 const Register intSlot = R27_tmp7;
128 const Register target_sp = R28_tmp8;
129 const FloatRegister floatSlot = F0;
130
// NOTE(review): emit_fd presumably emits the PPC64 ELF function
// descriptor so this stub is callable through a function pointer —
// confirm against the macroAssembler implementation.
131 address entry = __ emit_fd();
132
133 __ save_LR_CR(R0);
134 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
135 // We use target_sp for storing arguments in the C frame.
136 __ mr(target_sp, R1_SP);
137 __ push_frame_abi112_nonvolatiles(0, R11_scratch1);
138
139 __ mr(arg_java, R3_ARG1);
140
141 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
142
143 // Signature is in R3_RET. Signature is callee saved.
144 __ mr(signature, R3_RET);
145
146 // Reload method, it may have moved.
147 #ifdef CC_INTERP
148 __ ld(R19_method, state_(_method));
149 #else
150 __ unimplemented("slow signature handler 1");
151 #endif
152
153 // Get the result handler.
154 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
155
156 // Reload method, it may have moved.
157 #ifdef CC_INTERP
158 __ ld(R19_method, state_(_method));
159 #else
160 __ unimplemented("slow signature handler 2");
161 #endif
162
163 {
164 Label L;
165 // test if static
166 // _access_flags._flags must be at offset 0.
167 // TODO PPC port: requires change in shared code.
168 //assert(in_bytes(AccessFlags::flags_offset()) == 0,
169 // "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
170 // _access_flags must be a 32 bit value.
171 assert(sizeof(AccessFlags) == 4, "wrong size");
172 __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
173 // testbit with condition register.
174 __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
175 __ btrue(CCR0, L);
176 // For non-static functions, pass "this" in R4_ARG2 and copy it
177 // to 2nd C-arg slot.
178 // We need to box the Java object here, so we use arg_java
179 // (address of current Java stack slot) as argument and don't
180 // dereference it as in case of ints, floats, etc.
181 __ mr(R4_ARG2, arg_java);
182 __ addi(arg_java, arg_java, -BytesPerWord);
183 __ std(R4_ARG2, _abi(carg_2), target_sp);
184 __ bind(L);
185 }
186
187 // Will be incremented directly after loop_start. argcnt=0
188 // corresponds to 3rd C argument.
189 __ li(argcnt, -1);
190 // arg_c points to 3rd C argument
191 __ addi(arg_c, target_sp, _abi(carg_3));
192 // no floating-point args parsed so far
193 __ li(fpcnt, 0);
194
195 Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
196 Label loop_start, loop_end;
197 Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
198
199 // signature points to '(' at entry
200 #ifdef ASSERT
201 __ lbz(sig_byte, 0, signature);
202 __ cmplwi(CCR0, sig_byte, '(');
203 __ bne(CCR0, do_dontreachhere);
204 #endif
205
// Main loop: one iteration per Java argument, dispatching on its
// JVM field-descriptor character.
206 __ bind(loop_start);
207
208 __ addi(argcnt, argcnt, 1);
209 __ lbzu(sig_byte, 1, signature);
210
211 __ cmplwi(CCR0, sig_byte, ')'); // end of signature
212 __ beq(CCR0, loop_end);
213
214 __ cmplwi(CCR0, sig_byte, 'B'); // byte
215 __ beq(CCR0, do_int);
216
217 __ cmplwi(CCR0, sig_byte, 'C'); // char
218 __ beq(CCR0, do_int);
219
220 __ cmplwi(CCR0, sig_byte, 'D'); // double
221 __ beq(CCR0, do_double);
222
223 __ cmplwi(CCR0, sig_byte, 'F'); // float
224 __ beq(CCR0, do_float);
225
226 __ cmplwi(CCR0, sig_byte, 'I'); // int
227 __ beq(CCR0, do_int);
228
229 __ cmplwi(CCR0, sig_byte, 'J'); // long
230 __ beq(CCR0, do_long);
231
232 __ cmplwi(CCR0, sig_byte, 'S'); // short
233 __ beq(CCR0, do_int);
234
235 __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
236 __ beq(CCR0, do_int);
237
238 __ cmplwi(CCR0, sig_byte, 'L'); // object
239 __ beq(CCR0, do_object);
240
241 __ cmplwi(CCR0, sig_byte, '['); // array
242 __ beq(CCR0, do_array);
243
244 // __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
245 // __ beq(CCR0, do_void);
246
247 __ bind(do_dontreachhere);
248
249 __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
250
// Array descriptor: skip the '[' brackets (and any optional digits)
// until the element-type character is reached.
251 __ bind(do_array);
252
253 {
254 Label start_skip, end_skip;
255
256 __ bind(start_skip);
257 __ lbzu(sig_byte, 1, signature);
258 __ cmplwi(CCR0, sig_byte, '[');
259 __ beq(CCR0, start_skip); // skip further brackets
260 __ cmplwi(CCR0, sig_byte, '9');
261 __ bgt(CCR0, end_skip); // no optional size
262 __ cmplwi(CCR0, sig_byte, '0');
263 __ bge(CCR0, start_skip); // skip optional size
264 __ bind(end_skip);
265
266 __ cmplwi(CCR0, sig_byte, 'L');
267 __ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
268 __ b(do_boxed); // otherwise, go directly to do_boxed
269 }
270
// Object descriptor: skip the class name up to the terminating ';'.
271 __ bind(do_object);
272 {
273 Label L;
274 __ bind(L);
275 __ lbzu(sig_byte, 1, signature);
276 __ cmplwi(CCR0, sig_byte, ';');
277 __ bne(CCR0, L);
278 }
279 // Need to box the Java object here, so we use arg_java (address of
280 // current Java stack slot) as argument and don't dereference it as
281 // in case of ints, floats, etc.
// A null Java reference is passed to native code as NULL, not as the
// (boxed) address of a stack slot holding NULL.
282 Label do_null;
283 __ bind(do_boxed);
284 __ ld(R0,0, arg_java);
285 __ cmpdi(CCR0, R0, 0);
286 __ li(intSlot,0);
287 __ beq(CCR0, do_null);
288 __ mr(intSlot, arg_java);
289 __ bind(do_null);
290 __ std(intSlot, 0, arg_c);
291 __ addi(arg_java, arg_java, -BytesPerWord);
292 __ addi(arg_c, arg_c, BytesPerWord);
293 __ cmplwi(CCR0, argcnt, max_int_register_arguments);
294 __ blt(CCR0, move_intSlot_to_ARG);
295 __ b(loop_start);
296
// 32-bit integral types: lwa sign-extends to 64 bit as the varargs
// convention requires (see the header comment).
297 __ bind(do_int);
298 __ lwa(intSlot, 0, arg_java);
299 __ std(intSlot, 0, arg_c);
300 __ addi(arg_java, arg_java, -BytesPerWord);
301 __ addi(arg_c, arg_c, BytesPerWord);
302 __ cmplwi(CCR0, argcnt, max_int_register_arguments);
303 __ blt(CCR0, move_intSlot_to_ARG);
304 __ b(loop_start);
305
// Longs occupy two Java stack slots, hence the 2*BytesPerWord decrement.
306 __ bind(do_long);
307 __ ld(intSlot, -BytesPerWord, arg_java);
308 __ std(intSlot, 0, arg_c);
309 __ addi(arg_java, arg_java, - 2 * BytesPerWord);
310 __ addi(arg_c, arg_c, BytesPerWord);
311 __ cmplwi(CCR0, argcnt, max_int_register_arguments);
312 __ blt(CCR0, move_intSlot_to_ARG);
313 __ b(loop_start);
314
315 __ bind(do_float);
316 __ lfs(floatSlot, 0, arg_java);
// Where the 4-byte float sits within the 8-byte vararg slot differs
// between the Linux and AIX PPC64 ABIs.
317 #if defined(LINUX)
318 __ stfs(floatSlot, 4, arg_c);
319 #elif defined(AIX)
320 __ stfs(floatSlot, 0, arg_c);
321 #else
322 #error "unknown OS"
323 #endif
324 __ addi(arg_java, arg_java, -BytesPerWord);
325 __ addi(arg_c, arg_c, BytesPerWord);
326 __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
327 __ blt(CCR0, move_floatSlot_to_FARG);
328 __ b(loop_start);
329
330 __ bind(do_double);
331 __ lfd(floatSlot, - BytesPerWord, arg_java);
332 __ stfd(floatSlot, 0, arg_c);
333 __ addi(arg_java, arg_java, - 2 * BytesPerWord);
334 __ addi(arg_c, arg_c, BytesPerWord);
335 __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
336 __ blt(CCR0, move_floatSlot_to_FARG);
337 __ b(loop_start);
338
// All arguments parsed: unwind the stub frame and return with the
// result handler still in R3_RET (callee-saved across our leaf calls).
339 __ bind(loop_end);
340
341 __ pop_frame();
342 __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
343 __ restore_LR_CR(R0);
344
345 __ blr();
346
// Branch tables for moving a parsed argument into its ABI register.
// Each case MUST be exactly two instructions (8 bytes): the computed
// branches below index into these tables by (count << LogSizeOfTwoInstructions).
347 Label move_int_arg, move_float_arg;
348 __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
349 __ mr(R5_ARG3, intSlot); __ b(loop_start);
350 __ mr(R6_ARG4, intSlot); __ b(loop_start);
351 __ mr(R7_ARG5, intSlot); __ b(loop_start);
352 __ mr(R8_ARG6, intSlot); __ b(loop_start);
353 __ mr(R9_ARG7, intSlot); __ b(loop_start);
354 __ mr(R10_ARG8, intSlot); __ b(loop_start);
355
356 __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
357 __ fmr(F1_ARG1, floatSlot); __ b(loop_start);
358 __ fmr(F2_ARG2, floatSlot); __ b(loop_start);
359 __ fmr(F3_ARG3, floatSlot); __ b(loop_start);
360 __ fmr(F4_ARG4, floatSlot); __ b(loop_start);
361 __ fmr(F5_ARG5, floatSlot); __ b(loop_start);
362 __ fmr(F6_ARG6, floatSlot); __ b(loop_start);
363 __ fmr(F7_ARG7, floatSlot); __ b(loop_start);
364 __ fmr(F8_ARG8, floatSlot); __ b(loop_start);
365 __ fmr(F9_ARG9, floatSlot); __ b(loop_start);
366 __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
367 __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
368 __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
369 __ fmr(F13_ARG13, floatSlot); __ b(loop_start);
370
// Computed branch: target = move_int_arg + (argcnt * 8). argcnt indexes
// GPRs directly because PPC GPR argument slots track overall argument
// position, while FPRs (fpcnt) count only floating-point arguments.
371 __ bind(move_intSlot_to_ARG);
372 __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
373 __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
374 __ add(R11_scratch1, R0, R11_scratch1);
375 __ mtctr(R11_scratch1/*branch_target*/);
376 __ bctr();
377 __ bind(move_floatSlot_to_FARG);
378 __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
379 __ addi(fpcnt, fpcnt, 1);
380 __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
381 __ add(R11_scratch1, R0, R11_scratch1);
382 __ mtctr(R11_scratch1/*branch_target*/);
383 __ bctr();
384
385 return entry;
386 }
387
// Generate a stub that converts the raw native return value in R3_RET
// into the canonical Java representation for the given result type
// (sign/zero extension for sub-int types, unboxing for oops).
388 address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
389 //
390 // Registers alive
391 // R3_RET
392 // LR
393 //
394 // Registers updated
395 // R3_RET
396 //
397
398 Label done;
399 Label is_false;
400
401 address entry = __ pc();
402
403 switch (type) {
404 case T_BOOLEAN:
// Normalize any non-zero value to exactly 1.
405 __ cmpwi(CCR0, R3_RET, 0);
406 __ beq(CCR0, is_false);
407 __ li(R3_RET, 1);
408 __ b(done);
409 __ bind(is_false);
410 __ li(R3_RET, 0);
411 break;
412 case T_BYTE:
413 // sign extend 8 bits
414 __ extsb(R3_RET, R3_RET);
415 break;
416 case T_CHAR:
417 // zero extend 16 bits
418 __ clrldi(R3_RET, R3_RET, 48);
419 break;
420 case T_SHORT:
421 // sign extend 16 bits
422 __ extsh(R3_RET, R3_RET);
423 break;
424 case T_INT:
425 // sign extend 32 bits
426 __ extsw(R3_RET, R3_RET);
427 break;
// T_LONG/T_FLOAT/T_DOUBLE/T_VOID need no conversion: longs are already
// 64-bit, float/double results live in FP registers, void has no value.
428 case T_LONG:
429 break;
430 case T_OBJECT:
431 // unbox result if not null
// R3_RET holds a handle (pointer to the oop); load through it unless NULL.
432 __ cmpdi(CCR0, R3_RET, 0);
433 __ beq(CCR0, done);
434 __ ld(R3_RET, 0, R3_RET);
435 __ verify_oop(R3_RET);
436 break;
437 case T_FLOAT:
438 break;
439 case T_DOUBLE:
440 break;
441 case T_VOID:
442 break;
443 default: ShouldNotReachHere();
444 }
445
446 __ BIND(done);
447 __ blr();
448
449 return entry;
450 }
451
452 // Abstract method entry.
453 //
// Entry point used when an abstract method is (erroneously) invoked:
// calls into the VM to raise AbstractMethodError and returns to the
// frame manager, which dispatches the pending exception.
454 address InterpreterGenerator::generate_abstract_entry(void) {
455 address entry = __ pc();
456
457 //
458 // Registers alive
459 // R16_thread - JavaThread*
460 // R19_method - callee's methodOop (method to be invoked)
461 // R1_SP - SP prepared such that caller's outgoing args are near top
462 // LR - return address to caller
463 //
464 // Stack layout at this point:
465 //
466 // 0 [TOP_IJAVA_FRAME_ABI] <-- R1_SP
467 // alignment (optional)
468 // [outgoing Java arguments]
469 // ...
470 // PARENT [PARENT_IJAVA_FRAME_ABI]
471 // ...
472 //
473
474 // Can't use call_VM here because we have not set up a new
475 // interpreter state. Make the call to the vm and make it look like
476 // our caller set up the JavaFrameAnchor.
477 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
478
479 // Push a new C frame and save LR.
480 __ save_LR_CR(R0);
481 __ push_frame_abi112_nonvolatiles(0, R11_scratch1);
482
483 // This is not a leaf but we have a JavaFrameAnchor now and we will
484 // check (create) exceptions afterward so this is ok.
485 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
486
487 // Pop the C frame and restore LR.
488 __ pop_frame();
489 __ restore_LR_CR(R0);
490
491 // Reset JavaFrameAnchor from call_VM_leaf above.
492 __ reset_last_Java_frame();
493
494 // Return to frame manager, it will handle the pending exception.
495 __ blr();
496
497 return entry;
498 }
499
500 // Call an accessor method (assuming it is resolved, otherwise drop into
501 // vanilla (slow path) entry.
// Fast-path entry for trivial getter methods: loads the field directly
// from the receiver without building an interpreter frame. Falls back to
// the normal (zerolocals) entry when the receiver is null, JVMTI
// interp-only mode is active, or the cpCache entry is not yet resolved.
502 address InterpreterGenerator::generate_accessor_entry(void) {
503 if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
504 return NULL;
505
506 Label Ldone, Lslow_path;
507
508 const Register Rthis = R3_ARG1,
509 Rconst_method = R4_ARG2,
510 Rcodes = Rconst_method,
511 Rcpool_cache = R5_ARG3,
512 Rscratch = R11_scratch1,
513 Rjvmti_mode = Rscratch,
514 Roffset = R12_scratch2,
515 Rflags = R6_ARG4;
516
517 address entry = __ pc();
518
519 // Check for safepoint:
520 // Ditch this, real men don't need safepoint checks.
521
522 // Also check for JVMTI mode
523 // Check for null obj, take slow path if so.
524 #ifdef CC_INTERP
525 __ ld(Rthis, Interpreter::stackElementSize, R17_tos);
526 #else
// Fix: statement was missing its terminating semicolon, which would break
// the build on any configuration where CC_INTERP is not defined.
527 Unimplemented();
528 #endif
529 __ lwz(Rjvmti_mode, thread_(interp_only_mode));
530 __ cmpdi(CCR1, Rthis, 0);
531 __ cmpwi(CCR0, Rjvmti_mode, 0);
// Merge both tests into CCR0.eq so a single branch covers null receiver
// and active JVMTI interp-only mode.
532 __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
533 __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
534
535 // Do 2 things in parallel:
536 // 1. Load the index out of the first instruction word, which looks like this:
537 // <0x2a><0xb4><index (2 byte, native endianness)>.
538 // 2. Load constant pool cache base.
539 __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
540 __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
541
542 __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
543 __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
544
545 // Get the const pool entry by means of <index>.
546 const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
547 __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
548 __ add(Rcpool_cache, Rscratch, Rcpool_cache);
549
550 // Check if cpool cache entry is resolved.
551 // We are resolved if the indices offset contains the current bytecode.
552 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
553 // Big Endian:
554 __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
555 __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
556 __ bne(CCR0, Lslow_path);
557 __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
558
559 // Finally, start loading the value: Get cp cache entry into regs.
560 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
561 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
562
563 // Get field type.
564 // (Rflags>>ConstantPoolCacheEntry::tos_state_shift)&((1<<ConstantPoolCacheEntry::tos_state_bits)-1)
565 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
566
567 #ifdef ASSERT
568 __ ld(R9_ARG7, 0, R1_SP);
569 __ ld(R10_ARG8, 0, R21_sender_SP);
570 __ cmpd(CCR0, R9_ARG7, R10_ARG8);
571 __ asm_assert_eq("backlink", 0x543);
572 #endif // ASSERT
573 __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
574
575 // Load the return value according to field type.
// Two compares are issued per pair so they can execute in parallel on
// different condition registers before the dependent branches.
576 Label Litos, Lltos, Lbtos, Lctos, Lstos;
577 __ cmpdi(CCR1, Rflags, itos);
578 __ cmpdi(CCR0, Rflags, ltos);
579 __ beq(CCR1, Litos);
580 __ beq(CCR0, Lltos);
581 __ cmpdi(CCR1, Rflags, btos);
582 __ cmpdi(CCR0, Rflags, ctos);
583 __ beq(CCR1, Lbtos);
584 __ beq(CCR0, Lctos);
585 __ cmpdi(CCR1, Rflags, stos);
586 __ beq(CCR1, Lstos);
587 #ifdef ASSERT
588 __ cmpdi(CCR0, Rflags, atos);
589 __ asm_assert_eq("what type is this?", 0x432);
590 #endif
591 // fallthru: __ bind(Latos);
592 __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rthis);
593 __ blr();
594
595 __ bind(Litos);
596 __ lwax(R3_RET, Rthis, Roffset);
597 __ blr();
598
599 __ bind(Lltos);
600 __ ldx(R3_RET, Rthis, Roffset);
601 __ blr();
602
603 __ bind(Lbtos);
604 __ lbzx(R3_RET, Rthis, Roffset);
605 __ extsb(R3_RET, R3_RET);
606 __ blr();
607
608 __ bind(Lctos);
609 __ lhzx(R3_RET, Rthis, Roffset);
610 __ blr();
611
612 __ bind(Lstos);
613 __ lhax(R3_RET, Rthis, Roffset);
614 __ blr();
615
// Slow path: tail-jump to the regular method entry via CTR.
616 __ bind(Lslow_path);
617 assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
618 __ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
619 __ mtctr(Rscratch);
620 __ bctr();
621 __ flush();
622
623 return entry;
624 }
625
626 // Interpreter intrinsic for WeakReference.get().
627 // 1. Don't push a full blown frame and go on dispatching, but fetch the value
628 // into R8 and return quickly
629 // 2. If G1 is active we *must* execute this intrinsic for correctness:
630 // It contains a GC barrier which puts the reference into the satb buffer
631 // to indicate that someone holds a strong reference to the object the
632 // weak ref points to!
633 address InterpreterGenerator::generate_Reference_get_entry(void) {
634 // Code: _aload_0, _getfield, _areturn
635 // parameter size = 1
636 //
637 // The code that gets generated by this routine is split into 2 parts:
638 // 1. the "intrinsified" code for G1 (or any SATB based GC),
639 // 2. the slow path - which is an expansion of the regular method entry.
640 //
641 // Notes:
642 // * In the G1 code we do not check whether we need to block for
643 // a safepoint. If G1 is enabled then we must execute the specialized
644 // code for Reference.get (except when the Reference object is null)
645 // so that we can log the value in the referent field with an SATB
646 // update buffer.
647 // If the code for the getfield template is modified so that the
648 // G1 pre-barrier code is executed when the current method is
649 // Reference.get() then going through the normal method entry
650 // will be fine.
651 // * The G1 code can, however, check the receiver object (the instance
652 // of java.lang.Reference) and jump to the slow path if null. If the
653 // Reference object is null then we obviously cannot fetch the referent
654 // and so we don't need to call the G1 pre-barrier. Thus we can use the
655 // regular method entry code to generate the NPE.
656 //
657 // This code is based on generate_accessor_entry.
658
659 address entry = __ pc();
660
661 const int referent_offset = java_lang_ref_Reference::referent_offset;
662 guarantee(referent_offset > 0, "referent offset not initialized");
663
664 if (UseG1GC) {
665 Label slow_path;
666
667 // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
668
669 // In the G1 code we don't check if we need to reach a safepoint. We
670 // continue and the thread will safepoint at the next bytecode dispatch.
671
672 // If the receiver is null then it is OK to jump to the slow path.
673 #ifdef CC_INTERP
674 __ ld(R3_RET, Interpreter::stackElementSize, R17_tos); // get receiver
675 #else
676 Unimplemented();
677 #endif
678
679 // Check if receiver == NULL and go the slow path.
680 __ cmpdi(CCR0, R3_RET, 0);
681 __ beq(CCR0, slow_path);
682
683 // Load the value of the referent field.
// not_null variant is safe: the null check above already branched away.
684 __ load_heap_oop_not_null(R3_RET, referent_offset, R3_RET);
685
686 // Generate the G1 pre-barrier code to log the value of
687 // the referent field in an SATB buffer. Note with
688 // these parameters the pre-barrier does not generate
689 // the load of the previous value.
690
691 // Restore caller sp for c2i case.
692 #ifdef ASSERT
693 __ ld(R9_ARG7, 0, R1_SP);
694 __ ld(R10_ARG8, 0, R21_sender_SP);
695 __ cmpd(CCR0, R9_ARG7, R10_ARG8);
696 __ asm_assert_eq("backlink", 0x544);
697 #endif // ASSERT
698 __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
699
700 __ g1_write_barrier_pre(noreg, // obj
701 noreg, // offset
702 R3_RET, // pre_val
703 R11_scratch1, // tmp
704 R12_scratch2, // tmp
705 true); // needs_frame
706
707 __ blr();
708
709 // Generate regular method entry.
710 __ bind(slow_path);
711 assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
712 __ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
713 __ mtctr(R11_scratch1);
714 __ bctr();
715 __ flush();
716
717 return entry;
718 } else {
// Without G1 the SATB barrier is unnecessary; the plain accessor fast
// path (or NULL, causing normal entry) suffices.
719 return generate_accessor_entry();
720 }
721 }
722
// Placeholder: C2 on this platform has no callee-save registers to
// restore during deoptimization, so only the frame-kind invariant is
// checked here.
723 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
724 // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
725 // the days we had adapter frames. When we deoptimize a situation where a
726 // compiled caller calls a compiled caller will have registers it expects
727 // to survive the call to the callee. If we deoptimize the callee the only
728 // way we can restore these registers is to have the oldest interpreter
729 // frame that we create restore these values. That is what this routine
730 // will accomplish.
731
732 // At the moment we have modified c2 to not have any callee save registers
733 // so this problem does not exist and this routine is just a place holder.
734
735 assert(f->is_interpreted_frame(), "must be interpreted");
736 }