/*
 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_interpreter_x86_64.cpp.incl"

#define __ _masm->

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ leaq(rax, Address(rbp,
                         frame::interpreter_frame_monitor_block_top_offset *
                         wordSize));
    __ cmpq(rax, rsp); // rax = maximal rsp for current rbp (stack
                       // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ popq(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ popq(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
                                                                int step) {

  // amd64 doesn't need to do anything special about compiled returns
  // to the interpreter, so the sentinel placement and specialized cleanup
  // code that exist on x86 are not needed here.

  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_8,
                       in_bytes(constantPoolCacheOopDesc::base_offset()) +
                       3 * wordSize));
  __ andl(rbx, 0xFF);
  if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
  __ leaq(rsp, Address(rsp, rbx, Address::times_8));
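  // (Descriptive note: the word loaded above from the ConstantPoolCache entry
  //  at base_offset() + 3*wordSize is the flags word; its low byte holds the
  //  invoked callee's parameter size in stack slots, which is why it is masked
  //  with 0xFF and then used to pop the caller's outgoing arguments.)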
  __ dispatch_next(state, step);
  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  {
    Label L;
    __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movq(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
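//       (Roughly: the counter words keep status bits in their low bits, so
//        the backedge count is masked with count_mask_value before being
//        added to the bumped invocation count, and the sum is compared
//        unsigned against InterpreterInvocationLimit below -- once the limit
//        is crossed the test keeps firing, which is the 'sticky' behaviour
//        mentioned above.)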
//
// rbx: method
// ecx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {

  const Address invocation_counter(rbx,
                                   methodOopDesc::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  const Address backedge_counter(rbx,
                                 methodOopDesc::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());

  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    __ incrementl(Address(rbx,
                          methodOopDesc::interpreter_invocation_counter_offset()));
  }
  // Update standard invocation counters
  __ movl(rax, backedge_counter); // load backedge counter

  __ incrementl(rcx, InvocationCounter::count_increment);
  __ andl(rax, InvocationCounter::count_mask_value); // mask out the
                                                     // status bits

  __ movl(invocation_counter, rcx); // save invocation count
  __ addl(rcx, rax); // add both counters

  // profile_method is non-null only for interpreted method so
  // profile_method != NULL == !native_call

  if (ProfileInterpreter && profile_method != NULL) {
    // Test to see if we should create a method data oop
    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
    __ jcc(Assembler::less, *profile_method_continue);

    // if no method data exists, go to profile_method
    __ test_method_data_pointer(rax, *profile_method);
  }

  __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
  __ jcc(Assembler::aboveEqual, *overflow);
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  const Address size_of_parameters(rbx,
                                   methodOopDesc::size_of_parameters_offset());

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movq(rbx, Address(rbp, method_offset));   // restore methodOop
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: methodOop
//
// Kills:
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
  __ jcc(Assembler::belowEqual, after_frame_check);
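  // (Illustrative note, not generated code: with a 4K page and an overhead
  //  of, say, 13 words, any frame needing no more than roughly
  //  (4096 - 13*8) / stackElementSize additional locals is already covered
  //  by the normal guard pages, so only larger frames take the explicit
  //  limit computation below.)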

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ movq(rax, rdx);
  __ shll(rax, Interpreter::logStackElementSize());  // 2 slots per parameter.
  __ addq(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpq(stack_base, 0);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpq(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addq(rax, stack_base);
  __ subq(rax, stack_size);

  // add in the red and yellow zone sizes
  __ addq(rax, (StackRedPages + StackYellowPages) * page_size);

  // check against the current stack bottom
  __ cmpq(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  __ popq(rax); // get return address
  __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: methodOop
//      r14: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rbx, methodOopDesc::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
                              Klass::java_mirror_offset_in_bytes();
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movq(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
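    // (Descriptive note: the receiver load above sits between testl and jcc
    //  on purpose -- movq does not modify the flags, so the branch below
    //  still tests JVM_ACC_STATIC; static methods fall through and lock the
    //  class mirror instead.)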
    __ jcc(Assembler::zero, done);
    __ movq(rax, Address(rbx, methodOopDesc::constants_offset()));
    __ movq(rax, Address(rax,
                         constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movq(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testq(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subq(rsp, entry_size);        // add space for a monitor entry
  __ movq(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movq(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ movq(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: methodOop
//      r14: pointer to locals
//      r13: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ pushq(rax);            // save return address
  __ enter();               // save old & set new rbp
  __ pushq(r13);            // set sender sp
  __ pushq((int)NULL_WORD); // leave last_sp as null
  __ movq(r13, Address(rbx, methodOopDesc::const_offset()));      // get constMethodOop
  __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
  __ pushq(rbx);            // save methodOop
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movq(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    __ testq(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addq(rdx, in_bytes(methodDataOopDesc::data_offset()));
    __ bind(method_data_continue);
    __ pushq(rdx);          // set the mdp (method data pointer)
  } else {
    __ pushq(0);
  }

  __ movq(rdx, Address(rbx, methodOopDesc::constants_offset()));
  __ movq(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
  __ pushq(rdx); // set constant pool cache
  __ pushq(r14); // set locals pointer
  if (native_call) {
    __ pushq(0); // no bcp
  } else {
    __ pushq(r13); // set bcp
  }
  __ pushq(0); // reserve word for pointer to expression stack bottom
  __ movq(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // r13: sender sp

  address entry_point = __ pc();

  const Address size_of_parameters(rbx, methodOopDesc::
                                        size_of_parameters_offset());
  const Address invocation_counter(rbx, methodOopDesc::
                                        invocation_counter_offset() +
                                        InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_word(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: methodOop
  // rcx: size of parameters
  // r13: sender sp
  __ popq(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
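  // (Descriptive note: with TaggedStackInterpreter every stack element is a
  //  <tag, value> pair, so the parameter count is doubled before being scaled
  //  by the 8-byte slot size when locating the first parameter.)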
  __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ pushq((int) NULL);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ pushq((int) NULL);

  if (inc_counter) {
    __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movq(rax, monitor_block_top);
    __ cmpq(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r12;

  // allocate space for parameters
  __ get_method(method);
  __ verify_oop(method);
  __ load_unsigned_word(t,
                        Address(method,
                                methodOopDesc::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize());

  __ subq(rsp, t);
  __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andq(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ testq(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movq(Address(rbp,
                  (frame::interpreter_frame_result_handler_offset) * wordSize),
          rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
                              Klass::java_mirror_offset_in_bytes();
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movq(t, Address(method, methodOopDesc::constants_offset()));
    __ movq(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movq(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
            t);
    // pass handle to mirror
    __ leaq(c_rarg1,
            Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpq(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ verify_oop(method);
    __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ leaq(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Depending on runtime options, either restore the MXCSR
  // register after returning from the JNI Call or verify that
  // it wasn't changed during -Xcheck:jni.
  if (RestoreMXCSROnJNICalls) {
    __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
  }
  else if (CheckJNICalls) {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
  }

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

  __ push(dtos);
  __ push(ltos);
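  // (Descriptive note: push(dtos) saves xmm0 and push(ltos) saves rax, so
  //  both possible native return registers survive the thread state
  //  transition and any safepoint below; the matching pops appear just
  //  before the result handler is called.)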

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page.  This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ movq(c_rarg0, r15_thread);
    __ movq(r12, rsp); // remember sp
    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andq(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ movq(rsp, r12); // restore sp
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movq(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If the result is an oop, unbox it and store it in the frame where GC
  // will see it; the result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpq(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testq(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movq(rax, Address(rax, 0));
    __ bind(store_result);
    __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing an oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pushaq(); // XXX only save smashed registers
    __ movq(r12, rsp); // remember sp
    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andq(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ movq(rsp, r12); // restore sp
    __ popaq(); // XXX only restore smashed registers

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);
  __ verify_oop(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movq(r13, Address(method, methodOopDesc::const_offset()));   // get constMethodOop
  __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // monitor expected in c_rarg1 for slow unlock path
      __ leaq(c_rarg1, monitor); // address of first monitor

      __ movq(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testq(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ movq(t, Address(rbp,
                     (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movq(t, Address(rbp,
                     frame::interpreter_frame_sender_sp_offset *
                     wordSize)); // get sender sp
  __ leave();      // remove frame anchor
  __ popq(rdi);    // get return address
  __ movq(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // ebx: methodOop
  // r13: sender sp
  address entry_point = __ pc();

  const Address size_of_parameters(rbx,
                                   methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals(rbx, methodOopDesc::size_of_locals_offset());
  const Address invocation_counter(rbx,
                                   methodOopDesc::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  const Address access_flags(rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_word(rcx, size_of_parameters);

  // rbx: methodOop
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_word(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
//   __ incrementl(rdx);
//   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ popq(rax);

  // compute beginning of parameters (r14)
  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
  __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    if (TaggedStackInterpreter) __ pushq((int) NULL); // push tag
    __ pushq((int) NULL); // initialize local variables
    __ decrementl(rdx);   // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // (pre-)fetch invocation count
  if (inc_counter) {
    __ movl(rcx, invocation_counter);
  }
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movq(rax, monitor_block_top);
    __ cmpq(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
                 r13, true);

      __ movq(rbx, Address(rbp, method_offset)); // restore methodOop
      __ movq(rax, Address(rbx,
                           in_bytes(methodOopDesc::method_data_offset())));
      __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
              rax);
      __ test_method_data_pointer(rax, profile_method_continue);
      __ addq(rax, in_bytes(methodDataOopDesc::data_offset()));
      __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
              rax);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods.  These both come in synchronized and non-synchronized
// versions, but the frame layout they create is very similar. The
// other method entry types are special-purpose entries that combine
// entry and interpretation all in one. These are for trivial methods
// like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: methodOop
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ methodOop          ]
// [ saved ebp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                              break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                         break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
  case Interpreter::java_lang_math_sin     :                                                                              break;
  case Interpreter::java_lang_math_cos     :                                                                              break;
  case Interpreter::java_lang_math_tan     :                                                                              break;
  case Interpreter::java_lang_math_abs     :                                                                              break;
  case Interpreter::java_lang_math_log     :                                                                              break;
  case Interpreter::java_lang_math_log10   :                                                                              break;
  case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
  default                                  : ShouldNotReachHere();                                                        break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->
                                generate_normal_entry(synchronized);
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords();
  return (overhead_size + method_stack + stub_code);
}

int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the
  // right size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords();
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords();

  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_param_count)*Interpreter::stackElementWords() +
         moncount * frame::interpreter_frame_monitor_size() +
         tempcount* Interpreter::stackElementWords() + popframe_extra_args;
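  // (Illustrative note: for example, a frame with 2 monitors, 3 expression
  //  stack temps, no popframe args, and a callee needing no locals beyond
  //  its parameters sizes out to overhead +
  //  2 * interpreter_frame_monitor_size() + 3 * stackElementWords() words.)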
  if (interpreter_frame != NULL) {
#ifdef ASSERT
    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
           "Frame not properly walkable");
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference in using sender_sp and
    // interpreter_frame_sender_sp interpreter_frame_sender_sp is
    // the original sp of the caller (the unextended_sp) and
    // sender_sp is fp+16 XXX
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* esp = (intptr_t*) monbot -
                    tempcount*Interpreter::stackElementWords() -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(esp);

    // All frames but the initial (oldest) interpreter frame we fill in have
    // a value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.
    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() ==
        interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                         extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ movq(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci still refers to the instruction
|
|
1390 // which caused the exception and the expression stack is
|
|
1391 // empty. Thus, for any VM calls at this point, GC will find a legal
|
|
1392 // oop map (with empty expression stack).
|
|
1393
|
|
1394 // In current activation
|
|
1395 // tos: exception
|
|
1396 // esi: exception bcp
|
|
1397
|
|
1398 //
|
|
1399 // JVMTI PopFrame support
|
|
1400 //
|
|
1401
|
|
1402 Interpreter::_remove_activation_preserving_args_entry = __ pc();
|
|
1403 __ empty_expression_stack();
|
|
1404 // Set the popframe_processing bit in pending_popframe_condition
|
|
1405 // indicating that we are currently handling popframe, so that
|
|
1406 // call_VMs that may happen later do not trigger new popframe
|
|
1407 // handling cycles.
|
|
1408 __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
|
|
1409 __ orl(rdx, JavaThread::popframe_processing_bit);
|
|
1410 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);
|
|
1411
|
|
1412 {
|
|
1413 // Check to see whether we are returning to a deoptimized frame.
|
|
1414 // (The PopFrame call ensures that the caller of the popped frame is
|
|
1415 // either interpreted or compiled and deoptimizes it if compiled.)
|
|
1416 // In this case, we can't call dispatch_next() after the frame is
|
|
1417 // popped, but instead must save the incoming arguments and restore
|
|
1418 // them after deoptimization has occurred.
|
|
1419 //
|
|
1420 // Note that we don't compare the return PC against the
|
|
1421 // deoptimization blob's unpack entry because of the presence of
|
|
1422 // adapter frames in C2.
|
|
1423 Label caller_not_deoptimized;
|
|
1424 __ movq(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
|
|
1425 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
|
|
1426 InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize());
    __ restore_locals(); // XXX do we need this?
    __ subq(r14, rax);
    __ addq(r14, wordSize);
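    // rax now holds the size of the parameter area in bytes; r14 (the
    // locals pointer) minus that size plus one word is the lowest-addressed
    // parameter slot, i.e. the base of the incoming arguments to preserve.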
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ movq(c_rarg1, rsp);
  __ movq(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                         InterpreterRuntime::popframe_move_outgoing_args),
                        r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
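  // With last_sp NULL again the frame is no longer marked as being in the
  // middle of a call; the expression stack top is once more given by rsp.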

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

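  // Resume execution in the caller: the restored bcp still addresses the
  // invoke bytecode, so dispatching from here re-executes that call, which
  // is what the JVMTI PopFrame contract requires.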
  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movq(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on IllegalMonitorStateExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ movq(rax, Address(r15_thread, JavaThread::vm_result_offset()));
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ verify_oop(rax);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ pushq(rax);                                 // save exception
  __ pushq(rdx);                                 // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rdx);
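  // exception_handler_for_return_address picks the continuation that
  // matches the caller's frame type (interpreted, compiled or stub) and,
  // being a C call, leaves that handler address in rax.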
  __ movq(rbx, rax);                             // save exception handler
  __ popq(rdx);                                  // restore return address
  __ popq(rax);                                  // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);
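  // load_earlyret_value fetches the value that JVMTI ForceEarlyReturn stored
  // in the JvmtiThreadState and leaves it in the tos register(s) for the
  // given TosState.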

  __ movq(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
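  // remove_activation leaves the caller's return address in rsi; jumping
  // there returns to the caller with the early-return value still in tos.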
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ jmp(L);
  fep = __ pc(); __ push_f();   __ jmp(L);
  dep = __ pc(); __ push_d();   __ jmp(L);
  lep = __ pc(); __ push_l();   __ jmp(L);
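  // byte, char and short values are held on the expression stack as ints,
  // so btos, ctos and stos can share the itos entry point.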
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
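// Build a small stub that calls SharedRuntime::trace_bytecode. The stub
// saves the argument registers and the current tos value around the call so
// that bytecode tracing does not disturb interpreter state.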
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ pushq(c_rarg0);
  __ pushq(c_rarg1);
  __ pushq(c_rarg2);
  __ pushq(c_rarg3);
  __ movq(c_rarg2, rax); // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ popq(c_rarg3);
  __ popq(c_rarg2);
  __ popq(c_rarg1);
  __ popq(c_rarg0);
  __ pop(state);
  __ ret(0); // return from result handler

  return entry;
}

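// Bump the global count of executed bytecodes; the same counter is also
// consulted by stop_interpreter_at() below.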
void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

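// The pair histogram index encodes the current and the previous bytecode:
// the old index is shifted right to drop the older bytecode and the current
// bytecode is or'ed into the high bits, so _counters is effectively indexed
// by (current, previous) bytecode pairs.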
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ movq(r12, rsp); // remember sp
  __ andq(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ movq(rsp, r12); // restore sp
}


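// Emit a conditional breakpoint: when the executed-bytecode counter reaches
// the StopInterpreterAt value, int3 traps into an attached debugger.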
void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT