comparison src/cpu/x86/vm/templateInterpreter_x86_32.cpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories
6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn
author never
date Wed, 27 Aug 2008 00:21:55 -0700
parents a61af66fc99e
children 9ee9cf798b59
comparison of 303:fa4d1d240383 (before) and 304:dc7f315e41f7 (after)
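
The dominant pattern in this change is mechanical: 32-bit-only instructions (movl, pushl, popl, leal, addl, subl, cmpl, testl) are routed through width-agnostic MacroAssembler wrappers (movptr, push, pop, lea, addptr, subptr, cmpptr, testptr) so the merged x86 sources can emit 32-bit or 64-bit encodings from one file. A minimal sketch of the shape of those wrappers (illustrative only, not the actual MacroAssembler code):

    // Sketch: each *ptr wrapper picks the operand width at build time.
    // LP64_ONLY/NOT_LP64 expand to their argument on 64-bit/32-bit builds.
    void MacroAssembler::movptr(Register dst, Address src) {
      LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
    }
    void MacroAssembler::addptr(Register dst, int32_t imm) {
      LP64_ONLY(addq(dst, imm)) NOT_LP64(addl(dst, imm));
    }

The recurring (int32_t)NULL_WORD casts serve the same goal: NULL_WORD is pointer-sized, so immediates are narrowed explicitly to select an overload with a 32-bit immediate that is valid on both targets.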
@@ -41 +41 @@
   // Note: There should be a minimal interpreter frame set up when stack
   // overflow occurs since we check explicitly for it now.
   //
 #ifdef ASSERT
   { Label L;
-    __ leal(rax, Address(rbp,
+    __ lea(rax, Address(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize));
-    __ cmpl(rax, rsp);  // rax, = maximal rsp for current rbp,
+    __ cmpptr(rax, rsp);  // rax, = maximal rsp for current rbp,
                           // (stack grows negative)
     __ jcc(Assembler::aboveEqual, L); // check if frame is complete
     __ stop ("interpreter frame not set up");
     __ bind(L);
   }
@@ -78 +78 @@
 }

 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
   address entry = __ pc();
   // object is at TOS
-  __ popl(rax);
+  __ pop(rax);
   // expression stack must be empty before entering the VM if an exception
   // happened
   __ empty_expression_stack();
   __ empty_FPU_stack();
   __ call_VM(noreg,
@@ -95 +95 @@
 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
   assert(!pass_oop || message == NULL, "either oop or message but not both");
   address entry = __ pc();
   if (pass_oop) {
     // object is at TOS
-    __ popl(rbx);
+    __ pop(rbx);
   }
   // expression stack must be empty before entering the VM if an exception happened
   __ empty_expression_stack();
   __ empty_FPU_stack();
   // setup parameters
@@ -108 +108 @@
     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
   } else {
     if (message != NULL) {
       __ lea(rbx, ExternalAddress((address)message));
     } else {
-      __ movl(rbx, NULL_WORD);
+      __ movptr(rbx, (int32_t)NULL_WORD);
     }
     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
   }
   // throw exception
   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
@@ -121 +121 @@


 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
   address entry = __ pc();
   // NULL last_sp until next java call
-  __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
   __ dispatch_next(state);
   return entry;
 }

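
Several entries in this file null out interpreter_frame_last_sp; restating the protocol the comments describe (illustrative pseudo-code, not VM source):

    // before a Java call:    frame->last_sp = rsp;   // adapters may adjust rsp
    // on return/deopt/throw: rsp = frame->last_sp;   // undo any i2c adjustment
    //                        frame->last_sp = NULL;  // NULL marks "rsp is the true tos"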
@@ -158 +158 @@
   __ bind(interpreter_entry);

   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
   if (state == ftos && UseSSE >= 1) {
-    __ subl(rsp, wordSize);
+    __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
-    __ addl(rsp, wordSize);
+    __ addptr(rsp, wordSize);
   } else if (state == dtos && UseSSE >= 2) {
-    __ subl(rsp, 2*wordSize);
+    __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
-    __ addl(rsp, 2*wordSize);
+    __ addptr(rsp, 2*wordSize);
   }

   __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

   // Restore stack bottom in case i2c adjusted stack
-  __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
   // and NULL it as marker that rsp is now tos until next java call
-  __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

   __ restore_bcp();
   __ restore_locals();
   __ get_cache_and_index_at_bcp(rbx, rcx, 1);
   __ movl(rbx, Address(rbx, rcx,
-                       Address::times_4, constantPoolCacheOopDesc::base_offset() +
+                       Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                        ConstantPoolCacheEntry::flags_offset()));
-  __ andl(rbx, 0xFF);
+  __ andptr(rbx, 0xFF);
-  __ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
+  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
   return entry;
 }


 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
   address entry = __ pc();

   // In SSE mode, FP results are in xmm0
   if (state == ftos && UseSSE > 0) {
-    __ subl(rsp, wordSize);
+    __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
-    __ addl(rsp, wordSize);
+    __ addptr(rsp, wordSize);
   } else if (state == dtos && UseSSE >= 2) {
-    __ subl(rsp, 2*wordSize);
+    __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
-    __ addl(rsp, 2*wordSize);
+    __ addptr(rsp, 2*wordSize);
   }

   __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

   // The stack is not extended by deopt but we must NULL last_sp as this
   // entry is like a "return".
-  __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
   __ restore_bcp();
   __ restore_locals();
   // handle exceptions
   { Label L;
     const Register thread = rcx;
     __ get_thread(thread);
-    __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::zero, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
     __ should_not_reach_here();
     __ bind(L);
   }
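
The FP shuffle above exists because x86 has no direct move between the XMM and x87 register files: compiled SSE code returns float/double results in xmm0, while the interpreter's templates operate on the x87 stack, so the value is bounced through memory. The idiom, annotated (illustrative restatement, in comment form):

    // sub   esp, 4          ; scratch slot on the stack
    // movss [esp], xmm0     ; spill the SSE result
    // fld   dword [esp]     ; reload it into ST0 for the x87 interpreter
    // add   esp, 4          ; release the slot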
@@ -252 +252 @@

 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
   address entry = __ pc();
   switch (type) {
     case T_BOOLEAN: __ c2bool(rax); break;
-    case T_CHAR   : __ andl(rax, 0xFFFF); break;
+    case T_CHAR   : __ andptr(rax, 0xFFFF); break;
     case T_BYTE   : __ sign_extend_byte (rax); break;
     case T_SHORT  : __ sign_extend_short(rax); break;
     case T_INT    : /* nothing to do */ break;
     case T_DOUBLE :
     case T_FLOAT  :
       { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-        __ popl(t); // remove return address first
+        __ pop(t);  // remove return address first
         __ pop_dtos_to_rsp();
         // Must return a result for interpreter or compiler. In SSE
         // mode, results are returned in xmm0 and the FPU stack must
         // be empty.
         if (type == T_FLOAT && UseSSE >= 1) {
@@ -278 +278 @@
         } else {
           // restore ST0
           __ fld_d(Address(rsp, 0));
         }
         // and pop the temp
-        __ addl(rsp, 2 * wordSize);
+        __ addptr(rsp, 2 * wordSize);
-        __ pushl(t); // restore return address
+        __ push(t);  // restore return address
       }
       break;
     case T_OBJECT :
       // retrieve result from frame
-      __ movl(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
+      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
       // and verify it
       __ verify_oop(rax);
       break;
     default       : ShouldNotReachHere();
   }
@@ -320 +320 @@

   const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
   const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());

   if (ProfileInterpreter) { // %%% Merge this into methodDataOop
-    __ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+    __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
   }
   // Update standard invocation counters
   __ movl(rax, backedge_counter);               // load backedge counter

-  __ increment(rcx, InvocationCounter::count_increment);
+  __ incrementl(rcx, InvocationCounter::count_increment);
   __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

   __ movl(invocation_counter, rcx);             // save invocation count
   __ addl(rcx, rax);                            // add both counters

@@ -380 +380 @@

   // InterpreterRuntime::frequency_counter_overflow takes one argument
   // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   // The call returns the address of the verified entry point for the method or NULL
   // if the compilation did not complete (either went background or bailed out).
-  __ movl(rax, (int)false);
+  __ movptr(rax, (int32_t)false);
   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

-  __ movl(rbx, Address(rbp, method_offset));  // restore methodOop
+  __ movptr(rbx, Address(rbp, method_offset));  // restore methodOop

   // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
   // and jump to the interpreted entry.
   __ jmp(*do_continue, relocInfo::none);

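
For reference, the contract the comment block above describes, restated as a sketch (assuming the contemporaneous InterpreterRuntime declaration; see interpreterRuntime.hpp of this era for the authoritative signature):

    // nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread,
    //                                                         address branch_bcp);
    // branch_bcp == NULL: overflow at method entry (invocation counter);
    // branch_bcp != NULL: overflow at a backwards branch (OSR candidate);
    // returns NULL unless a compiled entry point is ready to jump to.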
@@ -431 +431 @@
   // compute rsp as if this were going to be the last frame on
   // the stack before the red zone

   Label after_frame_check_pop;

-  __ pushl(rsi);
+  __ push(rsi);

   const Register thread = rsi;

   __ get_thread(thread);

   const Address stack_base(thread, Thread::stack_base_offset());
   const Address stack_size(thread, Thread::stack_size_offset());

   // locals + overhead, in bytes
-  __ leal(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
+  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

 #ifdef ASSERT
   Label stack_base_okay, stack_size_okay;
   // verify that thread stack base is non-zero
-  __ cmpl(stack_base, 0);
+  __ cmpptr(stack_base, (int32_t)NULL_WORD);
   __ jcc(Assembler::notEqual, stack_base_okay);
   __ stop("stack base is zero");
   __ bind(stack_base_okay);
   // verify that thread stack size is non-zero
-  __ cmpl(stack_size, 0);
+  __ cmpptr(stack_size, 0);
   __ jcc(Assembler::notEqual, stack_size_okay);
   __ stop("stack size is zero");
   __ bind(stack_size_okay);
 #endif

   // Add stack base to locals and subtract stack size
-  __ addl(rax, stack_base);
+  __ addptr(rax, stack_base);
-  __ subl(rax, stack_size);
+  __ subptr(rax, stack_size);

   // Use the maximum number of pages we might bang.
   const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                               (StackRedPages+StackYellowPages);
-  __ addl(rax, max_pages * page_size);
+  __ addptr(rax, max_pages * page_size);

   // check against the current stack bottom
-  __ cmpl(rsp, rax);
+  __ cmpptr(rsp, rax);
   __ jcc(Assembler::above, after_frame_check_pop);

-  __ popl(rsi);  // get saved bcp / (c++ prev state ).
+  __ pop(rsi);   // get saved bcp / (c++ prev state ).

-  __ popl(rax);  // get return address
+  __ pop(rax);   // get return address
   __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));

   // all done with frame size check
   __ bind(after_frame_check_pop);
-  __ popl(rsi);
+  __ pop(rsi);

   __ bind(after_frame_check);
 }

 // Allocate monitor and lock method (asm interpreter)
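
Restating the frame-size check above in C-like terms (an illustrative summary, not VM code):

    // limit = (stack_base - stack_size)   // lowest legal stack address
    //       + frame_bytes                 // locals + fixed overhead (rax above)
    //       + max_pages * page_size;      // guard pages we might bang
    // if (!(rsp > limit)) throw StackOverflowError;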
@@ -505 +505 @@
   // get synchronization object
   { Label done;
     const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
     __ movl(rax, access_flags);
     __ testl(rax, JVM_ACC_STATIC);
-    __ movl(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
+    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movl(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
-    __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+    __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
-    __ movl(rax, Address(rax, mirror_offset));
+    __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
   }
   // add space for monitor & lock
-  __ subl(rsp, entry_size);            // add space for a monitor entry
+  __ subptr(rsp, entry_size);          // add space for a monitor entry
-  __ movl(monitor_block_top, rsp);     // set new monitor block top
+  __ movptr(monitor_block_top, rsp);   // set new monitor block top
-  __ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);   // store object
+  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
-  __ movl(rdx, rsp);                   // object address
+  __ mov(rdx, rsp);                    // object address
   __ lock_object(rdx);
 }

 //
 // Generate a fixed interpreter frame. This is identical setup for interpreted methods
 // and for native methods hence the shared code.

 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   // initialize fixed part of activation frame
-  __ pushl(rax);   // save return address
+  __ push(rax);    // save return address
   __ enter();      // save old & set new rbp,


-  __ pushl(rsi);       // set sender sp
+  __ push(rsi);        // set sender sp
-  __ pushl(NULL_WORD); // leave last_sp as null
+  __ push((int32_t)NULL_WORD);  // leave last_sp as null
-  __ movl(rsi, Address(rbx,methodOopDesc::const_offset()));       // get constMethodOop
+  __ movptr(rsi, Address(rbx,methodOopDesc::const_offset()));     // get constMethodOop
-  __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset()));  // get codebase
+  __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset()));   // get codebase
-  __ pushl(rbx);   // save methodOop
+  __ push(rbx);    // save methodOop
   if (ProfileInterpreter) {
     Label method_data_continue;
-    __ movl(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+    __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-    __ testl(rdx, rdx);
+    __ testptr(rdx, rdx);
     __ jcc(Assembler::zero, method_data_continue);
-    __ addl(rdx, in_bytes(methodDataOopDesc::data_offset()));
+    __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
     __ bind(method_data_continue);
-    __ pushl(rdx);   // set the mdp (method data pointer)
+    __ push(rdx);    // set the mdp (method data pointer)
   } else {
-    __ pushl(0);
+    __ push(0);
   }

-  __ movl(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
-  __ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+  __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
-  __ pushl(rdx);   // set constant pool cache
+  __ push(rdx);    // set constant pool cache
-  __ pushl(rdi);   // set locals pointer
+  __ push(rdi);    // set locals pointer
   if (native_call) {
-    __ pushl(0);   // no bcp
+    __ push(0);    // no bcp
   } else {
-    __ pushl(rsi); // set bcp
+    __ push(rsi);  // set bcp
   }
-  __ pushl(0);     // reserve word for pointer to expression stack bottom
+  __ push(0);      // reserve word for pointer to expression stack bottom
-  __ movl(Address(rsp, 0), rsp);   // set expression stack bottom
+  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 }

 // End of helpers

 //
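
Read back from the pushes in generate_fixed_frame, the fixed part of the activation ends up laid out like this (one word per slot, high to low addresses; an illustrative summary of the code above):

    // return address
    // saved rbp                         <- rbp
    // sender sp            (rsi)
    // last_sp              (NULL until the next Java call)
    // methodOop            (rbx)
    // mdp                  (rdx, or 0 if !ProfileInterpreter)
    // constant pool cache  (rdx)
    // locals pointer       (rdi)
    // bcp                  (rsi, or 0 for native methods)
    // expression stack bottom pointer   <- rsp after setup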
@@ -596 +596 @@
   // Note: We can only use this code if the getfield has been resolved
   //       and if we don't have a null-pointer exception => check for
   //       these conditions first and use slow path if necessary.
   // rbx,: method
   // rcx: receiver
-  __ movl(rax, Address(rsp, wordSize));
+  __ movptr(rax, Address(rsp, wordSize));

   // check if local 0 != NULL and read field
-  __ testl(rax, rax);
+  __ testptr(rax, rax);
   __ jcc(Assembler::zero, slow_path);

-  __ movl(rdi, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
   // read first instruction word and extract bytecode @ 1 and index @ 2
-  __ movl(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
   __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
   // Shift codes right to get the index on the right.
   // The bytecode fetched looks like <index><0xb4><0x2a>
   __ shrl(rdx, 2*BitsPerByte);
   __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
-  __ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+  __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));

   // rax,: local 0
   // rbx,: method
   // rcx: receiver - do not destroy since it is needed for slow path!
   // rcx: scratch
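
The two shifts above decode the 32-bit word fetched from the method's code base. A fast accessor body is aload_0; getfield #index, so the word loaded little-endian is (index << 16) | 0xb42a, as the comment's <index><0xb4><0x2a> picture says. Worked through (illustrative):

    // shrl rdx, 16  ; drop the aload_0 and getfield opcode bytes, leaving the index
    // shll rdx, 2   ; scale by log2(4): a cp-cache entry is 4 words (see the assert below)
    // rdx now indexes the entry in words; the times_ptr addressing that follows
    // multiplies by the word size to reach the entry's fields.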
@@ -627 +627 @@
   // contains Bytecode::_getfield in b1 byte.
   assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
   __ movl(rcx,
           Address(rdi,
                   rdx,
-                  Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+                  Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
   __ shrl(rcx, 2*BitsPerByte);
   __ andl(rcx, 0xFF);
   __ cmpl(rcx, Bytecodes::_getfield);
   __ jcc(Assembler::notEqual, slow_path);

   // Note: constant pool entry is not valid before bytecode is resolved
-  __ movl(rcx,
+  __ movptr(rcx,
           Address(rdi,
                   rdx,
-                  Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+                  Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
   __ movl(rdx,
           Address(rdi,
                   rdx,
-                  Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+                  Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));

   Label notByte, notShort, notChar;
   const Address field_address (rax, rcx, Address::times_1);

   // Need to differentiate between igetfield, agetfield, bgetfield etc.
@@ -680 +680 @@
   __ jcc(Assembler::equal, okay);
   __ stop("what type is this?");
   __ bind(okay);
 #endif // ASSERT
   // All the rest are a 32 bit wordsize
-  __ movl(rax, field_address);
+  // This is ok for now. Since fast accessors should be going away
+  __ movptr(rax, field_address);

   __ bind(xreturn_path);

   // _ireturn/_areturn
-  __ popl(rdi);      // get return address
+  __ pop(rdi);       // get return address
-  __ movl(rsp, rsi); // set sp to sender sp
+  __ mov(rsp, rsi);  // set sp to sender sp
   __ jmp(rdi);

   // generate a vanilla interpreter entry as the slow path
   __ bind(slow_path);

@@ -730 +731 @@

   // rbx,: methodOop
   // rcx: size of parameters
   // rsi: sender sp

-  __ popl(rax);   // get return address
+  __ pop(rax);    // get return address
   // for natives the size of locals is zero

   // compute beginning of parameters (rdi)
-  __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


   // add 2 zero-initialized slots for native calls
   // NULL result handler
-  __ pushl(NULL_WORD);
+  __ push((int32_t)NULL_WORD);
   // NULL oop temp (mirror or jni oop result)
-  __ pushl(NULL_WORD);
+  __ push((int32_t)NULL_WORD);

   if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
   // initialize fixed part of activation frame

   generate_fixed_frame(true);
@@ -816 +817 @@
   // start execution
 #ifdef ASSERT
   { Label L;
     const Address monitor_block_top (rbp,
                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movl(rax, monitor_block_top);
+    __ movptr(rax, monitor_block_top);
-    __ cmpl(rax, rsp);
+    __ cmpptr(rax, rsp);
     __ jcc(Assembler::equal, L);
     __ stop("broken stack frame setup in interpreter");
     __ bind(L);
   }
 #endif
@@ -836 +837 @@

   // allocate space for parameters
   __ get_method(method);
   __ verify_oop(method);
   __ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset()));
-  __ shll(t, Interpreter::logStackElementSize());
+  __ shlptr(t, Interpreter::logStackElementSize());
-  __ addl(t, 2*wordSize);  // allocate two more slots for JNIEnv and possible mirror
+  __ addptr(t, 2*wordSize);  // allocate two more slots for JNIEnv and possible mirror
-  __ subl(rsp, t);
+  __ subptr(rsp, t);
-  __ andl(rsp, -(StackAlignmentInBytes));  // gcc needs 16 byte aligned stacks to do XMM intrinsics
+  __ andptr(rsp, -(StackAlignmentInBytes));  // gcc needs 16 byte aligned stacks to do XMM intrinsics

   // get signature handler
   { Label L;
-    __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
+    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
-    __ testl(t, t);
+    __ testptr(t, t);
     __ jcc(Assembler::notZero, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
     __ get_method(method);
-    __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
+    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
     __ bind(L);
   }

   // call signature handler
   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
@@ -865 +866 @@
   __ call(t);
   __ get_method(method);  // slow path call blows RBX on DevStudio 5.0

   // result handler is in rax,
   // set result handler
-  __ movl(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
+  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

   // pass mirror handle if static call
   { Label L;
     const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
     __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
     __ testl(t, JVM_ACC_STATIC);
     __ jcc(Assembler::zero, L);
     // get mirror
-    __ movl(t, Address(method, methodOopDesc:: constants_offset()));
+    __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
-    __ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+    __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
-    __ movl(t, Address(t, mirror_offset));
+    __ movptr(t, Address(t, mirror_offset));
     // copy mirror into activation frame
-    __ movl(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
+    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
     // pass handle to mirror
-    __ leal(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    __ movl(Address(rsp, wordSize), t);
+    __ movptr(Address(rsp, wordSize), t);
     __ bind(L);
   }

   // get native function entry point
   { Label L;
-    __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
+    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ cmp32(rax, unsatisfied.addr());
+    __ cmpptr(rax, unsatisfied.addr());
     __ jcc(Assembler::notEqual, L);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
     __ get_method(method);
     __ verify_oop(method);
-    __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
+    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
     __ bind(L);
   }

   // pass JNIEnv
   __ get_thread(thread);
-  __ leal(t, Address(thread, JavaThread::jni_environment_offset()));
+  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
-  __ movl(Address(rsp, 0), t);
+  __ movptr(Address(rsp, 0), t);

   // set_last_Java_frame_before_call
   // It is enough that the pc()
   // points into the right code segment. It does not have to be the correct return pc.
   __ set_last_Java_frame(thread, noreg, rbp, __ pc());
@@ -932 +933 @@
   if (VM_Version::supports_sse()) {
     if (RestoreMXCSROnJNICalls) {
       __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
     }
     else if (CheckJNICalls ) {
-      __ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry()));
+      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
     }
   }

   // Either restore the x87 floating pointer control word after returning
   // from the JNI call or verify that it wasn't changed.
   if (CheckJNICalls) {
-    __ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry()));
+    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
   }

   // save potential result in ST(0) & rdx:rax
   // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
   // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
@@ -973 +974 @@
   // change thread state
   __ get_thread(thread);
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
   if(os::is_MP()) {
     if (UseMembar) {
-      __ membar();  // Force this write out before the read below
+      // Force this write out before the read below
+      __ membar(Assembler::Membar_mask_bits(
+           Assembler::LoadLoad | Assembler::LoadStore |
+           Assembler::StoreLoad | Assembler::StoreStore));
     } else {
       // Write serialization page so VM thread can do a pseudo remote membar.
       // We use the current thread pointer to calculate a thread specific
       // offset to write to within the page. This minimizes bus traffic
       // due to cache line collision.
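
The serialization-page scheme the comment above describes works roughly like this (a sketch of the idea; the helper names in this era's os code may differ, so treat them as illustrative):

    // each thread:  serialize_page[offset_for(thread)] = 1;   // plain store, no fence
    // VM thread:    mprotect(serialize_page, PROT_READ);      // traps in-flight writers
    //               mprotect(serialize_page, PROT_READ|PROT_WRITE);
    // A writer that faults is serialized by the trap, giving the effect of a
    // remote memory barrier without every thread executing a fence instruction.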
@@ -1006 +1010 @@
   // and never return here preventing us from clearing _last_native_pc down below.
   // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
   // preserved and correspond to the bcp/locals pointers. So we do a runtime call
   // by hand.
   //
-  __ pushl(thread);
+  __ push(thread);
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                           JavaThread::check_special_condition_for_native_trans)));
   __ increment(rsp, wordSize);
   __ get_thread(thread);

@@ -1021 +1025 @@
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

   __ reset_last_Java_frame(thread, true, true);

   // reset handle block
-  __ movl(t, Address(thread, JavaThread::active_handles_offset()));
+  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), 0);
+  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

   // If result was an oop then unbox and save it in the frame
   { Label L;
     Label no_oop, store_result;
     ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
     __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
               handler.addr());
     __ jcc(Assembler::notEqual, no_oop);
-    __ cmpl(Address(rsp, 0), NULL_WORD);
+    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
     __ pop(ltos);
-    __ testl(rax, rax);
+    __ testptr(rax, rax);
     __ jcc(Assembler::zero, store_result);
     // unbox
-    __ movl(rax, Address(rax, 0));
+    __ movptr(rax, Address(rax, 0));
     __ bind(store_result);
-    __ movl(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
+    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
     // keep stack depth as expected by pushing oop which will eventually be discarded
     __ push(ltos);
     __ bind(no_oop);
   }

   {
     Label no_reguard;
     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
     __ jcc(Assembler::notEqual, no_reguard);

-    __ pushad();
+    __ pusha();
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-    __ popad();
+    __ popa();

     __ bind(no_reguard);
   }

   // restore rsi to have legal interpreter frame,
   // i.e., bci == 0 <=> rsi == code_base()
   // Can't call_VM until bcp is within reasonable.
   __ get_method(method);  // method is junk from thread_in_native to now.
   __ verify_oop(method);
-  __ movl(rsi, Address(method,methodOopDesc::const_offset()));    // get constMethodOop
+  __ movptr(rsi, Address(method,methodOopDesc::const_offset()));  // get constMethodOop
-  __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset()));  // get codebase
+  __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset()));   // get codebase

   // handle exceptions (exception handling will handle unlocking!)
   { Label L;
-    __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
     __ jcc(Assembler::zero, L);
     // Note: At some point we may want to unify this with the code used in call_VM_base();
     // i.e., we should use the StubRoutines::forward_exception code. For now this
     // doesn't work here because the rsp is not correctly set at this point.
     __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
@@ -1087 +1091 @@
   { Label unlock;
     // BasicObjectLock will be first in list, since this is a synchronized method. However, need
     // to check that the object has not been unlocked by an explicit monitorexit bytecode.
     const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

-    __ leal(rdx, monitor);  // address of first monitor
+    __ lea(rdx, monitor);   // address of first monitor

-    __ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
+    __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
-    __ testl(t, t);
+    __ testptr(t, t);
     __ jcc(Assembler::notZero, unlock);

     // Entry already unlocked, need to throw exception
     __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
     __ should_not_reach_here();
@@ -1112 +1116 @@
   // not properly paired (was bug - gri 11/22/99).
   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

   // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
   __ pop(ltos);
-  __ movl(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
+  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
   __ call(t);

   // remove activation
-  __ movl(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));   // get sender sp
+  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
   __ leave();      // remove frame anchor
-  __ popl(rdi);    // get return address
+  __ pop(rdi);     // get return address
-  __ movl(rsp, t); // set sp to sender sp
+  __ mov(rsp, t);  // set sp to sender sp
   __ jmp(rdi);

   if (inc_counter) {
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
@@ -1163 +1167 @@

   // see if we've got enough room on the stack for locals plus overhead.
   generate_stack_overflow_check();

   // get return address
-  __ popl(rax);
+  __ pop(rax);

   // compute beginning of parameters (rdi)
-  __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

   // rdx - # of additional locals
   // allocate space for locals
   // explicitly initialize locals
   {
     Label exit, loop;
     __ testl(rdx, rdx);
     __ jcc(Assembler::lessEqual, exit);  // do nothing if rdx <= 0
     __ bind(loop);
-    if (TaggedStackInterpreter) __ pushl(NULL_WORD);  // push tag
-    __ pushl(NULL_WORD);  // initialize local variables
+    if (TaggedStackInterpreter) {
+      __ push((int32_t)NULL_WORD);  // push tag
+    }
+    __ push((int32_t)NULL_WORD);    // initialize local variables
     __ decrement(rdx);    // until everything initialized
     __ jcc(Assembler::greater, loop);
     __ bind(exit);
   }

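
With TaggedStackInterpreter enabled, each stack slot carries a companion tag word, which is why the rewritten loop above pushes two NULL_WORDs per local. Per-local layout (illustrative):

    // [ tag   ]  <- pushed first (NULL tag here)
    // [ value ]  <- the local's value slot, zero-initialized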
@@ -1260 +1266 @@
   // start execution
 #ifdef ASSERT
   { Label L;
     const Address monitor_block_top (rbp,
                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movl(rax, monitor_block_top);
+    __ movptr(rax, monitor_block_top);
-    __ cmpl(rax, rsp);
+    __ cmpptr(rax, rsp);
     __ jcc(Assembler::equal, L);
     __ stop("broken stack frame setup in interpreter");
     __ bind(L);
   }
 #endif
@@ -1281 +1287 @@
     // We have decided to profile this method in the interpreter
     __ bind(profile_method);

     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);

-    __ movl(rbx, Address(rbp, method_offset));  // restore methodOop
+    __ movptr(rbx, Address(rbp, method_offset));  // restore methodOop
-    __ movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+    __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-    __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
     __ test_method_data_pointer(rax, profile_method_continue);
-    __ addl(rax, in_bytes(methodDataOopDesc::data_offset()));
+    __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-    __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
     __ jmp(profile_method_continue);
   }
   // Handle overflow of counter and compile method
   __ bind(invocation_counter_overflow);
   generate_counter_overflow(&continue_after_compile);
@@ -1480 +1486 @@
   // Entry point in previous activation (i.e., if the caller was interpreted)
   Interpreter::_rethrow_exception_entry = __ pc();

   // Restore sp to interpreter_frame_last_sp even though we are going
   // to empty the expression stack for the exception processing.
-  __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
   // rax,: exception
   // rdx: return address/pc that threw exception
   __ restore_bcp();     // rsi points to call/send
   __ restore_locals();

@@ -1542 +1548 @@
   //
   // Note that we don't compare the return PC against the
   // deoptimization blob's unpack entry because of the presence of
   // adapter frames in C2.
   Label caller_not_deoptimized;
-  __ movl(rdx, Address(rbp, frame::return_addr_offset * wordSize));
+  __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
   __ testl(rax, rax);
   __ jcc(Assembler::notZero, caller_not_deoptimized);

   // Compute size of arguments for saving when returning to deoptimized caller
   __ get_method(rax);
   __ verify_oop(rax);
   __ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
-  __ shll(rax, Interpreter::logStackElementSize());
+  __ shlptr(rax, Interpreter::logStackElementSize());
   __ restore_locals();
-  __ subl(rdi, rax);
+  __ subptr(rdi, rax);
-  __ addl(rdi, wordSize);
+  __ addptr(rdi, wordSize);
   // Save these arguments
   __ get_thread(rcx);
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);

   __ remove_activation(vtos, rdx,
1590 // no space between the top of the expression stack (current 1596 // no space between the top of the expression stack (current
1591 // last_sp) and the top of stack. Rather than force deopt to 1597 // last_sp) and the top of stack. Rather than force deopt to
1592 // maintain this kind of invariant all the time we call a small 1598 // maintain this kind of invariant all the time we call a small
1593 // fixup routine to move the mutated arguments onto the top of our 1599 // fixup routine to move the mutated arguments onto the top of our
1594 // expression stack if necessary. 1600 // expression stack if necessary.
1595 __ movl(rax, rsp); 1601 __ mov(rax, rsp);
1596 __ movl(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1602 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1597 __ get_thread(rcx); 1603 __ get_thread(rcx);
1598 // PC must point into interpreter here 1604 // PC must point into interpreter here
1599 __ set_last_Java_frame(rcx, noreg, rbp, __ pc()); 1605 __ set_last_Java_frame(rcx, noreg, rbp, __ pc());
1600 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx); 1606 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx);
1601 __ get_thread(rcx); 1607 __ get_thread(rcx);
1602 __ reset_last_Java_frame(rcx, true, true); 1608 __ reset_last_Java_frame(rcx, true, true);
1603 // Restore the last_sp and null it out 1609 // Restore the last_sp and null it out
1604 __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1610 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1605 __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); 1611 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
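A rough model (not the HotSpot routine itself) of the fixup described above: if deopt left slack between the mutated outgoing arguments at the current rsp and the saved expression-stack top in last_sp, the block is slid up so it again sits flush against the expression stack.

  #include <cstddef>
  #include <cstring>

  // rsp_now == rax above, last_sp == rbx above; nbytes is the size of the
  // outgoing-argument block (assumed known to the runtime).
  static void move_outgoing_args_model(char* rsp_now, char* last_sp, size_t nbytes) {
    if (rsp_now + nbytes != last_sp)               // any gap below last_sp?
      memmove(last_sp - nbytes, rsp_now, nbytes);  // close the gap
  }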
1606 1612
1607 __ restore_bcp(); 1613 __ restore_bcp();
1608 __ restore_locals(); 1614 __ restore_locals();
1609 // The method data pointer was incremented already during 1615 // The method data pointer was incremented already during
1610 // call profiling. We have to restore the mdp for the current bcp. 1616 // call profiling. We have to restore the mdp for the current bcp.
1622 Interpreter::_remove_activation_entry = __ pc(); 1628 Interpreter::_remove_activation_entry = __ pc();
1623 1629
1624 // preserve exception over this code sequence 1630 // preserve exception over this code sequence
1625 __ pop_ptr(rax); 1631 __ pop_ptr(rax);
1626 __ get_thread(rcx); 1632 __ get_thread(rcx);
1627 __ movl(Address(rcx, JavaThread::vm_result_offset()), rax); 1633 __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
1628 // remove the activation (without doing throws on illegalMonitorExceptions) 1634 // remove the activation (without doing throws on illegalMonitorExceptions)
1629 __ remove_activation(vtos, rdx, false, true, false); 1635 __ remove_activation(vtos, rdx, false, true, false);
1630 // restore exception 1636 // restore exception
1631 __ get_thread(rcx); 1637 __ get_thread(rcx);
1632 __ movl(rax, Address(rcx, JavaThread::vm_result_offset())); 1638 __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
1633 __ movl(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD); 1639 __ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
1634 __ verify_oop(rax); 1640 __ verify_oop(rax);
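The save/restore pair above exists because remove_activation may make VM calls that clobber rax; the exception oop is parked in the thread's vm_result slot, which the VM scans as a root, and the slot is nulled immediately after the reload. A compact sketch of the pattern; the struct is illustrative, only the slot's role comes from the code above.

  struct ThreadModel { void* vm_result; };  // models JavaThread's vm_result slot

  static void* across_remove_activation(ThreadModel* t, void* exception) {
    t->vm_result = exception;   // movptr(Address(rcx, vm_result_offset()), rax)
    /* ... remove_activation(...) runs here ... */
    exception = t->vm_result;   // reload the exception into rax
    t->vm_result = nullptr;     // don't leave a stale root behind
    return exception;
  }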
1635 1641
1636 // In between activations - previous activation type unknown yet 1642 // In between activations - previous activation type unknown yet
1637 // compute continuation point - the continuation point expects 1643 // compute continuation point - the continuation point expects
1638 // the following registers set up: 1644 // the following registers set up:
1639 // 1645 //
1640 // rax,: exception 1646 // rax,: exception
1641 // rdx: return address/pc that threw exception 1647 // rdx: return address/pc that threw exception
1642 // rsp: expression stack of caller 1648 // rsp: expression stack of caller
1643 // rbp,: rbp, of caller 1649 // rbp,: rbp, of caller
1644 __ pushl(rax); // save exception 1650 __ push(rax); // save exception
1645 __ pushl(rdx); // save return address 1651 __ push(rdx); // save return address
1646 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx); 1652 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
1647 __ movl(rbx, rax); // save exception handler 1653 __ mov(rbx, rax); // save exception handler
1648 __ popl(rdx); // restore return address 1654 __ pop(rdx); // restore return address
1649 __ popl(rax); // restore exception 1655 __ pop(rax); // restore exception
1650 // Note that an "issuing PC" is actually the next PC after the call 1656 // Note that an "issuing PC" is actually the next PC after the call
1651 __ jmp(rbx); // jump to exception handler of caller 1657 __ jmp(rbx); // jump to exception handler of caller
1652 } 1658 }
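The register contract spelled out in the comment above can be read as a small record handed from one activation to the next; the struct below is purely illustrative, not a HotSpot type.

  #include <cstdint>

  struct ContinuationModel {
    void*     exception;    // rax
    uint8_t*  issuing_pc;   // rdx: the PC after the call that threw
    intptr_t* caller_rsp;   // expression stack of caller
    intptr_t* caller_rbp;   // caller's frame pointer
  };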
1653 1659
1654 1660
1663 __ empty_expression_stack(); 1669 __ empty_expression_stack();
1664 __ empty_FPU_stack(); 1670 __ empty_FPU_stack();
1665 __ load_earlyret_value(state); 1671 __ load_earlyret_value(state);
1666 1672
1667 __ get_thread(rcx); 1673 __ get_thread(rcx);
1668 __ movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset())); 1674 __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
1669 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset()); 1675 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1670 1676
1671 // Clear the earlyret state 1677 // Clear the earlyret state
1672 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive); 1678 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
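The earlyret sequence above is a consume-once handshake with JVMTI's ForceEarlyReturn: the forced return value is loaded, then the state word is reset so the request cannot fire twice. A compact model follows; the struct layout is assumed, while earlyret_inactive is the real constant stored above.

  #include <cstdint>

  enum { earlyret_inactive = 0 };  // value written by the stub above

  struct JvmtiThreadStateModel {
    int      earlyret_state;
    intptr_t earlyret_value;       // what load_earlyret_value(state) fetches
  };

  static intptr_t consume_earlyret(JvmtiThreadStateModel* s) {
    intptr_t v = s->earlyret_value;
    s->earlyret_state = earlyret_inactive;  // "Clear the earlyret state"
    return v;
  }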
1673 1679
1714 #ifndef PRODUCT 1720 #ifndef PRODUCT
1715 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 1721 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1716 address entry = __ pc(); 1722 address entry = __ pc();
1717 1723
1718 // prepare expression stack 1724 // prepare expression stack
1719 __ popl(rcx); // pop return address so expression stack is 'pure' 1725 __ pop(rcx); // pop return address so expression stack is 'pure'
1720 __ push(state); // save tosca 1726 __ push(state); // save tosca
1721 1727
1722 // pass tosca registers as arguments & call tracer 1728 // pass tosca registers as arguments & call tracer
1723 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx); 1729 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
1724 __ movl(rcx, rax); // make sure return address is not destroyed by pop(state) 1730 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1725 __ pop(state); // restore tosca 1731 __ pop(state); // restore tosca
1726 1732
1727 // return 1733 // return
1728 __ jmp(rcx); 1734 __ jmp(rcx);
1729 1735
1730 return entry; 1736 return entry;
1731 } 1737 }
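The trace stub above works around two hazards: the tracer must see a "pure" expression stack, so the return address is popped first, and pop(state) would clobber a return address left on the stack, so the VM call hands it back in rax for the final jmp. A plausible model of the tracer's contract; the exact HotSpot signature is not reproduced here.

  #include <cstdint>

  // tosca values arrive in rax/rdx; the popped return address rides in rcx
  // and is returned so the stub can jump through it after restoring tosca.
  extern "C" intptr_t trace_bytecode_model(intptr_t return_pc,
                                           intptr_t tos, intptr_t tos2) {
    // ... print the current bcp, bytecode, and tos values here ...
    (void)tos; (void)tos2;
    return return_pc;  // preserved for the jmp(rcx) at the end of the stub
  }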
1732 1738
1733 1739
1734 void TemplateInterpreterGenerator::count_bytecode() { 1740 void TemplateInterpreterGenerator::count_bytecode() {
1735 __ increment(ExternalAddress((address) &BytecodeCounter::_counter_value)); 1741 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1736 } 1742 }
1737 1743
1738 1744
1739 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 1745 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1740 __ increment(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); 1746 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1741 } 1747 }
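Both generators above compile down to a single memory increment with no atomicity: losing the odd update under a race is acceptable for these diagnostics. A C++ model of the bookkeeping they emit; the histogram size is an assumption, since only the real headers fix it.

  static long bytecode_counter;        // models BytecodeCounter::_counter_value
  static long bytecode_histogram[256]; // models BytecodeHistogram::_counters (size assumed)

  static void count_bytecode_model()           { ++bytecode_counter; }
  static void histogram_bytecode_model(int bc) { ++bytecode_histogram[bc]; }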
1742 1748
1743 1749
1744 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 1750 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1745 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index)); 1751 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1746 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes); 1752 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1747 __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); 1753 __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1748 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); 1754 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1748 ExternalAddress table((address) BytecodePairHistogram::_counters); 1754 ExternalAddress table((address) BytecodePairHistogram::_counters);
1749 Address index(noreg, rbx, Address::times_4); 1755 Address index(noreg, rbx, Address::times_4);
1750 __ increment(ArrayAddress(table, index)); 1756 __ incrementl(ArrayAddress(table, index));
1751 } 1757 }
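The pair histogram keeps a rolling two-bytecode index in memory: shifting right by log2_number_of_codes demotes the old "current" bytecode to "previous", and the new bytecode is or'ed into the high half, giving one counter slot per ordered pair. A sketch with an assumed code width; HotSpot derives the real constant from the bytecode count.

  #include <cstdint>

  static const int log2_number_of_codes = 8;  // assumed: 2^8 >= bytecode count
  static uint32_t pair_index;                                      // models _index
  static uint32_t pair_counters[1u << (2 * log2_number_of_codes)]; // models _counters

  static void histogram_bytecode_pair_model(uint32_t bytecode) {
    pair_index >>= log2_number_of_codes;             // current becomes previous
    pair_index |= bytecode << log2_number_of_codes;  // new bytecode in high half
    ++pair_counters[pair_index];                     // 32-bit counter per pair
  }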
1752 1758
1753 1759
1754 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 1760 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1755 // Call a little run-time stub to avoid blow-up for each bytecode. 1761 // Call a little run-time stub to avoid blow-up for each bytecode.