comparison src/cpu/x86/vm/sharedRuntime_x86_64.cpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories
6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn
author never
date Wed, 27 Aug 2008 00:21:55 -0700
parents d1605aabd0a1
children 52a431267315 70998f2e05ef
comparing 303:fa4d1d240383 with 304:dc7f315e41f7
@@ -118 +118 @@
   // Offsets into the register save area
   // Used by deoptimization when it is managing result register
   // values on its own

   static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
+  static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
   static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
   static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
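(Illustration, not part of the changeset: the new rdx_offset_in_bytes() accessor is presumably needed because the 32-bit port being merged in returns jlongs in edx:eax, so deoptimization now manages rdx as a result register on both ports. A minimal sketch of the accessor pattern; the slot indices below are invented, the real rax_off/rdx_off enum lives elsewhere in this file.)

    // Hypothetical save-area layout; only the BytesPerInt * slot_index
    // pattern matches the code above.
    enum layout {
      rax_off = 0,   // assumed slot index of saved rax
      rdx_off,       // assumed slot index of saved rdx
      xmm0_off,      // assumed slot index of saved xmm0
      return_off     // assumed slot index of the return address
    };
    static const int BytesPerInt = 4;  // one 32-bit stack slot

    static int rdx_offset_in_bytes() { return BytesPerInt * rdx_off; }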
@@ -126 +127 @@

   // During deoptimization only the result registers need to be restored,
@@ -150 +151 @@

   __ enter(); // rsp becomes 16-byte aligned here
   __ push_CPU_state(); // Push a multiple of 16 bytes
   if (frame::arg_reg_save_area_bytes != 0) {
     // Allocate argument register save area
-    __ subq(rsp, frame::arg_reg_save_area_bytes);
+    __ subptr(rsp, frame::arg_reg_save_area_bytes);
   }

   // Set an oopmap for the call site. This oopmap will map all
   // oop-registers and debug-info registers as callee-saved. This
   // will allow deoptimization at this safepoint to find all possible
@@ -264 +265 @@
 }

 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
   if (frame::arg_reg_save_area_bytes != 0) {
     // Pop arg register save area
-    __ addq(rsp, frame::arg_reg_save_area_bytes);
+    __ addptr(rsp, frame::arg_reg_save_area_bytes);
   }
   // Recover CPU state
   __ pop_CPU_state();
   // Get the rbp described implicitly by the calling convention (no oopMap)
-  __ popq(rbp);
+  __ pop(rbp);
 }

 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

   // Just restore result register. Only used by deoptimization. By
@@ -283 +284 @@
   // restoration so only result registers need to be restored here.

   // Restore fp result register
   __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
   // Restore integer result register
-  __ movq(rax, Address(rsp, rax_offset_in_bytes()));
+  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
+  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
+
   // Pop all of the register save area off the stack except the return address
-  __ addq(rsp, return_offset_in_bytes());
+  __ addptr(rsp, return_offset_in_bytes());
 }

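(Illustration, not part of the changeset: the dominant edit throughout this file replaces the explicitly 64-bit forms -- movq, addq, subq, andq, popq, cmpq -- with pointer-width forms -- movptr, addptr, subptr, andptr, pop, cmpptr -- so that one shared source file can serve both the 32- and 64-bit ports being merged. A plausible sketch of how such an op dispatches, assuming the usual LP64_ONLY/NOT_LP64 conditional macros; the real definitions live in the shared x86 assembler code, which this diff does not show.)

    // Sketch only; the actual signatures in the merged assembler may differ.
    void MacroAssembler::movptr(Register dst, Address src) {
      LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
    }

    void MacroAssembler::subptr(Register dst, int32_t imm) {
      LP64_ONLY(subq(dst, imm)) NOT_LP64(subl(dst, imm));
    }

(This is also, presumably, why immediates are now written (int32_t)NULL_WORD: the constant must fit the 32-bit immediate form that both encodings share, and the cast keeps the overload unambiguous on either port.)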
@@ -293 +296 @@
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
@@ -405 +408 @@

 // Patch the callers callsite with entry to compiled code if it exists.
 static void patch_callers_callsite(MacroAssembler *masm) {
   Label L;
   __ verify_oop(rbx);
-  __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD);
+  __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
   __ jcc(Assembler::equal, L);

   // Save the current stack pointer
-  __ movq(r13, rsp);
+  __ mov(r13, rsp);
   // Schedule the branch target address early.
   // Call into the VM to patch the caller, then jump to compiled callee
   // rax isn't live so capture return address while we easily can
-  __ movq(rax, Address(rsp, 0));
+  __ movptr(rax, Address(rsp, 0));

   // align stack so push_CPU_state doesn't fault
-  __ andq(rsp, -(StackAlignmentInBytes));
+  __ andptr(rsp, -(StackAlignmentInBytes));
   __ push_CPU_state();


   __ verify_oop(rbx);
   // VM needs caller's callsite
@@ -428 +431 @@
   // This needs to be a long call since we will relocate this adapter to
   // the codeBuffer and it may not reach

   // Allocate argument register save area
   if (frame::arg_reg_save_area_bytes != 0) {
-    __ subq(rsp, frame::arg_reg_save_area_bytes);
+    __ subptr(rsp, frame::arg_reg_save_area_bytes);
   }
-  __ movq(c_rarg0, rbx);
-  __ movq(c_rarg1, rax);
+  __ mov(c_rarg0, rbx);
+  __ mov(c_rarg1, rax);
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

   // De-allocate argument register save area
   if (frame::arg_reg_save_area_bytes != 0) {
-    __ addq(rsp, frame::arg_reg_save_area_bytes);
+    __ addptr(rsp, frame::arg_reg_save_area_bytes);
   }

   __ pop_CPU_state();
   // restore sp
-  __ movq(rsp, r13);
+  __ mov(rsp, r13);
   __ bind(L);
 }
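(Illustration, not part of the changeset: the andptr(rsp, -(StackAlignmentInBytes)) above works because anding with the negative of a power of two clears the low bits, rounding the stack pointer down -- i.e. toward more allocated stack -- to the alignment boundary, which is what keeps push_CPU_state's fxsave from faulting. A self-contained check of that arithmetic; 16 is the amd64 ABI alignment.)

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t StackAlignmentInBytes = 16;
      intptr_t rsp = 0x7fffffffe438;      // a misaligned stack pointer
      rsp &= -StackAlignmentInBytes;      // same trick as andptr(rsp, -16)
      assert(rsp == (intptr_t)0x7fffffffe430);  // rounded DOWN to 16 bytes
      assert(rsp % 16 == 0);
      return 0;
    }

(Saving the incoming sp in r13 first, and restoring it after pop_CPU_state, is what makes the rounding safe to undo.)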
@@ -449 +452 @@

 // Helper function to put tags in interpreter stack.
 static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
   if (TaggedStackInterpreter) {
     int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
     if (sig == T_OBJECT || sig == T_ARRAY) {
-      __ mov64(Address(rsp, tag_offset), frame::TagReference);
+      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
     } else if (sig == T_LONG || sig == T_DOUBLE) {
       int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
-      __ mov64(Address(rsp, next_tag_offset), frame::TagValue);
-      __ mov64(Address(rsp, tag_offset), frame::TagValue);
+      __ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
+      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
     } else {
-      __ mov64(Address(rsp, tag_offset), frame::TagValue);
+      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
     }
   }
 }

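(Illustration, not part of the changeset: with the TaggedStackInterpreter flag on, each interpreter stack slot carries a companion tag word saying whether the slot holds a reference or a plain value; tag_stack writes frame::TagReference for oops and frame::TagValue otherwise, twice for the two halves of a long/double. A rough sketch of the layout this implies -- the pairing and the tag values are assumptions, the real geometry comes from Interpreter::expr_tag_offset_in_bytes(), which is not in this diff.)

    enum Tag { TagValue = 0, TagReference = 1 };  // values assumed

    struct TaggedSlot {   // conceptual shape of one tagged interpreter slot
      intptr_t tag;       // TagValue or TagReference, consulted by debug/GC code
      intptr_t value;     // the actual local or expression-stack operand
    };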
@@ -488 +491 @@

   // stack is aligned, keep it that way
   extraspace = round_to(extraspace, 2*wordSize);

   // Get return address
-  __ popq(rax);
+  __ pop(rax);

   // set senderSP value
-  __ movq(r13, rsp);
+  __ mov(r13, rsp);

-  __ subq(rsp, extraspace);
+  __ subptr(rsp, extraspace);

   // Store the return address in the expected location
-  __ movq(Address(rsp, 0), rax);
+  __ movptr(Address(rsp, 0), rax);

   // Now write the args into the outgoing interpreter space
   for (int i = 0; i < total_args_passed; i++) {
     if (sig_bt[i] == T_VOID) {
       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
@@ -535 +538 @@
       // memory to memory use rax
       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
       if (!r_2->is_valid()) {
         // sign extend??
         __ movl(rax, Address(rsp, ld_off));
-        __ movq(Address(rsp, st_off), rax);
+        __ movptr(Address(rsp, st_off), rax);
         tag_stack(masm, sig_bt[i], st_off);

       } else {

         __ movq(rax, Address(rsp, ld_off));
@@ -551 +554 @@
           // st_off == MSW, next_off == LSW
           __ movq(Address(rsp, next_off), rax);
 #ifdef ASSERT
           // Overwrite the unused slot with known junk
           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
-          __ movq(Address(rsp, st_off), rax);
+          __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
           tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movq(Address(rsp, st_off), rax);
           tag_stack(masm, sig_bt[i], st_off);
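(Illustration, not part of the changeset: for T_LONG and T_DOUBLE the interpreter reserves two stack slots, but the 64-bit value occupies only the slot the comment calls the LSW (next_off); the other slot is dead, which is why ASSERT builds deliberately poison it with recognizable junk. The same convention in plain C++:)

    #include <cstdint>

    // Two interpreter slots for one jlong: the value lands in one slot,
    // the unused companion slot is poisoned in debug builds (as above).
    void store_long(intptr_t* st_slot, intptr_t* next_slot, int64_t v) {
    #ifdef ASSERT
      *st_slot = (intptr_t)0xdeadffffdeadaaaaULL;  // known junk, catches misuse
    #endif
      *next_slot = (intptr_t)v;                    // whole 64-bit value, one slot
    }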
@@ -574 +577 @@
       if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
         // long/double in gpr
 #ifdef ASSERT
         // Overwrite the unused slot with known junk
         __ mov64(rax, CONST64(0xdeadffffdeadaaab));
-        __ movq(Address(rsp, st_off), rax);
+        __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
         __ movq(Address(rsp, next_off), r);
         tag_stack(masm, sig_bt[i], next_off);
       } else {
-        __ movq(Address(rsp, st_off), r);
+        __ movptr(Address(rsp, st_off), r);
         tag_stack(masm, sig_bt[i], st_off);
       }
     }
   } else {
     assert(r_1->is_XMMRegister(), "");
@@ -593 +596 @@
       tag_stack(masm, sig_bt[i], st_off);
     } else {
 #ifdef ASSERT
       // Overwrite the unused slot with known junk
       __ mov64(rax, CONST64(0xdeadffffdeadaaac));
-      __ movq(Address(rsp, st_off), rax);
+      __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
       __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
       tag_stack(masm, sig_bt[i], next_off);
     }
   }

   // Schedule the branch target address early.
-  __ movq(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+  __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
   __ jmp(rcx);
 }

 static void gen_i2c_adapter(MacroAssembler *masm,
                             int total_args_passed,
@@ -629 +632 @@
   // we must align the stack to 16 bytes on an i2c entry else we
   // lose alignment we expect in all compiled code and register
   // save code can segv when fxsave instructions find improperly
   // aligned stack pointer.

-  __ movq(rax, Address(rsp, 0));
+  __ movptr(rax, Address(rsp, 0));

   // Cut-out for having no stack args. Since up to 2 int/oop args are passed
   // in registers, we will occasionally have no stack args.
   int comp_words_on_stack = 0;
   if (comp_args_on_stack) {
@@ -643 +646 @@

     // Convert 4-byte c2 stack slots to words.
     comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to minimum stack alignment, in wordSize
     comp_words_on_stack = round_to(comp_words_on_stack, 2);
-    __ subq(rsp, comp_words_on_stack * wordSize);
+    __ subptr(rsp, comp_words_on_stack * wordSize);
   }
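(Illustration, not part of the changeset: a worked instance of the slot arithmetic above, assuming the usual 64-bit constants -- stack_slot_size = 4, wordSize = 8, LogBytesPerWord = 3 -- and assuming round_to rounds up to a multiple of its second argument.)

    #include <cassert>

    constexpr int round_to(int x, int m) { return (x + m - 1) / m * m; }  // assumed semantics

    int main() {
      const int stack_slot_size = 4, wordSize = 8, LogBytesPerWord = 3;
      int comp_args_on_stack = 5;                        // five 4-byte c2 slots
      int words = round_to(comp_args_on_stack * stack_slot_size, wordSize)
                  >> LogBytesPerWord;                    // 20 -> 24 bytes -> 3 words
      words = round_to(words, 2);                        // keep rsp 16-byte aligned
      assert(words == 4);
      return 0;
    }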
@@ -650 +653 @@


   // Ensure compiled code always sees stack at proper alignment
-  __ andq(rsp, -16);
+  __ andptr(rsp, -16);

   // push the return address, misaligning the stack so that the youngest frame
   // sees it exactly as the placement of a call instruction would have left it
-  __ pushq(rax);
+  __ push(rax);

   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
-  __ movq(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+  __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));

   // Now generate the shuffle code. Pick up all register args and move the
   // rest through the floating point stack top.
   for (int i = 0; i < total_args_passed; i++) {
     if (sig_bt[i] == T_VOID) {
@@ -695 +698 @@
       // Convert stack slot to an SP offset (+ wordSize to account for return address )
       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
       if (!r_2->is_valid()) {
         // sign extend???
         __ movl(rax, Address(r13, ld_off));
-        __ movq(Address(rsp, st_off), rax);
+        __ movptr(Address(rsp, st_off), rax);
       } else {
         //
         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
         // So we must adjust where to pick up the data to match the interpreter.
@@ -749 +752 @@
   // invisible to the stack walking code. Unfortunately if
   // we try and find the callee by normal means a safepoint
   // is possible. So we stash the desired callee in the thread
   // and the vm will find it there should this case occur.

-  __ movq(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
+  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

   // put methodOop where a c2i would expect should we end up there
   // only needed because c2 resolve stubs return methodOop as a result in
   // rax
-  __ movq(rax, rbx);
+  __ mov(rax, rbx);
   __ jmp(r11);
 }

 // ---------------------------------------------------------------
 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
@@ -790 +793 @@
   {
     __ verify_oop(holder);
     __ load_klass(temp, receiver);
     __ verify_oop(temp);

-    __ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
-    __ movq(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+    __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
+    __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
     __ jcc(Assembler::equal, ok);
     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

     __ bind(ok);
     // Method might have been compiled since the call site was patched to
     // interpreted; if that is the case treat it as a miss so we can get
     // the call site corrected.
-    __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD);
+    __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, skip_fixup);
     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   }

   address c2i_entry = __ pc();
@@ -978 +981 @@
     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
     if (is_receiver) {
       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
     }

-    __ cmpq(Address(rbp, reg2offset_in(src.first())), (int)NULL_WORD);
-    __ leaq(rHandle, Address(rbp, reg2offset_in(src.first())));
+    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
+    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
     // conditionally move a NULL
-    __ cmovq(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
+    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
   } else {

     // Oop is in a register we must store it to the space we reserve
     // on the stack for oop_handles and pass a handle if oop is non-NULL

@@ -1009 +1012 @@
     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
     int offset = oop_slot*VMRegImpl::stack_slot_size;

     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     // Store oop in handle area, may be NULL
-    __ movq(Address(rsp, offset), rOop);
+    __ movptr(Address(rsp, offset), rOop);
     if (is_receiver) {
       *receiver_offset = offset;
     }

-    __ cmpq(rOop, (int)NULL);
-    __ leaq(rHandle, Address(rsp, offset));
+    __ cmpptr(rOop, (int32_t)NULL_WORD);
+    __ lea(rHandle, Address(rsp, offset));
     // conditionally move a NULL from the handle area where it was just stored
-    __ cmovq(Assembler::equal, rHandle, Address(rsp, offset));
+    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
   }

   // If arg is on the stack then place it otherwise it is already in correct reg.
   if (dst.first()->is_stack()) {
-    __ movq(Address(rsp, reg2offset_out(dst.first())), rHandle);
+    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
   }
 }
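(Illustration, not part of the changeset: this is the "handlize" idiom -- a native method never receives a raw oop, it receives the address of a stack slot holding the oop, except that a NULL oop must be passed as a NULL handle, hence the cmovptr after the store. In plain C++ the logic amounts to the following; the type names are stand-ins.)

    typedef void* oop;      // stand-in for the VM's oop type
    typedef oop*  jhandle;  // a handle: pointer to a slot containing the oop

    // Mirrors the store + cmpptr/cmovptr sequence above; the slot lives in
    // the oop-handle area and is covered by the oopmap even when NULL.
    jhandle handlize(oop value, oop* slot) {
      *slot = value;                         // store oop in handle area, may be NULL
      return (value == NULL) ? NULL : slot;  // NULL oop => NULL handle
    }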
@@ -1030 +1033 @@

 // A float arg may have to do float reg int reg conversion
 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
@@ -1037 +1040 @@
   // This greatly simplifies the cases here compared to sparc.

   if (src.first()->is_stack()) {
     if (dst.first()->is_stack()) {
       __ movl(rax, Address(rbp, reg2offset_in(src.first())));
-      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
+      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
     } else {
       // stack to reg
       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
       __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
     }
@@ -1066 +1069 @@
   // This greatly simplifies the cases here compared to sparc.

   if (src.is_single_phys_reg() ) {
     if (dst.is_single_phys_reg()) {
       if (dst.first() != src.first()) {
-        __ movq(dst.first()->as_Register(), src.first()->as_Register());
+        __ mov(dst.first()->as_Register(), src.first()->as_Register());
       }
     } else {
       assert(dst.is_single_reg(), "not a stack pair");
       __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
     }
@@ -1122 +1125 @@
     case T_DOUBLE:
       __ movdbl(Address(rbp, -wordSize), xmm0);
       break;
     case T_VOID: break;
     default: {
-      __ movq(Address(rbp, -wordSize), rax);
+      __ movptr(Address(rbp, -wordSize), rax);
     }
   }
 }

 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
@@ -1139 +1142 @@
     case T_DOUBLE:
       __ movdbl(xmm0, Address(rbp, -wordSize));
       break;
     case T_VOID: break;
     default: {
-      __ movq(rax, Address(rbp, -wordSize));
+      __ movptr(rax, Address(rbp, -wordSize));
     }
   }
 }

 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
   for ( int i = first_arg ; i < arg_count ; i++ ) {
     if (args[i].first()->is_Register()) {
-      __ pushq(args[i].first()->as_Register());
+      __ push(args[i].first()->as_Register());
     } else if (args[i].first()->is_XMMRegister()) {
-      __ subq(rsp, 2*wordSize);
+      __ subptr(rsp, 2*wordSize);
       __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
     }
   }
 }

 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
   for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
     if (args[i].first()->is_Register()) {
-      __ popq(args[i].first()->as_Register());
+      __ pop(args[i].first()->as_Register());
     } else if (args[i].first()->is_XMMRegister()) {
       __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
-      __ addq(rsp, 2*wordSize);
+      __ addptr(rsp, 2*wordSize);
     }
   }
 }

 // ---------------------------------------------------------------------------
@@ -1301 +1304 @@

   Label ok;
   Label exception_pending;

   __ verify_oop(receiver);
-  __ pushq(tmp); // spill (any other registers free here???)
+  __ push(tmp); // spill (any other registers free here???)
   __ load_klass(tmp, receiver);
   __ cmpq(ic_reg, tmp);
   __ jcc(Assembler::equal, ok);

-  __ popq(tmp);
+  __ pop(tmp);
   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

   __ bind(ok);
-  __ popq(tmp);
+  __ pop(tmp);

   // Verified entry point must be aligned
   __ align(8);

   int vep_offset = ((intptr_t)__ pc()) - start;
@@ -1333 +1336 @@
   }

   // Generate a new frame for the wrapper.
   __ enter();
   // -2 because return address is already present and so is saved rbp
-  __ subq(rsp, stack_size - 2*wordSize);
+  __ subptr(rsp, stack_size - 2*wordSize);

   // Frame is now completed as far as size and linkage.

   int frame_complete = ((intptr_t)__ pc()) - start;

 #ifdef ASSERT
   {
     Label L;
-    __ movq(rax, rsp);
-    __ andq(rax, -16); // must be 16 byte boundary (see amd64 ABI)
-    __ cmpq(rax, rsp);
+    __ mov(rax, rsp);
+    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
+    __ cmpptr(rax, rsp);
     __ jcc(Assembler::equal, L);
     __ stop("improperly aligned stack");
     __ bind(L);
   }
 #endif /* ASSERT */
@@ -1465 +1468 @@

   // load oop into a register
   __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));

   // Now handlize the static class mirror; it's known not-null.
-  __ movq(Address(rsp, klass_offset), oop_handle_reg);
+  __ movptr(Address(rsp, klass_offset), oop_handle_reg);
   map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

   // Now get the handle
-  __ leaq(oop_handle_reg, Address(rsp, klass_offset));
+  __ lea(oop_handle_reg, Address(rsp, klass_offset));
   // store the klass handle as second argument
-  __ movq(c_rarg1, oop_handle_reg);
+  __ movptr(c_rarg1, oop_handle_reg);
   // and protect the arg if we must spill
   c_arg--;
 }

 // Change state to native (we save the return address in the thread, since it might not
@@ -1519 +1522 @@


   const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

   // Get the handle (the 2nd argument)
-  __ movq(oop_handle_reg, c_rarg1);
+  __ mov(oop_handle_reg, c_rarg1);

   // Get address of the box

-  __ leaq(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+  __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

   // Load the oop from the handle
-  __ movq(obj_reg, Address(oop_handle_reg, 0));
+  __ movptr(obj_reg, Address(oop_handle_reg, 0));

   if (UseBiasedLocking) {
     __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
   }

   // Load immediate 1 into swap_reg %rax
   __ movl(swap_reg, 1);

   // Load (object->mark() | 1) into swap_reg %rax
-  __ orq(swap_reg, Address(obj_reg, 0));
+  __ orptr(swap_reg, Address(obj_reg, 0));

   // Save (object->mark() | 1) into BasicLock's displaced header
-  __ movq(Address(lock_reg, mark_word_offset), swap_reg);
+  __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

   if (os::is_MP()) {
     __ lock();
   }

   // src -> dest iff dest == rax else rax <- dest
-  __ cmpxchgq(lock_reg, Address(obj_reg, 0));
+  __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
   __ jcc(Assembler::equal, lock_done);

   // Hmm should this move to the slow path code area???

   // Test if the oopMark is an obvious stack pointer, i.e.,
@@ -1560 +1563 @@
   //  expression: ((mark - rsp) & (3 - os::vm_page_size())),
   // assuming both stack pointer and pagesize have their
   // least significant 2 bits clear.
   // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

-  __ subq(swap_reg, rsp);
-  __ andq(swap_reg, 3 - os::vm_page_size());
+  __ subptr(swap_reg, rsp);
+  __ andptr(swap_reg, 3 - os::vm_page_size());

   // Save the test result, for recursive case, the result is zero
-  __ movq(Address(lock_reg, mark_word_offset), swap_reg);
+  __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
   __ jcc(Assembler::notEqual, slow_path_lock);

   // Slow path will re-enter here

   __ bind(lock_done);
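(Illustration, not part of the changeset: this is the standard HotSpot stack-lock fast path. With 4K pages, 3 - os::vm_page_size() is -4093, i.e. 0x...fffff003, so ((mark - rsp) & mask) == 0 exactly when the mark seen by the failed cmpxchg is a 4-byte-aligned address less than one page above rsp -- meaning it points at an earlier BasicLock on this same thread's stack and the acquire is recursive, recorded by the zero stored in the displaced-header slot. A compact restatement, using a GCC/Clang builtin in place of the lock-prefixed cmpxchg:)

    #include <cstdint>

    // Sketch of the fast path above; page size 4096 is an assumption.
    bool fast_lock(intptr_t* obj_mark, intptr_t* box, intptr_t rsp) {
      intptr_t mark = *obj_mark | 1;   // (object->mark() | 1): the unlocked pattern
      *box = mark;                     // save displaced header into the BasicLock
      intptr_t seen = __sync_val_compare_and_swap(obj_mark, mark, (intptr_t)box);
      if (seen == mark) return true;   // stack lock installed
      intptr_t test = (seen - rsp) & (3 - 4096);  // 0 iff aligned and within a page above rsp
      *box = test;                     // zero in the box denotes a recursive lock
      return test == 0;                // otherwise: take slow_path_lock
    }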
@@ -1578 +1581 @@
   // Finally just about ready to make the JNI call


   // get JNIEnv* which is first argument to native

-  __ leaq(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

   // Now set thread in native
-  __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
+  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);

   __ call(RuntimeAddress(method->native_function()));

   // Either restore the MXCSR register after returning from the JNI Call
   // or verify that it wasn't changed.
   if (RestoreMXCSROnJNICalls) {
-    __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+    __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));

   }
   else if (CheckJNICalls ) {
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
   }


   // Unpack native results.
   switch (ret_type) {
@@ -1622 +1625 @@
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
   // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
   // VM thread changes sync state to synchronizing and suspends threads for GC.
   // Thread A is resumed to finish this native method, but doesn't block here since it
   // didn't see any synchronization in progress, and escapes.
-  __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
+  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

   if(os::is_MP()) {
     if (UseMembar) {
       // Force this write out before the read below
       __ membar(Assembler::Membar_mask_bits(
@@ -1660 +1663 @@
     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
     // by hand.
     //
     save_native_result(masm, ret_type, stack_slots);
-    __ movq(c_rarg0, r15_thread);
-    __ movq(r12, rsp); // remember sp
-    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andq(rsp, -16); // align stack as required by ABI
+    __ mov(c_rarg0, r15_thread);
+    __ mov(r12, rsp); // remember sp
+    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+    __ andptr(rsp, -16); // align stack as required by ABI
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
-    __ movq(rsp, r12); // restore sp
+    __ mov(rsp, r12); // restore sp
     __ reinit_heapbase();
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
     __ bind(Continue);
   }
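(Illustration, not part of the changeset: the transition above is a classic store-then-load protocol -- the thread must publish _thread_in_native_trans before it reads the safepoint state, so on MP hardware either the membar (a StoreLoad barrier) or a serializing page write has to sit between the two accesses. The same requirement expressed with C++11 atomics; all names and values here are invented stand-ins for the HotSpot primitives, which this diff shows only in assembler form.)

    #include <atomic>

    std::atomic<int>  thread_state;       // stand-in for the JavaThread field
    std::atomic<bool> safepoint_pending;  // stand-in for the global sync state

    bool must_block_for_safepoint() {
      thread_state.store(2 /* "_thread_in_native_trans", value invented */,
                         std::memory_order_seq_cst);          // store...
      // seq_cst store + load supplies the StoreLoad ordering the membar provides
      return safepoint_pending.load(std::memory_order_seq_cst);  // ...then load
    }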
@@ -1689 +1692 @@
   Label unlock_done;
   Label slow_path_unlock;
   if (method->is_synchronized()) {

     // Get locked oop from the handle we passed to jni
-    __ movq(obj_reg, Address(oop_handle_reg, 0));
+    __ movptr(obj_reg, Address(oop_handle_reg, 0));

     Label done;

     if (UseBiasedLocking) {
       __ biased_locking_exit(obj_reg, old_hdr, done);
     }

     // Simple recursive lock?

-    __ cmpq(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int)NULL_WORD);
+    __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, done);

     // Must save rax if it is live now because cmpxchg must use it
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
       save_native_result(masm, ret_type, stack_slots);
     }


     // get address of the stack lock
-    __ leaq(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+    __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
     // get old displaced header
-    __ movq(old_hdr, Address(rax, 0));
+    __ movptr(old_hdr, Address(rax, 0));

     // Atomic swap old header if oop still contains the stack lock
     if (os::is_MP()) {
       __ lock();
     }
-    __ cmpxchgq(old_hdr, Address(obj_reg, 0));
+    __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
     __ jcc(Assembler::notEqual, slow_path_unlock);

     // slow path re-enters here
     __ bind(unlock_done);
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
@@ -1744 +1747 @@
   __ reset_last_Java_frame(false, true);

   // Unpack oop result
   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
     Label L;
-    __ testq(rax, rax);
+    __ testptr(rax, rax);
     __ jcc(Assembler::zero, L);
-    __ movq(rax, Address(rax, 0));
+    __ movptr(rax, Address(rax, 0));
     __ bind(L);
     __ verify_oop(rax);
   }
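(Illustration, not part of the changeset: the mirror image of the "handlize" sketch earlier -- a native method returning an object returns a handle, so the wrapper must convert a NULL handle to a NULL oop and load through a non-NULL one, which is what the testptr/jcc/movptr sequence above does. In the same stand-in C++ terms:)

    typedef void* oop;
    typedef oop*  jhandle;

    // Matches the unpack sequence above: rax holds the handle on entry
    // and the raw (possibly NULL) oop on exit.
    oop unpack_result(jhandle h) {
      return (h == NULL) ? NULL : *h;  // NULL handle stays NULL, else dereference
    }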
@@ -1755 +1758 @@

   // reset handle block
-  __ movq(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int)NULL_WORD);
+  __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
+  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

   // pop our frame

   __ leave();

   // Any exception pending?
-  __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+  __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
   __ jcc(Assembler::notEqual, exception_pending);

   // Return

   __ ret(0);
@@ -1788 +1791 @@
   // args are (oop obj, BasicLock* lock, JavaThread* thread)

   // protect the args we've loaded
   save_args(masm, total_c_args, c_arg, out_regs);

-  __ movq(c_rarg0, obj_reg);
-  __ movq(c_rarg1, lock_reg);
-  __ movq(c_rarg2, r15_thread);
+  __ mov(c_rarg0, obj_reg);
+  __ mov(c_rarg1, lock_reg);
+  __ mov(c_rarg2, r15_thread);

   // Not a leaf but we have last_Java_frame setup as we want
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
   restore_args(masm, total_c_args, c_arg, out_regs);

 #ifdef ASSERT
   { Label L;
-    __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, L);
     __ stop("no pending exception allowed on exit from monitorenter");
     __ bind(L);
   }
 #endif
@@ -1818 +1821 @@

   if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
     save_native_result(masm, ret_type, stack_slots);
   }

-  __ leaq(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+  __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

-  __ movq(c_rarg0, obj_reg);
-  __ movq(r12, rsp); // remember sp
-  __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andq(rsp, -16); // align stack as required by ABI
+  __ mov(c_rarg0, obj_reg);
+  __ mov(r12, rsp); // remember sp
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // align stack as required by ABI

   // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
   // NOTE that obj_reg == rbx currently
-  __ movq(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
-  __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+  __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
+  __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);

   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
-  __ movq(rsp, r12); // restore sp
+  __ mov(rsp, r12); // restore sp
   __ reinit_heapbase();
 #ifdef ASSERT
   {
     Label L;
-    __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
     __ jcc(Assembler::equal, L);
     __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
     __ bind(L);
   }
 #endif /* ASSERT */

-  __ movq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
+  __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);

   if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
     restore_native_result(masm, ret_type, stack_slots);
   }
   __ jmp(unlock_done);
@@ -1858 +1861 @@

   // SLOW PATH Reguard the stack if needed

   __ bind(reguard);
   save_native_result(masm, ret_type, stack_slots);
-  __ movq(r12, rsp); // remember sp
-  __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andq(rsp, -16); // align stack as required by ABI
+  __ mov(r12, rsp); // remember sp
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // align stack as required by ABI
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-  __ movq(rsp, r12); // restore sp
+  __ mov(rsp, r12); // restore sp
   __ reinit_heapbase();
   restore_native_result(masm, ret_type, stack_slots);
   // and continue
   __ jmp(reguard_done);

@@ -2572 +2575 @@
   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

   // Normal deoptimization. Save exec mode for unpack_frames.
   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
   __ jmp(cont);
+
+  int reexecute_offset = __ pc() - start;
+
+  // Reexecute case
+  // the return address is the pc that describes what bci to re-execute at
+
+  // No need to update map as each call to save_live_registers will produce identical oopmap
+  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+
+  __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
+  __ jmp(cont);
+
   int exception_offset = __ pc() - start;

   // Prolog for exception case

-  // Push throwing pc as return address
-  __ pushq(rdx);
+  // all registers are dead at this entry point, except for rax, and
+  // rdx which contain the exception oop and exception pc
+  // respectively. Set them in TLS and fall thru to the
+  // unpack_with_exception_in_tls entry point.
+
+  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
+  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
+
+  int exception_in_tls_offset = __ pc() - start;
+
+  // new implementation because exception oop is now passed in JavaThread
+
+  // Prolog for exception case
+  // All registers must be preserved because they might be used by LinearScan
+  // Exception oop and throwing PC are passed in JavaThread
+  // tos: stack at point of call to method that threw the exception (i.e. only
+  // args are on the stack, no return address)
+
+  // make room on stack for the return address
+  // It will be patched later with the throwing pc. The correct value is not
+  // available now because loading it from memory would destroy registers.
+  __ push(0);

   // Save everything in sight.
   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

+  // Now it is safe to overwrite any register
+
   // Deopt during an exception. Save exec mode for unpack_frames.
   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
+
+  // load throwing pc from JavaThread and patch it as the return address
+  // of the current frame. Then clear the field in JavaThread
+
+  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
+  __ movptr(Address(rbp, wordSize), rdx);
+  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+#ifdef ASSERT
+  // verify that there is really an exception oop in JavaThread
+  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+  __ verify_oop(rax);
+
+  // verify that there is no pending exception
+  Label no_pending_exception;
+  __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
+  __ testptr(rax, rax);
+  __ jcc(Assembler::zero, no_pending_exception);
+  __ stop("must not have pending exception here");
+  __ bind(no_pending_exception);
+#endif

   __ bind(cont);

   // Call C code. Need thread and this frame, but NOT official VM entry
   // crud. We cannot block on this call, no GC can happen.
@@ -2597 +2655 @@
   // fetch_unroll_info needs to call last_java_frame().

   __ set_last_Java_frame(noreg, noreg, NULL);
 #ifdef ASSERT
   { Label L;
-    __ cmpq(Address(r15_thread,
-                    JavaThread::last_Java_fp_offset()),
-            0);
+    __ cmpptr(Address(r15_thread,
+                      JavaThread::last_Java_fp_offset()),
+              (int32_t)0);
     __ jcc(Assembler::equal, L);
     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
     __ bind(L);
   }
 #endif // ASSERT
-  __ movq(c_rarg0, r15_thread);
+  __ mov(c_rarg0, r15_thread);
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));

   // Need to have an oopmap that tells fetch_unroll_info where to
   // find any register it might need.
   oop_maps->add_gc_map(__ pc() - start, map);

   __ reset_last_Java_frame(false, false);

   // Load UnrollBlock* into rdi
-  __ movq(rdi, rax);
+  __ mov(rdi, rax);
+
+  Label noException;
+  __ cmpl(r12, Deoptimization::Unpack_exception); // Was exception pending?
+  __ jcc(Assembler::notEqual, noException);
+  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+  // QQQ this is useless it was NULL above
+  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
+  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+  __ verify_oop(rax);
+
+  // Overwrite the result registers with the exception results.
+  __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
+  // I think this is useless
+  __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
+
+  __ bind(noException);

   // Only register save data is on the stack.
   // Now restore the result registers. Everything else is either dead
   // or captured in the vframeArray.
   RegisterSaver::restore_result_registers(masm);
2638 // and using the size of frame 2 to adjust the stack 2714 // and using the size of frame 2 to adjust the stack
2639 // when we are done, the return to frame 3 will still be on the stack. 2715 // when we are done, the return to frame 3 will still be on the stack.
2640 2716
2641 // Pop deoptimized frame 2717 // Pop deoptimized frame
2642 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes())); 2718 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2643 __ addq(rsp, rcx); 2719 __ addptr(rsp, rcx);
2644 2720
2645 // rsp should be pointing at the return address to the caller (3) 2721 // rsp should be pointing at the return address to the caller (3)
2646 2722
2647 // Stack bang to make sure there's enough room for these interpreter frames. 2723 // Stack bang to make sure there's enough room for these interpreter frames.
2648 if (UseStackBanging) { 2724 if (UseStackBanging) {
2649 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); 2725 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2650 __ bang_stack_size(rbx, rcx); 2726 __ bang_stack_size(rbx, rcx);
2651 } 2727 }
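bang_stack_size itself is emitted as machine code; the idea is to probe the region the new interpreter frames will occupy one page at a time, so that any stack overflow faults here, eagerly, rather than in the middle of frame construction. A sketch under assumed 4K pages and a downward-growing stack (the VM really uses os::vm_page_size()):

#include <cstddef>

static const size_t page_size = 4096;     // assumption for the sketch

// Touch one byte per page across total_frame_bytes below sp.
static void bang_stack(volatile char* sp, size_t total_frame_bytes) {
  for (size_t off = page_size; off <= total_frame_bytes; off += page_size) {
    sp[-(ptrdiff_t)off] = 0;              // each write forces the page in, or faults now
  }
}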
2652 2728
2653 // Load address of array of frame pcs into rcx 2729 // Load address of array of frame pcs into rcx
2654 __ movq(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); 2730 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2655 2731
2656 // Trash the old pc 2732 // Trash the old pc
2657 __ addq(rsp, wordSize); 2733 __ addptr(rsp, wordSize);
2658 2734
2659 // Load address of array of frame sizes into rsi 2735 // Load address of array of frame sizes into rsi
2660 __ movq(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); 2736 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2661 2737
2662 // Load counter into rdx 2738 // Load counter into rdx
2663 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); 2739 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2664 2740
2665 // Pick up the initial fp we should save 2741 // Pick up the initial fp we should save
2666 __ movq(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); 2742 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2667 2743
2668 // Now adjust the caller's stack to make up for the extra locals 2744 // Now adjust the caller's stack to make up for the extra locals
2669 // but record the original sp so that we can save it in the skeletal interpreter 2745 // but record the original sp so that we can save it in the skeletal interpreter
2670 // frame; the stack walking of interpreter_sender will then get the unextended sp 2746 // frame; the stack walking of interpreter_sender will then get the unextended sp
2671 // value and not the "real" sp value. 2747 // value and not the "real" sp value.
2672 2748
2673 const Register sender_sp = r8; 2749 const Register sender_sp = r8;
2674 2750
2675 __ movq(sender_sp, rsp); 2751 __ mov(sender_sp, rsp);
2676 __ movl(rbx, Address(rdi, 2752 __ movl(rbx, Address(rdi,
2677 Deoptimization::UnrollBlock:: 2753 Deoptimization::UnrollBlock::
2678 caller_adjustment_offset_in_bytes())); 2754 caller_adjustment_offset_in_bytes()));
2679 __ subq(rsp, rbx); 2755 __ subptr(rsp, rbx);
2680 2756
2681 // Push interpreter frames in a loop 2757 // Push interpreter frames in a loop
2682 Label loop; 2758 Label loop;
2683 __ bind(loop); 2759 __ bind(loop);
2684 __ movq(rbx, Address(rsi, 0)); // Load frame size 2760 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2685 __ subq(rbx, 2*wordSize); // We'll push pc and rbp by hand 2761 #ifdef CC_INTERP
2686 __ pushq(Address(rcx, 0)); // Save return address 2762 __ subptr(rbx, 4*wordSize); // we'll push pc and rbp by hand and
2763 #ifdef ASSERT
2764 __ push(0xDEADDEAD); // Make a recognizable pattern
2765 __ push(0xDEADDEAD);
2766 #else /* ASSERT */
2767 __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
2768 #endif /* ASSERT */
2769 #else
2770 __ subptr(rbx, 2*wordSize); // We'll push pc and rbp by hand
2771 #endif // CC_INTERP
2772 __ pushptr(Address(rcx, 0)); // Save return address
2687 __ enter(); // Save old & set new rbp 2773 __ enter(); // Save old & set new rbp
2688 __ subq(rsp, rbx); // Prolog 2774 __ subptr(rsp, rbx); // Prolog
2689 __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), 2775 #ifdef CC_INTERP
2690 sender_sp); // Make it walkable 2776 __ movptr(Address(rbp,
2777 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
2778 sender_sp); // Make it walkable
2779 #else /* CC_INTERP */
2691 // This value is corrected by layout_activation_impl 2780 // This value is corrected by layout_activation_impl
2692 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD ); 2781 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
2693 __ movq(sender_sp, rsp); // Pass sender_sp to next frame 2782 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
2694 __ addq(rsi, wordSize); // Bump array pointer (sizes) 2783 #endif /* CC_INTERP */
2695 __ addq(rcx, wordSize); // Bump array pointer (pcs) 2784 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
2785 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2786 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2696 __ decrementl(rdx); // Decrement counter 2787 __ decrementl(rdx); // Decrement counter
2697 __ jcc(Assembler::notZero, loop); 2788 __ jcc(Assembler::notZero, loop);
2698 __ pushq(Address(rcx, 0)); // Save final return address 2789 __ pushptr(Address(rcx, 0)); // Save final return address
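The loop above walks two parallel UnrollBlock arrays, one entry per skeletal frame. Its control flow on the non-CC_INTERP path, rendered as plain C++ with hypothetical names (stack pushes are only tallied here, not performed):

#include <cstdint>

// Walk frame_sizes[]/frame_pcs[] exactly as the generated loop does:
// count down number_of_frames, consuming one size and one pc per frame.
static size_t push_skeletal_frames(const intptr_t* frame_sizes,
                                   const uintptr_t* frame_pcs,
                                   int number_of_frames) {
  size_t sp_consumed = 0;
  for (int i = 0; i < number_of_frames; ++i) {
    size_t body = (size_t)frame_sizes[i] - 2 * sizeof(void*); // pc and rbp pushed by hand
    sp_consumed += sizeof(void*);  // pushptr(frame_pcs[i]): the return address
    sp_consumed += sizeof(void*);  // enter(): pushes the old rbp
    sp_consumed += body;           // subptr(rsp, rbx): the frame body
  }
  return sp_consumed;              // total stack the skeletal frames occupy
}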
2699 2790
2700 // Re-push self-frame 2791 // Re-push self-frame
2701 __ enter(); // Save old & set new rbp 2792 __ enter(); // Save old & set new rbp
2702 2793
2703 // Allocate a full sized register save area. 2794 // Allocate a full sized register save area.
2704 // Return address and rbp are in place, so we allocate two less words. 2795 // Return address and rbp are in place, so we allocate two less words.
2705 __ subq(rsp, (frame_size_in_words - 2) * wordSize); 2796 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
2706 2797
2707 // Restore frame locals after moving the frame 2798 // Restore frame locals after moving the frame
2708 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0); 2799 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
2709 __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); 2800 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
2710 2801
2711 // Call C code. Need thread but NOT official VM entry 2802 // Call C code. Need thread but NOT official VM entry
2712 // crud. We cannot block on this call; no GC can happen. Call should 2803 // crud. We cannot block on this call; no GC can happen. Call should
2713 // restore return values to their stack-slots with the new SP. 2804 // restore return values to their stack-slots with the new SP.
2714 // 2805 //
2715 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode) 2806 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
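The exec_mode selector passed in c_rarg1 (kept in r14 here) distinguishes why frames are being unpacked. The values are believed to come from Deoptimization's UnpackType enum of this era; treat the sketch below as an assumption and verify against deoptimization.hpp:

// Assumed shape of the selector consumed by unpack_frames:
enum UnpackType {
  Unpack_deopt         = 0, // normal deoptimization
  Unpack_exception     = 1, // an exception is pending
  Unpack_uncommon_trap = 2, // redo the last bytecode (uncommon trap blob)
  Unpack_reexecute     = 3  // reexecute the bytecode
};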
2716 2807
2717 // Use rbp because the frames look interpreted now 2808 // Use rbp because the frames look interpreted now
2718 __ set_last_Java_frame(noreg, rbp, NULL); 2809 __ set_last_Java_frame(noreg, rbp, NULL);
2719 2810
2720 __ movq(c_rarg0, r15_thread); 2811 __ mov(c_rarg0, r15_thread);
2721 __ movl(c_rarg1, r14); // second arg: exec_mode 2812 __ movl(c_rarg1, r14); // second arg: exec_mode
2722 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); 2813 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2723 2814
2724 // Set an oopmap for the call site 2815 // Set an oopmap for the call site
2725 oop_maps->add_gc_map(__ pc() - start, 2816 oop_maps->add_gc_map(__ pc() - start,
2727 2818
2728 __ reset_last_Java_frame(true, false); 2819 __ reset_last_Java_frame(true, false);
2729 2820
2730 // Collect return values 2821 // Collect return values
2731 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes())); 2822 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
2732 __ movq(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes())); 2823 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
2824 // I think this is useless (throwing pc?)
2825 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
2733 2826
2734 // Pop self-frame. 2827 // Pop self-frame.
2735 __ leave(); // Epilog 2828 __ leave(); // Epilog
2736 2829
2737 // Jump to interpreter 2830 // Jump to interpreter
2738 __ ret(0); 2831 __ ret(0);
2739 2832
2740 // Make sure all code is generated 2833 // Make sure all code is generated
2741 masm->flush(); 2834 masm->flush();
2742 2835
2743 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, frame_size_in_words); 2836 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2837 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2744 } 2838 }
2745 2839
2746 #ifdef COMPILER2 2840 #ifdef COMPILER2
2747 //------------------------------generate_uncommon_trap_blob-------------------- 2841 //------------------------------generate_uncommon_trap_blob--------------------
2748 void SharedRuntime::generate_uncommon_trap_blob() { 2842 void SharedRuntime::generate_uncommon_trap_blob() {
2756 2850
2757 address start = __ pc(); 2851 address start = __ pc();
2758 2852
2759 // Push self-frame. We get here with a return address on the 2853 // Push self-frame. We get here with a return address on the
2760 // stack, so rsp is 8-byte aligned until we allocate our frame. 2854 // stack, so rsp is 8-byte aligned until we allocate our frame.
2761 __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog! 2855 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
2762 2856
2763 // No callee saved registers. rbp is assumed implicitly saved 2857 // No callee saved registers. rbp is assumed implicitly saved
2764 __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); 2858 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
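SimpleRuntimeFrame offsets such as rbp_off and return_off are counted in 32-bit slots, so shifting by LogBytesPerInt (== 2) converts a slot index into a byte offset. A tiny worked example with hypothetical slot values, not the real layout:

#include <cstdio>

enum { LogBytesPerInt = 2, BytesPerInt = 1 << LogBytesPerInt };

int main() {
  int rbp_off    = 4;   // hypothetical slot indices for illustration
  int return_off = 6;
  printf("rbp at byte %d, return address at byte %d\n",
         rbp_off << LogBytesPerInt,      // == rbp_off * BytesPerInt == 16
         return_off << LogBytesPerInt);  // == 24
  return 0;
}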
2765 2859
2766 // compiler left unloaded_class_index in j_rarg0; move to where the 2860 // compiler left unloaded_class_index in j_rarg0; move to where the
2767 // runtime expects it. 2861 // runtime expects it.
2768 __ movl(c_rarg1, j_rarg0); 2862 __ movl(c_rarg1, j_rarg0);
2769 2863
2774 // capture callee-saved registers as well as return values. 2868 // capture callee-saved registers as well as return values.
2775 // Thread is in rdi already. 2869 // Thread is in rdi already.
2776 // 2870 //
2777 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index); 2871 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2778 2872
2779 __ movq(c_rarg0, r15_thread); 2873 __ mov(c_rarg0, r15_thread);
2780 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap))); 2874 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2781 2875
2782 // Set an oopmap for the call site 2876 // Set an oopmap for the call site
2783 OopMapSet* oop_maps = new OopMapSet(); 2877 OopMapSet* oop_maps = new OopMapSet();
2784 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0); 2878 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2788 oop_maps->add_gc_map(__ pc() - start, map); 2882 oop_maps->add_gc_map(__ pc() - start, map);
2789 2883
2790 __ reset_last_Java_frame(false, false); 2884 __ reset_last_Java_frame(false, false);
2791 2885
2792 // Load UnrollBlock* into rdi 2886 // Load UnrollBlock* into rdi
2793 __ movq(rdi, rax); 2887 __ mov(rdi, rax);
2794 2888
2795 // Pop all the frames we must move/replace. 2889 // Pop all the frames we must move/replace.
2796 // 2890 //
2797 // Frame picture (youngest to oldest) 2891 // Frame picture (youngest to oldest)
2798 // 1: self-frame (no frame link) 2892 // 1: self-frame (no frame link)
2799 // 2: deopting frame (no frame link) 2893 // 2: deopting frame (no frame link)
2800 // 3: caller of deopting frame (could be compiled/interpreted). 2894 // 3: caller of deopting frame (could be compiled/interpreted).
2801 2895
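Pictured as stacks (a sketch, not to scale), the two pops below take us from the left picture to the right one, leaving rsp on the return address into frame 3:

   before the pops                 after the pops
   +---------------------+         +----------------------+
   | 1: self-frame       |         | return pc into (3)   |  <- rsp
   +---------------------+         +----------------------+
   | 2: deopted frame    |         | 3: caller frame      |
   +---------------------+         +----------------------+
   | 3: caller frame     |
   +---------------------+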
2802 // Pop self-frame. We have no frame, and must rely only on rax and rsp. 2896 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
2803 __ addq(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog! 2897 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
2804 2898
2805 // Pop deoptimized frame (int) 2899 // Pop deoptimized frame (int)
2806 __ movl(rcx, Address(rdi, 2900 __ movl(rcx, Address(rdi,
2807 Deoptimization::UnrollBlock:: 2901 Deoptimization::UnrollBlock::
2808 size_of_deoptimized_frame_offset_in_bytes())); 2902 size_of_deoptimized_frame_offset_in_bytes()));
2809 __ addq(rsp, rcx); 2903 __ addptr(rsp, rcx);
2810 2904
2811 // rsp should be pointing at the return address to the caller (3) 2905 // rsp should be pointing at the return address to the caller (3)
2812 2906
2813 // Stack bang to make sure there's enough room for these interpreter frames. 2907 // Stack bang to make sure there's enough room for these interpreter frames.
2814 if (UseStackBanging) { 2908 if (UseStackBanging) {
2815 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); 2909 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2816 __ bang_stack_size(rbx, rcx); 2910 __ bang_stack_size(rbx, rcx);
2817 } 2911 }
2818 2912
2819 // Load address of array of frame pcs into rcx (address*) 2913 // Load address of array of frame pcs into rcx (address*)
2820 __ movq(rcx, 2914 __ movptr(rcx,
2821 Address(rdi, 2915 Address(rdi,
2822 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); 2916 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2823 2917
2824 // Trash the return pc 2918 // Trash the return pc
2825 __ addq(rsp, wordSize); 2919 __ addptr(rsp, wordSize);
2826 2920
2827 // Load address of array of frame sizes into rsi (intptr_t*) 2921 // Load address of array of frame sizes into rsi (intptr_t*)
2828 __ movq(rsi, Address(rdi, 2922 __ movptr(rsi, Address(rdi,
2829 Deoptimization::UnrollBlock:: 2923 Deoptimization::UnrollBlock::
2830 frame_sizes_offset_in_bytes())); 2924 frame_sizes_offset_in_bytes()));
2831 2925
2832 // Counter 2926 // Counter
2833 __ movl(rdx, Address(rdi, 2927 __ movl(rdx, Address(rdi,
2834 Deoptimization::UnrollBlock:: 2928 Deoptimization::UnrollBlock::
2835 number_of_frames_offset_in_bytes())); // (int) 2929 number_of_frames_offset_in_bytes())); // (int)
2836 2930
2837 // Pick up the initial fp we should save 2931 // Pick up the initial fp we should save
2838 __ movq(rbp, 2932 __ movptr(rbp,
2839 Address(rdi, 2933 Address(rdi,
2840 Deoptimization::UnrollBlock::initial_fp_offset_in_bytes())); 2934 Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2841 2935
2842 // Now adjust the caller's stack to make up for the extra locals but 2936 // Now adjust the caller's stack to make up for the extra locals but
2843 // record the original sp so that we can save it in the skeletal 2937 // record the original sp so that we can save it in the skeletal
2844 // interpreter frame; the stack walking of interpreter_sender 2938 // interpreter frame; the stack walking of interpreter_sender
2845 // will get the unextended sp value and not the "real" sp value. 2939 // will get the unextended sp value and not the "real" sp value.
2846 2940
2847 const Register sender_sp = r8; 2941 const Register sender_sp = r8;
2848 2942
2849 __ movq(sender_sp, rsp); 2943 __ mov(sender_sp, rsp);
2850 __ movl(rbx, Address(rdi, 2944 __ movl(rbx, Address(rdi,
2851 Deoptimization::UnrollBlock:: 2945 Deoptimization::UnrollBlock::
2852 caller_adjustment_offset_in_bytes())); // (int) 2946 caller_adjustment_offset_in_bytes())); // (int)
2853 __ subq(rsp, rbx); 2947 __ subptr(rsp, rbx);
2854 2948
2855 // Push interpreter frames in a loop 2949 // Push interpreter frames in a loop
2856 Label loop; 2950 Label loop;
2857 __ bind(loop); 2951 __ bind(loop);
2858 __ movq(rbx, Address(rsi, 0)); // Load frame size 2952 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2859 __ subq(rbx, 2 * wordSize); // We'll push pc and rbp by hand 2953 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
2860 __ pushq(Address(rcx, 0)); // Save return address 2954 __ pushptr(Address(rcx, 0)); // Save return address
2861 __ enter(); // Save old & set new rbp 2955 __ enter(); // Save old & set new rbp
2862 __ subq(rsp, rbx); // Prolog 2956 __ subptr(rsp, rbx); // Prolog
2863 __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), 2957 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
2864 sender_sp); // Make it walkable 2958 sender_sp); // Make it walkable
2865 // This value is corrected by layout_activation_impl 2959 // This value is corrected by layout_activation_impl
2866 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD ); 2960 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
2867 __ movq(sender_sp, rsp); // Pass sender_sp to next frame 2961 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
2868 __ addq(rsi, wordSize); // Bump array pointer (sizes) 2962 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2869 __ addq(rcx, wordSize); // Bump array pointer (pcs) 2963 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2870 __ decrementl(rdx); // Decrement counter 2964 __ decrementl(rdx); // Decrement counter
2871 __ jcc(Assembler::notZero, loop); 2965 __ jcc(Assembler::notZero, loop);
2872 __ pushq(Address(rcx, 0)); // Save final return address 2966 __ pushptr(Address(rcx, 0)); // Save final return address
2873 2967
2874 // Re-push self-frame 2968 // Re-push self-frame
2875 __ enter(); // Save old & set new rbp 2969 __ enter(); // Save old & set new rbp
2876 __ subq(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt); 2970 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
2877 // Prolog 2971 // Prolog
2878 2972
2879 // Use rbp because the frames look interpreted now 2973 // Use rbp because the frames look interpreted now
2880 __ set_last_Java_frame(noreg, rbp, NULL); 2974 __ set_last_Java_frame(noreg, rbp, NULL);
2881 2975
2884 // restore return values to their stack-slots with the new SP. 2978 // restore return values to their stack-slots with the new SP.
2885 // Thread is in rdi already. 2979 // Thread is in rdi already.
2886 // 2980 //
2887 // BasicType unpack_frames(JavaThread* thread, int exec_mode); 2981 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
2888 2982
2889 __ movq(c_rarg0, r15_thread); 2983 __ mov(c_rarg0, r15_thread);
2890 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap); 2984 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
2891 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); 2985 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2892 2986
2893 // Set an oopmap for the call site 2987 // Set an oopmap for the call site
2894 oop_maps->add_gc_map(__ pc() - start, new OopMap(SimpleRuntimeFrame::framesize, 0)); 2988 oop_maps->add_gc_map(__ pc() - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2931 address call_pc = NULL; 3025 address call_pc = NULL;
2932 int frame_size_in_words; 3026 int frame_size_in_words;
2933 3027
2934 // Make room for return address (or push it again) 3028 // Make room for return address (or push it again)
2935 if (!cause_return) { 3029 if (!cause_return) {
2936 __ pushq(rbx); 3030 __ push(rbx);
2937 } 3031 }
2938 3032
2939 // Save registers, fpu state, and flags 3033 // Save registers, fpu state, and flags
2940 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words); 3034 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2941 3035
2948 // The return address must always be correct so that the frame constructor never 3042 // The return address must always be correct so that the frame constructor never
2949 // sees an invalid pc. 3043 // sees an invalid pc.
2950 3044
2951 if (!cause_return) { 3045 if (!cause_return) {
2952 // overwrite the dummy value we pushed on entry 3046 // overwrite the dummy value we pushed on entry
2953 __ movq(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset())); 3047 __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
2954 __ movq(Address(rbp, wordSize), c_rarg0); 3048 __ movptr(Address(rbp, wordSize), c_rarg0);
2955 } 3049 }
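What the two movptr's above do, in sketch form: once save_live_registers has built the frame, the dummy word pushed on entry sits in the return-address slot at [rbp + wordSize], and it is replaced by the pc the safepoint/signal machinery saved in the thread. Hypothetical types, for illustration only:

#include <cstdint>

struct ThreadStub2 {                 // stands in for JavaThread
  uintptr_t saved_exception_pc;      // stashed by the safepoint/signal handler
};

// frame[1] is the return-address slot ([rbp + wordSize]) of this frame.
static void fix_return_address(ThreadStub2* t, uintptr_t* rbp_frame) {
  rbp_frame[1] = t->saved_exception_pc; // frame constructor now sees a valid pc
}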
2956 3050
2957 // Do the call 3051 // Do the call
2958 __ movq(c_rarg0, r15_thread); 3052 __ mov(c_rarg0, r15_thread);
2959 __ call(RuntimeAddress(call_ptr)); 3053 __ call(RuntimeAddress(call_ptr));
2960 3054
2961 // Set an oopmap for the call site. This oopmap will map all 3055 // Set an oopmap for the call site. This oopmap will map all
2962 // oop-registers and debug-info registers as callee-saved. This 3056 // oop-registers and debug-info registers as callee-saved. This
2963 // will allow deoptimization at this safepoint to find all possible 3057 // will allow deoptimization at this safepoint to find all possible
2967 3061
2968 Label noException; 3062 Label noException;
2969 3063
2970 __ reset_last_Java_frame(false, false); 3064 __ reset_last_Java_frame(false, false);
2971 3065
2972 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); 3066 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2973 __ jcc(Assembler::equal, noException); 3067 __ jcc(Assembler::equal, noException);
2974 3068
2975 // Exception pending 3069 // Exception pending
2976 3070
2977 RegisterSaver::restore_live_registers(masm); 3071 RegisterSaver::restore_live_registers(masm);
3021 3115
3022 int frame_complete = __ offset(); 3116 int frame_complete = __ offset();
3023 3117
3024 __ set_last_Java_frame(noreg, noreg, NULL); 3118 __ set_last_Java_frame(noreg, noreg, NULL);
3025 3119
3026 __ movq(c_rarg0, r15_thread); 3120 __ mov(c_rarg0, r15_thread);
3027 3121
3028 __ call(RuntimeAddress(destination)); 3122 __ call(RuntimeAddress(destination));
3029 3123
3030 3124
3031 // Set an oopmap for the call site. 3125 // Set an oopmap for the call site.
3038 3132
3039 // clear last_Java_sp 3133 // clear last_Java_sp
3040 __ reset_last_Java_frame(false, false); 3134 __ reset_last_Java_frame(false, false);
3041 // check for pending exceptions 3135 // check for pending exceptions
3042 Label pending; 3136 Label pending;
3043 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); 3137 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3044 __ jcc(Assembler::notEqual, pending); 3138 __ jcc(Assembler::notEqual, pending);
3045 3139
3046 // get the returned methodOop 3140 // get the returned methodOop
3047 __ movq(rbx, Address(r15_thread, JavaThread::vm_result_offset())); 3141 __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
3048 __ movq(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx); 3142 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3049 3143
3050 __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax); 3144 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3051 3145
3052 RegisterSaver::restore_live_registers(masm); 3146 RegisterSaver::restore_live_registers(masm);
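The pattern above is worth spelling out: results are written into the register save area rather than into live registers, so the restore_live_registers just performed pops the methodOop into rbx and the resolved entry point into rax as a side effect. A hedged sketch with a hypothetical save-area layout:

#include <cstdint>

struct SaveArea {                    // hypothetical mirror of the save layout
  uintptr_t rbx_slot;                // at RegisterSaver::rbx_offset_in_bytes()
  uintptr_t rax_slot;                // at RegisterSaver::rax_offset_in_bytes()
};

static void publish_results(SaveArea* sa, uintptr_t method_oop, uintptr_t entry) {
  sa->rbx_slot = method_oop;         // popped into rbx by restore_live_registers
  sa->rax_slot = entry;              // popped into rax likewise
}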
3053 3147
3054 // We are back to the original state on entry and ready to go. 3148 // We are back to the original state on entry and ready to go.
3055 3149
3063 3157
3064 // exception pending => remove activation and forward to exception handler 3158 // exception pending => remove activation and forward to exception handler
3065 3159
3066 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD); 3160 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3067 3161
3068 __ movq(rax, Address(r15_thread, Thread::pending_exception_offset())); 3162 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3069 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 3163 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3070 3164
3071 // ------------- 3165 // -------------
3072 // make sure all code is generated 3166 // make sure all code is generated
3073 masm->flush(); 3167 masm->flush();
3152 3246
3153 3247
3154 address start = __ pc(); 3248 address start = __ pc();
3155 3249
3156 // Exception pc is 'return address' for stack walker 3250 // Exception pc is 'return address' for stack walker
3157 __ pushq(rdx); 3251 __ push(rdx);
3158 __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog 3252 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3159 3253
3160 // Save callee-saved registers. See x86_64.ad. 3254 // Save callee-saved registers. See x86_64.ad.
3161 3255
3162 // rbp is an implicitly saved callee saved register (i.e. the calling 3256 // rbp is an implicitly saved callee saved register (i.e. the calling
3163 // convention will save/restore it in prolog/epilog). Other than that 3257 // convention will save/restore it in prolog/epilog). Other than that
3164 // there are no callee save registers now that adapter frames are gone. 3258 // there are no callee save registers now that adapter frames are gone.
3165 3259
3166 __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp); 3260 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3167 3261
3168 // Store exception in Thread object. We cannot pass any arguments to the 3262 // Store exception in Thread object. We cannot pass any arguments to the
3169 // handle_exception call, since we do not want to make any assumption 3263 // handle_exception call, since we do not want to make any assumption
3170 // about the size of the frame in which the exception happened. 3264 // about the size of the frame in which the exception happened.
3171 // c_rarg0 is either rdi (Linux) or rcx (Windows). 3265 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3172 __ movq(Address(r15_thread, JavaThread::exception_oop_offset()),rax); 3266 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3173 __ movq(Address(r15_thread, JavaThread::exception_pc_offset()), rdx); 3267 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3174 3268
3175 // This call does all the hard work. It checks if an exception handler 3269 // This call does all the hard work. It checks if an exception handler
3176 // exists in the method. 3270 // exists in the method.
3177 // If so, it returns the handler address. 3271 // If so, it returns the handler address.
3178 // If not, it prepares for stack-unwinding, restoring the callee-save 3272 // If not, it prepares for stack-unwinding, restoring the callee-save
3179 // registers of the frame being removed. 3273 // registers of the frame being removed.
3180 // 3274 //
3181 // address OptoRuntime::handle_exception_C(JavaThread* thread) 3275 // address OptoRuntime::handle_exception_C(JavaThread* thread)
3182 3276
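Since handle_exception_C receives only the thread, the exception state has to travel through TLS, which is exactly what the two stores above arranged. A minimal sketch of that contract with hypothetical stand-in types (the real fields live on JavaThread):

#include <cstdint>

typedef unsigned char* address;

struct JavaThreadStub {              // hypothetical stand-in for JavaThread
  uintptr_t exception_oop;
  address   exception_pc;
};

// Caller side: park the exception where the runtime can find it, then
// ask for a handler; no assumptions about the faulting frame's size.
static address find_handler(JavaThreadStub* t, uintptr_t ex_oop, address ex_pc,
                            address (*handle_exception)(JavaThreadStub*)) {
  t->exception_oop = ex_oop;
  t->exception_pc  = ex_pc;
  return handle_exception(t);        // handler address comes back in rax
}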
3183 __ set_last_Java_frame(noreg, noreg, NULL); 3277 __ set_last_Java_frame(noreg, noreg, NULL);
3184 __ movq(c_rarg0, r15_thread); 3278 __ mov(c_rarg0, r15_thread);
3185 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); 3279 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3186 3280
3187 // Set an oopmap for the call site. This oopmap will only be used if we 3281 // Set an oopmap for the call site. This oopmap will only be used if we
3188 // are unwinding the stack. Hence, all locations will be dead. 3282 // are unwinding the stack. Hence, all locations will be dead.
3189 // Callee-saved registers will be the same as the frame above (i.e., 3283 // Callee-saved registers will be the same as the frame above (i.e.,
3200 3294
3201 // rbp is an implicitly saved callee saved register (i.e. the calling 3295 // rbp is an implicitly saved callee saved register (i.e. the calling
3202 // convention will save/restore it in prolog/epilog). Other than that 3296 // convention will save/restore it in prolog/epilog). Other than that
3203 // there are no callee save registers now that adapter frames are gone. 3297 // there are no callee save registers now that adapter frames are gone.
3204 3298
3205 __ movq(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt)); 3299 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
3206 3300
3207 __ addq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog 3301 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
3208 __ popq(rdx); // No need for exception pc anymore 3302 __ pop(rdx); // No need for exception pc anymore
3209 3303
3210 // rax: exception handler 3304 // rax: exception handler
3211 3305
3212 // We have a handler in rax (could be deopt blob). 3306 // We have a handler in rax (could be deopt blob).
3213 __ movq(r8, rax); 3307 __ mov(r8, rax);
3214 3308
3215 // Get the exception oop 3309 // Get the exception oop
3216 __ movq(rax, Address(r15_thread, JavaThread::exception_oop_offset())); 3310 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3217 // Get the exception pc in case we are deoptimized 3311 // Get the exception pc in case we are deoptimized
3218 __ movq(rdx, Address(r15_thread, JavaThread::exception_pc_offset())); 3312 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3219 #ifdef ASSERT 3313 #ifdef ASSERT
3220 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD); 3314 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
3221 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD); 3315 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
3222 #endif 3316 #endif
3223 // Clear the exception oop so GC no longer processes it as a root. 3317 // Clear the exception oop so GC no longer processes it as a root.