comparison src/cpu/x86/vm/macroAssembler_x86.hpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 4ca6dc0799b6 0bf37f737702
children 7848fc12602b
comparing 17606:45d7b2c7029d with 18041:52b4284cb496
@@ -25,10 +25,11 @@
 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
 
 #include "asm/assembler.hpp"
 #include "utilities/macros.hpp"
+#include "runtime/rtmLocking.hpp"
 
 
 // MacroAssembler extends Assembler by frequently used macros.
 //
 // Instructions for which a 'better' code sequence exists depending
@@ -109,21 +110,22 @@
     unsigned char op = branch[0];
     assert(op == 0xE8 /* call */ ||
            op == 0xE9 /* jmp */ ||
            op == 0xEB /* short jmp */ ||
            (op & 0xF0) == 0x70 /* short jcc */ ||
-           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
+           op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
            "Invalid opcode at patch point");
 
     if (op == 0xEB || (op & 0xF0) == 0x70) {
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
       *disp = imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
       int imm32 = target - (address) &disp[1];
       *disp = imm32;
     }
   }
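The new 0xC7/0xF8 case above teaches the patch routine about xbegin, which, like a two-byte jcc, encodes a two-byte opcode followed by a rel32 displacement relative to the next instruction. A minimal standalone sketch of that displacement arithmetic (plain C++, not HotSpot code; patch_rel32 and the address typedef are illustrative only):

#include <cstdio>

typedef unsigned char* address;   // mirrors HotSpot's typedef

// Patch a rel32 branch (two-byte jcc 0x0F 0x8x, or xbegin 0xC7 0xF8): the
// 32-bit displacement is relative to the address of the *next* instruction,
// i.e. the byte just past the 4-byte immediate.
static void patch_rel32(address branch, address target) {
  int* disp = (int*) &branch[2];                  // skip the two opcode bytes
  int imm32 = (int)(target - (address) &disp[1]); // next-instruction relative
  *disp = imm32;
}

int main() {
  unsigned char code[16] = { 0x0F, 0x84, 0, 0, 0, 0 };  // je rel32, unpatched
  patch_rel32(code, code + 6 + 0x30);       // target 0x30 bytes past the je
  printf("disp = 0x%x\n", *(int*)&code[2]); // prints disp = 0x30
  return 0;
}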
@@ -158,11 +160,10 @@
   void incrementl(Address dst, int value = 1);
   void incrementl(Register reg, int value = 1);
 
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
-
 
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
     else { movss (dst, src); return; }
@@ -184,10 +185,12 @@
   }
   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 
   void incrementl(AddressLiteral dst);
   void incrementl(ArrayAddress dst);
+
+  void incrementq(AddressLiteral dst);
 
   // Alignment
   void align(int modulus);
 
   // A 5 byte nop that is safe for patching (see patch_verified_entry)
@@ -649,11 +652,44 @@
                             Register swap_reg, Register tmp_reg,
                             bool swap_reg_contains_mark,
                             Label& done, Label* slow_case = NULL,
                             BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
+#ifdef COMPILER2
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  // See full description in macroAssembler_x86.cpp.
+  void fast_lock(Register obj, Register box, Register tmp,
+                 Register scr, Register cx1, Register cx2,
+                 BiasedLockingCounters* counters,
+                 RTMLockingCounters* rtm_counters,
+                 RTMLockingCounters* stack_rtm_counters,
+                 Metadata* method_data,
+                 bool use_rtm, bool profile_rtm);
+  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+#if INCLUDE_RTM_OPT
+  void rtm_counters_update(Register abort_status, Register rtm_counters);
+  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
+  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
+                                   RTMLockingCounters* rtm_counters,
+                                   Metadata* method_data);
+  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
+                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
+  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
+  void rtm_stack_locking(Register obj, Register tmp, Register scr,
+                         Register retry_on_abort_count,
+                         RTMLockingCounters* stack_rtm_counters,
+                         Metadata* method_data, bool profile_rtm,
+                         Label& DONE_LABEL, Label& IsInflated);
+  void rtm_inflated_locking(Register obj, Register box, Register tmp,
+                            Register scr, Register retry_on_busy_count,
+                            Register retry_on_abort_count,
+                            RTMLockingCounters* rtm_counters,
+                            Metadata* method_data, bool profile_rtm,
+                            Label& DONE_LABEL);
+#endif
+#endif
 
   Condition negate_condition(Condition cond);
 
   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
   // operands. In general the names are modified to avoid hiding the instruction in Assembler
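For context, the fast_lock/fast_unlock RTM variants declared above implement hardware lock elision with Intel TSX: run the critical section inside a transaction started by xbegin, retry or fall back to the normal lock on abort, and use the rtm_* profiling helpers to decide per lock site whether elision pays off. A hedged sketch of that overall pattern using the RTM compiler intrinsics instead of the MacroAssembler (with_elided_lock and fallback_lock are invented names; requires an RTM-capable CPU and -mrtm):

#include <immintrin.h>   // _xbegin/_xend/_xabort
#include <atomic>

std::atomic<int> fallback_lock{0};   // stand-in for the real stack/inflated lock

template <typename F>
void with_elided_lock(F critical_section, int max_retries = 5) {
  for (int retry = 0; retry < max_retries; retry++) {
    unsigned status = _xbegin();                  // xbegin: start a transaction
    if (status == _XBEGIN_STARTED) {
      // Reading the lock word adds it to the read set: if another thread
      // later takes the real lock, this transaction aborts automatically.
      if (fallback_lock.load(std::memory_order_relaxed) != 0)
        _xabort(0xff);                            // lock already held, bail out
      critical_section();
      _xend();                                    // commit: no lock ever taken
      return;
    }
    // Abort path: an rtm_retry_lock_on_abort-style policy inspects 'status',
    // e.g. giving up immediately unless the hardware set the retry hint.
    if (!(status & _XABORT_RETRY)) break;
  }
  // Retries exhausted: acquire the real lock instead (fallback elided here).
}

A caller would wrap the guarded code in a lambda, e.g. with_elided_lock([]{ /* guarded code */ });, mirroring how cmpFastLock guards a synchronized region.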
@@ -714,10 +750,11 @@
 
   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 
 
   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 
 
   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 
   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
@@ -755,11 +792,18 @@
 
   // Helper functions for statistics gathering.
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_inc32(Condition cond, AddressLiteral counter_addr);
   // Unconditional atomic increment.
-  void atomic_incl(AddressLiteral counter_addr);
+  void atomic_incl(Address counter_addr);
+  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
+#ifdef _LP64
+  void atomic_incq(Address counter_addr);
+  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
+#endif
+  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
+  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 
   void lea(Register dst, AddressLiteral adr);
   void lea(Address dst, AddressLiteral adr);
   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 
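The new overloads split the counter increment by operand width, and the AddressLiteral forms take a scratch register on 64-bit so that a counter address outside the 32-bit displacement range can be materialized first. The increment itself is just a lock-prefixed incl/incq; a GCC-style illustration (plain C++, not the MacroAssembler emission):

#include <cstdint>

// Sketch of what the unconditional atomic increment boils down to on x86.
static inline void atomic_incl_sketch(volatile int32_t* counter) {
  __asm__ __volatile__("lock; incl %0" : "+m"(*counter) : : "cc");
}

#ifdef __x86_64__
static inline void atomic_incq_sketch(volatile int64_t* counter) {
  __asm__ __volatile__("lock; incq %0" : "+m"(*counter) : : "cc");
}
#endif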
@@ -1067,11 +1111,15 @@
   // can this do an lea?
   void movptr(Register dst, ArrayAddress src);
 
   void movptr(Register dst, Address src);
 
-  void movptr(Register dst, AddressLiteral src);
+#ifdef _LP64
+  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
+#else
+  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
+#endif
 
   void movptr(Register dst, intptr_t src);
   void movptr(Register dst, Register src);
   void movptr(Address dst, intptr_t src);
 
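The 64-bit movptr(Register, AddressLiteral) gains a scratch register for the same reachability reason: an arbitrary 64-bit literal address may not fit a 32-bit displacement, in which case it must be loaded with mov scratch, imm64 before the memory access. A hypothetical sketch of that decision (names invented; the real logic lives in macroAssembler_x86.cpp):

#include <cstdint>

static bool is_simm32(int64_t v) { return v == (int64_t)(int32_t)v; }

// True if 'target' is within a signed 32-bit displacement of the code at
// 'pc'; if not, the emitter needs "mov scratch, imm64; mov dst, [scratch]".
static bool reachable_rip_relative(uint8_t* pc, uint8_t* target) {
  return is_simm32((int64_t)(target - pc));
}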
@@ -1120,11 +1168,11 @@
   // sign extend as needed: l to ptr sized element
   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
 
   // C2 compiled method's prolog code.
-  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
+  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
 
   // clear memory of size 'cnt' qwords, starting at 'base'.
   void clear_mem(Register base, Register cnt, Register rtmp);
 
   // IndexOf strings.
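Finally, verified_entry now takes the actual stack-bang size instead of a yes/no flag: a frame larger than one page must touch every page it spans so that an overflow reliably faults on the guard page. A hedged sketch of that banging loop in plain C++ (kPageSize and bang_stack are illustrative, not the prolog emission):

#include <cstddef>

static const size_t kPageSize = 4096;   // assumed page size, illustrative

// Touch one byte in every page the new frame will occupy, top to bottom,
// so a frame larger than a page cannot silently skip over the guard page.
static void bang_stack(volatile char* sp, size_t stack_bang_size) {
  for (size_t offset = kPageSize; offset <= stack_bang_size; offset += kPageSize) {
    sp[-(ptrdiff_t)offset] = 0;   // faults here if the stack has overflowed
  }
}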