annotate src/cpu/x86/vm/macroAssembler_x86.hpp @ 7780:550c952f5d3f (Merge)

author   | Christian Humer <christian.humer@gmail.com>
date     | Tue, 12 Feb 2013 16:06:20 +0100
parents  | b30b3c2a0cf2
children | 8391fdd36e1f

rev | line source
7199 | 1 /* |
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
22 * | |
23 */ | |
24 | |
25 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP | |
26 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP | |
27 | |
28 #include "asm/assembler.hpp" | |
29 | |
30 | |
31 // MacroAssembler extends Assembler by frequently used macros. | |
32 // | |
33 // Instructions for which a 'better' code sequence exists depending | |
34 // on arguments should also go in here. | |
35 | |
36 class MacroAssembler: public Assembler { | |
37 friend class LIR_Assembler; | |
38 friend class Runtime1; // as_Address() | |
39 | |
40 protected: | |
41 | |
42 Address as_Address(AddressLiteral adr); | |
43 Address as_Address(ArrayAddress adr); | |
44 | |
45 // Support for VM calls | |
46 // | |
47 // This is the base routine called by the different versions of call_VM_leaf. The interpreter | |
48 // may customize this version by overriding it for its purposes (e.g., to save/restore | |
49 // additional registers when doing a VM call). | |
50 #ifdef CC_INTERP | |
51 // c++ interpreter never wants to use interp_masm version of call_VM | |
52 #define VIRTUAL | |
53 #else | |
54 #define VIRTUAL virtual | |
55 #endif | |
56 | |
57 VIRTUAL void call_VM_leaf_base( | |
58 address entry_point, // the entry point | |
59 int number_of_arguments // the number of arguments to pop after the call | |
60 ); | |
61 | |
62 // This is the base routine called by the different versions of call_VM. The interpreter | |
63 // may customize this version by overriding it for its purposes (e.g., to save/restore | |
64 // additional registers when doing a VM call). | |
65 // | |
66 // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base | |
67 // returns the register which contains the thread upon return. If a thread register has been | |
68 // specified, the return value will correspond to that register. If no last_java_sp is specified | |
69 // (noreg) then rsp will be used instead. | |
70 VIRTUAL void call_VM_base( // returns the register containing the thread upon return | |
71 Register oop_result, // where an oop-result ends up if any; use noreg otherwise | |
72 Register java_thread, // the thread if computed before ; use noreg otherwise | |
73 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise | |
74 address entry_point, // the entry point | |
75 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call | |
76 bool check_exceptions // whether to check for pending exceptions after return | |
77 ); | |
78 | |
79 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. | |
80 // The implementation is only non-empty for the InterpreterMacroAssembler, | |
81 // as only the interpreter handles PopFrame and ForceEarlyReturn requests. | |
82 virtual void check_and_handle_popframe(Register java_thread); | |
83 virtual void check_and_handle_earlyret(Register java_thread); | |
84 | |
85 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); | |
86 | |
87 // helpers for FPU flag access | |
88 // tmp is a temporary register, if none is available use noreg | |
89 void save_rax (Register tmp); | |
90 void restore_rax(Register tmp); | |
91 | |
92 public: | |
93 MacroAssembler(CodeBuffer* code) : Assembler(code) {} | |
94 | |
95 // Support for NULL-checks | |
96 // | |
97 // Generates code that causes a NULL OS exception if the content of reg is NULL. | |
98 // If the accessed location is M[reg + offset] and the offset is known, provide the | |
99 // offset. No explicit code generation is needed if the offset is within a certain | |
100 // range (0 <= offset <= page_size). | |
101 | |
102 void null_check(Register reg, int offset = -1); | |
103 static bool needs_explicit_null_check(intptr_t offset); | |
104 | |
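  // Illustrative usage sketch (not from the original header), assuming a MacroAssembler* masm,
  // an oop in rcx and a hypothetical small field offset:
  //
  //   int field_offset = 8;                             // assumed 0 <= field_offset <= page_size
  //   masm->null_check(rcx, field_offset);              // emits no code for an in-range offset
  //   masm->movptr(rax, Address(rcx, field_offset));    // the faulting load doubles as the check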
105 // Required platform-specific helpers for Label::patch_instructions. | |
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined. | |
107 void pd_patch_instruction(address branch, address target) { | |
108 unsigned char op = branch[0]; | |
109 assert(op == 0xE8 /* call */ || | |
110 op == 0xE9 /* jmp */ || | |
111 op == 0xEB /* short jmp */ || | |
112 (op & 0xF0) == 0x70 /* short jcc */ || | |
113 op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */, | |
114 "Invalid opcode at patch point"); | |
115 | |
116 if (op == 0xEB || (op & 0xF0) == 0x70) { | |
117 // short offset operators (jmp and jcc) | |
118 char* disp = (char*) &branch[1]; | |
119 int imm8 = target - (address) &disp[1]; | |
120 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset"); | |
121 *disp = imm8; | |
122 } else { | |
123 int* disp = (int*) &branch[(op == 0x0F)? 2: 1]; | |
124 int imm32 = target - (address) &disp[1]; | |
125 *disp = imm32; | |
126 } | |
127 } | |
128 | |
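  // Worked example of the displacement math above (added note, not from the original source):
  // for a rel32 jmp (opcode 0xE9) at address B with target T, disp points just past the opcode,
  // &disp[1] is B + 5 (the end of the 5-byte instruction), so the stored imm32 is T - (B + 5),
  // which is the standard x86 rel32 encoding.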
129 // The following 4 methods return the offset of the appropriate move instruction | |
130 | |
131 // Support for fast byte/short loading with zero extension (depending on particular CPU) | |
132 int load_unsigned_byte(Register dst, Address src); | |
133 int load_unsigned_short(Register dst, Address src); | |
134 | |
135 // Support for fast byte/short loading with sign extension (depending on particular CPU) | |
136 int load_signed_byte(Register dst, Address src); | |
137 int load_signed_short(Register dst, Address src); | |
138 | |
139 // Support for sign-extension (hi:lo = extend_sign(lo)) | |
140 void extend_sign(Register hi, Register lo); | |
141 | |
142 // Load and store values by size and signed-ness | |
143 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); | |
144 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); | |
145 | |
146 // Support for inc/dec with optimal instruction selection depending on value | |
147 | |
148 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } | |
149 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } | |
150 | |
151 void decrementl(Address dst, int value = 1); | |
152 void decrementl(Register reg, int value = 1); | |
153 | |
154 void decrementq(Register reg, int value = 1); | |
155 void decrementq(Address dst, int value = 1); | |
156 | |
157 void incrementl(Address dst, int value = 1); | |
158 void incrementl(Register reg, int value = 1); | |
159 | |
160 void incrementq(Register reg, int value = 1); | |
161 void incrementq(Address dst, int value = 1); | |
162 | |
163 | |
164 // Support optimal SSE move instructions. | |
165 void movflt(XMMRegister dst, XMMRegister src) { | |
166 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } | |
167 else { movss (dst, src); return; } | |
168 } | |
169 void movflt(XMMRegister dst, Address src) { movss(dst, src); } | |
170 void movflt(XMMRegister dst, AddressLiteral src); | |
171 void movflt(Address dst, XMMRegister src) { movss(dst, src); } | |
172 | |
173 void movdbl(XMMRegister dst, XMMRegister src) { | |
174 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } | |
175 else { movsd (dst, src); return; } | |
176 } | |
177 | |
178 void movdbl(XMMRegister dst, AddressLiteral src); | |
179 | |
180 void movdbl(XMMRegister dst, Address src) { | |
181 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } | |
182 else { movlpd(dst, src); return; } | |
183 } | |
184 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } | |
185 | |
186 void incrementl(AddressLiteral dst); | |
187 void incrementl(ArrayAddress dst); | |
188 | |
189 // Alignment | |
190 void align(int modulus); | |
191 | |
192 // A 5 byte nop that is safe for patching (see patch_verified_entry) | |
193 void fat_nop(); | |
194 | |
195 // Stack frame creation/removal | |
196 void enter(); | |
197 void leave(); | |
198 | |
199 // Support for getting the JavaThread pointer (i.e., a reference to thread-local information) | |
200 // The pointer will be loaded into the thread register. | |
201 void get_thread(Register thread); | |
202 | |
203 | |
204 // Support for VM calls | |
205 // | |
206 // It is imperative that all calls into the VM are handled via the call_VM macros. | |
207 // They make sure that the stack linkage is set up correctly. call_VM's correspond | |
208 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. | |
209 | |
210 | |
211 void call_VM(Register oop_result, | |
212 address entry_point, | |
213 bool check_exceptions = true); | |
214 void call_VM(Register oop_result, | |
215 address entry_point, | |
216 Register arg_1, | |
217 bool check_exceptions = true); | |
218 void call_VM(Register oop_result, | |
219 address entry_point, | |
220 Register arg_1, Register arg_2, | |
221 bool check_exceptions = true); | |
222 void call_VM(Register oop_result, | |
223 address entry_point, | |
224 Register arg_1, Register arg_2, Register arg_3, | |
225 bool check_exceptions = true); | |
226 | |
227 // Overloadings with last_Java_sp | |
228 void call_VM(Register oop_result, | |
229 Register last_java_sp, | |
230 address entry_point, | |
231 int number_of_arguments = 0, | |
232 bool check_exceptions = true); | |
233 void call_VM(Register oop_result, | |
234 Register last_java_sp, | |
235 address entry_point, | |
236 Register arg_1, bool | |
237 check_exceptions = true); | |
238 void call_VM(Register oop_result, | |
239 Register last_java_sp, | |
240 address entry_point, | |
241 Register arg_1, Register arg_2, | |
242 bool check_exceptions = true); | |
243 void call_VM(Register oop_result, | |
244 Register last_java_sp, | |
245 address entry_point, | |
246 Register arg_1, Register arg_2, Register arg_3, | |
247 bool check_exceptions = true); | |
248 | |
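  // Illustrative usage sketch (not from the original header); the entry point and register
  // choices below are hypothetical:
  //
  //   masm->call_VM(rax,                                               // oop result, if any, in rax
  //                 CAST_FROM_FN_PTR(address, SomeRuntime::do_thing),  // hypothetical VM entry point
  //                 rbx);                                              // one register argument
  //
  // call_VM sets up last_Java_frame, passes the current thread as the implicit first C argument,
  // and, with check_exceptions left at its default of true, checks for a pending exception on return.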
249 void get_vm_result (Register oop_result, Register thread); | |
250 void get_vm_result_2(Register metadata_result, Register thread); | |
251 | |
252 // These always tightly bind to MacroAssembler::call_VM_base | |
253 // bypassing the virtual implementation | |
254 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); | |
255 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); | |
256 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); | |
257 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); | |
258 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); | |
259 | |
260 void call_VM_leaf(address entry_point, | |
261 int number_of_arguments = 0); | |
262 void call_VM_leaf(address entry_point, | |
263 Register arg_1); | |
264 void call_VM_leaf(address entry_point, | |
265 Register arg_1, Register arg_2); | |
266 void call_VM_leaf(address entry_point, | |
267 Register arg_1, Register arg_2, Register arg_3); | |
268 | |
269 // These always tightly bind to MacroAssembler::call_VM_leaf_base | |
270 // bypassing the virtual implementation | |
271 void super_call_VM_leaf(address entry_point); | |
272 void super_call_VM_leaf(address entry_point, Register arg_1); | |
273 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); | |
274 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); | |
275 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); | |
276 | |
277 // last Java Frame (fills frame anchor) | |
278 void set_last_Java_frame(Register thread, | |
279 Register last_java_sp, | |
280 Register last_java_fp, | |
281 address last_java_pc); | |
282 | |
283 // thread in the default location (r15_thread on 64bit) | |
284 void set_last_Java_frame(Register last_java_sp, | |
285 Register last_java_fp, | |
286 address last_java_pc); | |
287 | |
288 void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); | |
289 | |
290 // thread in the default location (r15_thread on 64bit) | |
291 void reset_last_Java_frame(bool clear_fp, bool clear_pc); | |
292 | |
293 // Stores | |
294 void store_check(Register obj); // store check for obj - register is destroyed afterwards | |
295 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) | |
296 | |
297 #ifndef SERIALGC | |
298 | |
299 void g1_write_barrier_pre(Register obj, | |
300 Register pre_val, | |
301 Register thread, | |
302 Register tmp, | |
303 bool tosca_live, | |
304 bool expand_call); | |
305 | |
306 void g1_write_barrier_post(Register store_addr, | |
307 Register new_val, | |
308 Register thread, | |
309 Register tmp, | |
310 Register tmp2); | |
311 | |
312 #endif // SERIALGC | |
313 | |
314 // split store_check(Register obj) to enhance instruction interleaving | |
315 void store_check_part_1(Register obj); | |
316 void store_check_part_2(Register obj); | |
317 | |
318 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 | |
319 void c2bool(Register x); | |
320 | |
321 // C++ bool manipulation | |
322 | |
323 void movbool(Register dst, Address src); | |
324 void movbool(Address dst, bool boolconst); | |
325 void movbool(Address dst, Register src); | |
326 void testbool(Register dst); | |
327 | |
328 // oop manipulations | |
329 void load_klass(Register dst, Register src); | |
330 void store_klass(Register dst, Register src); | |
331 | |
332 void load_heap_oop(Register dst, Address src); | |
333 void load_heap_oop_not_null(Register dst, Address src); | |
334 void store_heap_oop(Address dst, Register src); | |
335 void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg); | |
336 | |
337 // Used for storing NULL. All other oop constants should be | |
338 // stored using routines that take a jobject. | |
339 void store_heap_oop_null(Address dst); | |
340 | |
341 void load_prototype_header(Register dst, Register src); | |
342 | |
343 #ifdef _LP64 | |
344 void store_klass_gap(Register dst, Register src); | |
345 | |
346 // This dummy is to prevent a call to store_heap_oop from | |
347 // converting a zero (like NULL) into a Register by giving | |
348 // the compiler two choices it can't resolve | |
349 | |
350 void store_heap_oop(Address dst, void* dummy); | |
351 | |
352 void encode_heap_oop(Register r); | |
353 void decode_heap_oop(Register r); | |
354 void encode_heap_oop_not_null(Register r); | |
355 void decode_heap_oop_not_null(Register r); | |
356 void encode_heap_oop_not_null(Register dst, Register src); | |
357 void decode_heap_oop_not_null(Register dst, Register src); | |
358 | |
359 void set_narrow_oop(Register dst, jobject obj); | |
360 void set_narrow_oop(Address dst, jobject obj); | |
361 void cmp_narrow_oop(Register dst, jobject obj); | |
362 void cmp_narrow_oop(Address dst, jobject obj); | |
363 | |
364 void encode_klass_not_null(Register r); | |
365 void decode_klass_not_null(Register r); | |
366 void encode_klass_not_null(Register dst, Register src); | |
367 void decode_klass_not_null(Register dst, Register src); | |
368 void set_narrow_klass(Register dst, Klass* k); | |
369 void set_narrow_klass(Address dst, Klass* k); | |
370 void cmp_narrow_klass(Register dst, Klass* k); | |
371 void cmp_narrow_klass(Address dst, Klass* k); | |
372 | |
373 // if heap base register is used - reinit it with the correct value | |
374 void reinit_heapbase(); | |
375 | |
376 DEBUG_ONLY(void verify_heapbase(const char* msg);) | |
377 | |
378 #endif // _LP64 | |
379 | |
380 // Int division/remainder for Java | |
381 // (as idivl, but checks for special case as described in JVM spec.) | |
382 // returns idivl instruction offset for implicit exception handling | |
383 int corrected_idivl(Register reg); | |
384 | |
385 // Long division/remainder for Java | |
386 // (as idivq, but checks for special case as described in JVM spec.) | |
387 // returns idivq instruction offset for implicit exception handling | |
388 int corrected_idivq(Register reg); | |
389 | |
390 void int3(); | |
391 | |
392 // Long operation macros for a 32bit cpu | |
393 // Long negation for Java | |
394 void lneg(Register hi, Register lo); | |
395 | |
396 // Long multiplication for Java | |
397 // (destroys contents of eax, ebx, ecx and edx) | |
398 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y | |
399 | |
400 // Long shifts for Java | |
401 // (semantics as described in JVM spec.) | |
402 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) | |
403 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) | |
404 | |
405 // Long compare for Java | |
406 // (semantics as described in JVM spec.) | |
407 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) | |
408 | |
409 | |
410 // misc | |
411 | |
412 // Sign extension | |
413 void sign_extend_short(Register reg); | |
414 void sign_extend_byte(Register reg); | |
415 | |
416 // Division by power of 2, rounding towards 0 | |
417 void division_with_shift(Register reg, int shift_value); | |
418 | |
419 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: | |
420 // | |
421 // CF (corresponds to C0) if x < y | |
422 // PF (corresponds to C2) if unordered | |
423 // ZF (corresponds to C3) if x = y | |
424 // | |
425 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). | |
426 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) | |
427 void fcmp(Register tmp); | |
428 // Variant of the above which allows y to be further down the stack | |
429 // and which only pops x and y if specified. If pop_right is | |
430 // specified then pop_left must also be specified. | |
431 void fcmp(Register tmp, int index, bool pop_left, bool pop_right); | |
432 | |
433 // Floating-point comparison for Java | |
434 // Compares the top-most stack entries on the FPU stack and stores the result in dst. | |
435 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). | |
436 // (semantics as described in JVM spec.) | |
437 void fcmp2int(Register dst, bool unordered_is_less); | |
438 // Variant of the above which allows y to be further down the stack | |
439 // and which only pops x and y if specified. If pop_right is | |
440 // specified then pop_left must also be specified. | |
441 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); | |
442 | |
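  // Added note (not from the original source): fcmp2int produces the JVM fcmpl/fcmpg result
  // encoding, i.e. dst becomes -1, 0 or +1 for x < y, x == y, x > y respectively; an unordered
  // comparison (NaN operand) yields -1 when unordered_is_less is true and +1 otherwise.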
443 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) | |
444 // tmp is a temporary register, if none is available use noreg | |
445 void fremr(Register tmp); | |
446 | |
447 | |
448 // same as fcmp2int, but using SSE2 | |
449 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); | |
450 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); | |
451 | |
452 // Inlined sin/cos generator for Java; must not use CPU instruction | |
453 // directly on Intel as it does not have high enough precision | |
454 // outside of the range [-pi/4, pi/4]. Extra argument indicates the | |
455 // number of FPU stack slots in use; all but the topmost will | |
456 // require saving if a slow case is necessary. Assumes argument is | |
457 // on FP TOS; result is on FP TOS. No cpu registers are changed by | |
458 // this code. | |
459 void trigfunc(char trig, int num_fpu_regs_in_use = 1); | |
460 | |
461 // branch to L if FPU flag C2 is set/not set | |
462 // tmp is a temporary register, if none is available use noreg | |
463 void jC2 (Register tmp, Label& L); | |
464 void jnC2(Register tmp, Label& L); | |
465 | |
466 // Pop ST (ffree & fincstp combined) | |
467 void fpop(); | |
468 | |
469 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack | |
470 void push_fTOS(); | |
471 | |
472 // pops double TOS element from CPU stack and pushes on FPU stack | |
473 void pop_fTOS(); | |
474 | |
475 void empty_FPU_stack(); | |
476 | |
477 void push_IU_state(); | |
478 void pop_IU_state(); | |
479 | |
480 void push_FPU_state(); | |
481 void pop_FPU_state(); | |
482 | |
483 void push_CPU_state(); | |
484 void pop_CPU_state(); | |
485 | |
486 // Round up to a multiple of modulus (modulus must be a power of two) | |
487 void round_to(Register reg, int modulus); | |
488 | |
489 // Callee saved registers handling | |
490 void push_callee_saved_registers(); | |
491 void pop_callee_saved_registers(); | |
492 | |
493 // allocation | |
494 void eden_allocate( | |
495 Register obj, // result: pointer to object after successful allocation | |
496 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise | |
497 int con_size_in_bytes, // object size in bytes if known at compile time | |
498 Register t1, // temp register | |
499 Label& slow_case // continuation point if fast allocation fails | |
500 ); | |
501 void tlab_allocate( | |
502 Register obj, // result: pointer to object after successful allocation | |
503 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise | |
504 int con_size_in_bytes, // object size in bytes if known at compile time | |
505 Register t1, // temp register | |
506 Register t2, // temp register | |
507 Label& slow_case // continuation point if fast allocation fails | |
508 ); | |
509 Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address | |
510 void incr_allocated_bytes(Register thread, | |
511 Register var_size_in_bytes, int con_size_in_bytes, | |
512 Register t1 = noreg); | |
513 | |
514 // interface method calling | |
515 void lookup_interface_method(Register recv_klass, | |
516 Register intf_klass, | |
517 RegisterOrConstant itable_index, | |
518 Register method_result, | |
519 Register scan_temp, | |
520 Label& no_such_interface); | |
521 | |
522 // virtual method calling | |
523 void lookup_virtual_method(Register recv_klass, | |
524 RegisterOrConstant vtable_index, | |
525 Register method_result); | |
526 | |
527 // Test sub_klass against super_klass, with fast and slow paths. | |
528 | |
529 // The fast path produces a tri-state answer: yes / no / maybe-slow. | |
530 // One of the three labels can be NULL, meaning take the fall-through. | |
531 // If super_check_offset is -1, the value is loaded up from super_klass. | |
532 // No registers are killed, except temp_reg. | |
533 void check_klass_subtype_fast_path(Register sub_klass, | |
534 Register super_klass, | |
535 Register temp_reg, | |
536 Label* L_success, | |
537 Label* L_failure, | |
538 Label* L_slow_path, | |
539 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); | |
540 | |
541 // The rest of the type check; must be wired to a corresponding fast path. | |
542 // It does not repeat the fast path logic, so don't use it standalone. | |
543 // The temp_reg and temp2_reg can be noreg, if no temps are available. | |
544 // Updates the sub's secondary super cache as necessary. | |
545 // If set_cond_codes, condition codes will be Z on success, NZ on failure. | |
546 void check_klass_subtype_slow_path(Register sub_klass, | |
547 Register super_klass, | |
548 Register temp_reg, | |
549 Register temp2_reg, | |
550 Label* L_success, | |
551 Label* L_failure, | |
552 bool set_cond_codes = false); | |
553 | |
554 // Simplified, combined version, good for typical uses. | |
555 // Falls through on failure. | |
556 void check_klass_subtype(Register sub_klass, | |
557 Register super_klass, | |
558 Register temp_reg, | |
559 Label& L_success); | |
560 | |
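  // Illustrative pairing of the fast and slow paths (not from the original header); the
  // register choices are hypothetical:
  //
  //   Label L_ok, L_fail, L_slow;
  //   masm->check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_fail, &L_slow);
  //   masm->bind(L_slow);
  //   masm->check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_ok, &L_fail);
  //   // ... bind L_ok / L_fail and emit the success and failure code ...
  //
  // check_klass_subtype() above bundles this pattern for the common "branch to L_success,
  // fall through on failure" case.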
561 // method handles (JSR 292) | |
562 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); | |
563 | |
564 //---- | |
565 void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 | |
566 | |
567 // Debugging | |
568 | |
569 // only if +VerifyOops | |
570 // TODO: Make these macros with file and line like sparc version! | |
571 void verify_oop(Register reg, const char* s = "broken oop"); | |
572 void verify_oop_addr(Address addr, const char * s = "broken oop addr"); | |
573 | |
574 // TODO: verify method and klass metadata (compare against vptr?) | |
575 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} | |
576 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} | |
577 | |
578 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) | |
579 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) | |
580 | |
581 // only if +VerifyFPU | |
582 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); | |
583 | |
584 // prints msg, dumps registers and stops execution | |
585 void stop(const char* msg); | |
586 | |
587 // prints msg and continues | |
588 void warn(const char* msg); | |
589 | |
590 // dumps registers and other state | |
591 void print_state(); | |
592 | |
593 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); | |
594 static void debug64(char* msg, int64_t pc, int64_t regs[]); | |
595 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); | |
596 static void print_state64(int64_t pc, int64_t regs[]); | |
597 | |
598 void os_breakpoint(); | |
599 | |
600 void untested() { stop("untested"); } | |
601 | |
602 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); } | |
603 | |
604 void should_not_reach_here() { stop("should not reach here"); } | |
605 | |
606 void print_CPU_state(); | |
607 | |
608 // Stack overflow checking | |
609 void bang_stack_with_offset(int offset) { | |
610 // stack grows down, caller passes positive offset | |
611 assert(offset > 0, "must bang with positive offset"); | |
612 movl(Address(rsp, (-offset)), rax); | |
613 } | |
614 | |
615 // Writes to stack successive pages until offset reached to check for | |
616 // stack overflow + shadow pages. Also, clobbers tmp | |
617 void bang_stack_size(Register size, Register tmp); | |
618 | |
619 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, | |
620 Register tmp, | |
621 int offset); | |
622 | |
623 // Support for serializing memory accesses between threads | |
624 void serialize_memory(Register thread, Register tmp); | |
625 | |
626 void verify_tlab(); | |
627 | |
628 // Biased locking support | |
629 // lock_reg and obj_reg must be loaded up with the appropriate values. | |
630 // swap_reg must be rax, and is killed. | |
631 // tmp_reg is optional. If it is supplied (i.e., != noreg) it will | |
632 // be killed; if not supplied, push/pop will be used internally to | |
633 // allocate a temporary (inefficient, avoid if possible). | |
634 // Optional slow case is for implementations (interpreter and C1) which branch to | |
635 // slow case directly. Leaves condition codes set for C2's Fast_Lock node. | |
636 // Returns offset of first potentially-faulting instruction for null | |
637 // check info (currently consumed only by C1). If | |
638 // swap_reg_contains_mark is true then returns -1 as it is assumed | |
639 // the calling code has already passed any potential faults. | |
640 int biased_locking_enter(Register lock_reg, Register obj_reg, | |
641 Register swap_reg, Register tmp_reg, | |
642 bool swap_reg_contains_mark, | |
643 Label& done, Label* slow_case = NULL, | |
644 BiasedLockingCounters* counters = NULL); | |
645 void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); | |
646 | |
647 | |
648 Condition negate_condition(Condition cond); | |
649 | |
650 // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit | |
651 // operands. In general the names are modified to avoid hiding the instruction in Assembler | |
652 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers | |
653 // here in MacroAssembler. The major exception to this rule is call. | |
654 | |
655 // Arithmetics | |
656 | |
657 | |
658 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } | |
659 void addptr(Address dst, Register src); | |
660 | |
661 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } | |
662 void addptr(Register dst, int32_t src); | |
663 void addptr(Register dst, Register src); | |
664 void addptr(Register dst, RegisterOrConstant src) { | |
665 if (src.is_constant()) addptr(dst, (int) src.as_constant()); | |
666 else addptr(dst, src.as_register()); | |
667 } | |
668 | |
669 void andptr(Register dst, int32_t src); | |
670 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } | |
671 | |
672 void cmp8(AddressLiteral src1, int imm); | |
673 | |
674 // renamed to drag out the casting of address to int32_t/intptr_t | |
675 void cmp32(Register src1, int32_t imm); | |
676 | |
677 void cmp32(AddressLiteral src1, int32_t imm); | |
678 // compare reg - mem, or reg - &mem | |
679 void cmp32(Register src1, AddressLiteral src2); | |
680 | |
681 void cmp32(Register src1, Address src2); | |
682 | |
683 #ifndef _LP64 | |
684 void cmpklass(Address dst, Metadata* obj); | |
685 void cmpklass(Register dst, Metadata* obj); | |
686 void cmpoop(Address dst, jobject obj); | |
687 void cmpoop(Register dst, jobject obj); | |
688 #endif // _LP64 | |
689 | |
690 // NOTE src2 must be the lval. This is NOT a mem-mem compare | |
691 void cmpptr(Address src1, AddressLiteral src2); | |
692 | |
693 void cmpptr(Register src1, AddressLiteral src2); | |
694 | |
695 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } | |
696 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } | |
697 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } | |
698 | |
699 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } | |
700 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } | |
701 | |
702 // cmp64 to avoid hiding cmpq | |
703 void cmp64(Register src1, AddressLiteral src); | |
704 | |
705 void cmpxchgptr(Register reg, Address adr); | |
706 | |
707 void locked_cmpxchgptr(Register reg, AddressLiteral adr); | |
708 | |
709 | |
710 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } | |
711 | |
712 | |
713 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } | |
714 | |
715 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } | |
716 | |
717 void shlptr(Register dst, int32_t shift); | |
718 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } | |
719 | |
720 void shrptr(Register dst, int32_t shift); | |
721 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } | |
722 | |
723 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } | |
724 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } | |
725 | |
726 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } | |
727 | |
728 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } | |
729 void subptr(Register dst, int32_t src); | |
730 // Force generation of a 4 byte immediate value even if it fits into 8bit | |
731 void subptr_imm32(Register dst, int32_t src); | |
732 void subptr(Register dst, Register src); | |
733 void subptr(Register dst, RegisterOrConstant src) { | |
734 if (src.is_constant()) subptr(dst, (int) src.as_constant()); | |
735 else subptr(dst, src.as_register()); | |
736 } | |
737 | |
738 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } | |
739 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } | |
740 | |
741 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } | |
742 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } | |
743 | |
744 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } | |
745 | |
746 | |
747 | |
748 // Helper functions for statistics gathering. | |
749 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. | |
750 void cond_inc32(Condition cond, AddressLiteral counter_addr); | |
751 // Unconditional atomic increment. | |
752 void atomic_incl(AddressLiteral counter_addr); | |
753 | |
754 void lea(Register dst, AddressLiteral adr); | |
755 void lea(Address dst, AddressLiteral adr); | |
756 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } | |
757 | |
758 void leal32(Register dst, Address src) { leal(dst, src); } | |
759 | |
760 // Import other testl() methods from the parent class or else | |
761 // they will be hidden by the following overriding declaration. | |
762 using Assembler::testl; | |
763 void testl(Register dst, AddressLiteral src); | |
764 | |
765 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } | |
766 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } | |
767 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } | |
768 | |
769 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } | |
770 void testptr(Register src1, Register src2); | |
771 | |
772 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } | |
773 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } | |
774 | |
775 // Calls | |
776 | |
777 void call(Label& L, relocInfo::relocType rtype); | |
778 void call(Register entry); | |
779 | |
780 // NOTE: this call transfers to the effective address of entry NOT | |
781 // the address contained by entry. This is because this is more natural | |
782 // for jumps/calls. | |
783 void call(AddressLiteral entry); | |
784 | |
785 // Emit the CompiledIC call idiom | |
786 void ic_call(address entry); | |
787 | |
788 // Jumps | |
789 | |
790 // NOTE: these jumps transfer to the effective address of dst NOT | |
791 // the address contained by dst. This is because this is more natural | |
792 // for jumps/calls. | |
793 void jump(AddressLiteral dst); | |
794 void jump_cc(Condition cc, AddressLiteral dst); | |
795 | |
796 // 32bit can do a case table jump in one instruction but we no longer allow the base | |
797 // to be installed in the Address class. This jump will transfer to the address | |
798 // contained in the location described by entry (not the address of entry) | |
799 void jump(ArrayAddress entry); | |
800 | |
801 // Floating | |
802 | |
803 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } | |
804 void andpd(XMMRegister dst, AddressLiteral src); | |
805 | |
806 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } | |
807 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } | |
808 void andps(XMMRegister dst, AddressLiteral src); | |
809 | |
810 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } | |
811 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } | |
812 void comiss(XMMRegister dst, AddressLiteral src); | |
813 | |
814 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } | |
815 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } | |
816 void comisd(XMMRegister dst, AddressLiteral src); | |
817 | |
818 void fadd_s(Address src) { Assembler::fadd_s(src); } | |
819 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } | |
820 | |
821 void fldcw(Address src) { Assembler::fldcw(src); } | |
822 void fldcw(AddressLiteral src); | |
823 | |
824 void fld_s(int index) { Assembler::fld_s(index); } | |
825 void fld_s(Address src) { Assembler::fld_s(src); } | |
826 void fld_s(AddressLiteral src); | |
827 | |
828 void fld_d(Address src) { Assembler::fld_d(src); } | |
829 void fld_d(AddressLiteral src); | |
830 | |
831 void fld_x(Address src) { Assembler::fld_x(src); } | |
832 void fld_x(AddressLiteral src); | |
833 | |
834 void fmul_s(Address src) { Assembler::fmul_s(src); } | |
835 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } | |
836 | |
837 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } | |
838 void ldmxcsr(AddressLiteral src); | |
839 | |
840 // compute pow(x,y) and exp(x) with x86 instructions. These don't cover | |
841 // all corner cases and may result in NaN and require fallback to a | |
842 // runtime call. | |
843 void fast_pow(); | |
844 void fast_exp(); | |
845 void increase_precision(); | |
846 void restore_precision(); | |
847 | |
848 // computes exp(x). Fallback to runtime call included. | |
849 void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); } | |
850 // computes pow(x,y). Fallback to runtime call included. | |
851 void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); } | |
852 | |
853 private: | |
854 | |
855 // call runtime as a fallback for trig functions and pow/exp. | |
856 void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use); | |
857 | |
858 // computes 2^(Ylog2X); Ylog2X in ST(0) | |
859 void pow_exp_core_encoding(); | |
860 | |
861 // computes pow(x,y) or exp(x). Fallback to runtime call included. | |
862 void pow_or_exp(bool is_exp, int num_fpu_regs_in_use); | |
863 | |
864 // these are private because users should be doing movflt/movdbl | |
865 | |
866 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } | |
867 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } | |
868 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } | |
869 void movss(XMMRegister dst, AddressLiteral src); | |
870 | |
871 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } | |
872 void movlpd(XMMRegister dst, AddressLiteral src); | |
873 | |
874 public: | |
875 | |
876 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } | |
877 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } | |
878 void addsd(XMMRegister dst, AddressLiteral src); | |
879 | |
880 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } | |
881 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } | |
882 void addss(XMMRegister dst, AddressLiteral src); | |
883 | |
884 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } | |
885 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } | |
886 void divsd(XMMRegister dst, AddressLiteral src); | |
887 | |
888 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } | |
889 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } | |
890 void divss(XMMRegister dst, AddressLiteral src); | |
891 | |
892 // Move Unaligned Double Quadword | |
893 void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); } | |
894 void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); } | |
895 void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); } | |
896 void movdqu(XMMRegister dst, AddressLiteral src); | |
897 | |
898 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } | |
899 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } | |
900 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } | |
901 void movsd(XMMRegister dst, AddressLiteral src); | |
902 | |
903 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } | |
904 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } | |
905 void mulsd(XMMRegister dst, AddressLiteral src); | |
906 | |
907 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } | |
908 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } | |
909 void mulss(XMMRegister dst, AddressLiteral src); | |
910 | |
911 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } | |
912 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } | |
913 void sqrtsd(XMMRegister dst, AddressLiteral src); | |
914 | |
915 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } | |
916 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } | |
917 void sqrtss(XMMRegister dst, AddressLiteral src); | |
918 | |
919 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } | |
920 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } | |
921 void subsd(XMMRegister dst, AddressLiteral src); | |
922 | |
923 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } | |
924 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } | |
925 void subss(XMMRegister dst, AddressLiteral src); | |
926 | |
927 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } | |
928 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } | |
929 void ucomiss(XMMRegister dst, AddressLiteral src); | |
930 | |
931 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } | |
932 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } | |
933 void ucomisd(XMMRegister dst, AddressLiteral src); | |
934 | |
935 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values | |
936 void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); } | |
937 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } | |
938 void xorpd(XMMRegister dst, AddressLiteral src); | |
939 | |
940 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values | |
941 void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); } | |
942 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } | |
943 void xorps(XMMRegister dst, AddressLiteral src); | |
944 | |
945 // Shuffle Bytes | |
946 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } | |
947 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } | |
948 void pshufb(XMMRegister dst, AddressLiteral src); | |
949 // AVX 3-operands instructions | |
950 | |
951 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } | |
952 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } | |
953 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
954 | |
955 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } | |
956 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } | |
957 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
958 | |
959 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } | |
960 void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } | |
961 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); | |
962 | |
963 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } | |
964 void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } | |
965 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); | |
966 | |
967 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } | |
968 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } | |
969 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
970 | |
971 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } | |
972 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } | |
973 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
974 | |
975 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } | |
976 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } | |
977 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
978 | |
979 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } | |
980 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } | |
981 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
982 | |
983 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } | |
984 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } | |
985 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
986 | |
987 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } | |
988 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } | |
989 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); | |
990 | |
991 // AVX Vector instructions | |
992 | |
993 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } | |
994 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } | |
995 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); | |
996 | |
997 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } | |
998 void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } | |
999 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); | |
1000 | |
1001 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { | |
1002 if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 | |
1003 Assembler::vpxor(dst, nds, src, vector256); | |
1004 else | |
1005 Assembler::vxorpd(dst, nds, src, vector256); | |
1006 } | |
1007 void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { | |
1008 if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 | |
1009 Assembler::vpxor(dst, nds, src, vector256); | |
1010 else | |
1011 Assembler::vxorpd(dst, nds, src, vector256); | |
1012 } | |
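  // Added note (not from the original source): falling back to vxorpd is correct because a
  // bitwise XOR of the full register produces the same bits whether encoded as the integer form
  // (vpxor, 256-bit only with AVX2) or the floating-point form (vxorpd, available with AVX);
  // only the execution domain, and hence a possible bypass delay, differs.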
1013 | |
7477 | 1014 // Simple version for AVX2 256bit vectors |
1015 void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); } |
1016 void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); } |
1017 |
7199 | 1018 // Move packed integer values from low 128 bit to high 128 bit in 256 bit vector. |
1019 void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { | |
1020 if (UseAVX > 1) // vinserti128h is available only in AVX2 | |
1021 Assembler::vinserti128h(dst, nds, src); | |
1022 else | |
1023 Assembler::vinsertf128h(dst, nds, src); | |
1024 } | |
1025 | |
1026 // Data | |
1027 | |
1028 void cmov32( Condition cc, Register dst, Address src); | |
1029 void cmov32( Condition cc, Register dst, Register src); | |
1030 | |
1031 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } | |
1032 | |
1033 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } | |
1034 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } | |
1035 | |
1036 void movoop(Register dst, jobject obj); | |
1037 void movoop(Address dst, jobject obj); | |
1038 | |
1039 void mov_metadata(Register dst, Metadata* obj); | |
1040 void mov_metadata(Address dst, Metadata* obj); | |
1041 | |
1042 void movptr(ArrayAddress dst, Register src); | |
1043 // can this do an lea? | |
1044 void movptr(Register dst, ArrayAddress src); | |
1045 | |
1046 void movptr(Register dst, Address src); | |
1047 | |
1048 void movptr(Register dst, AddressLiteral src); | |
1049 | |
1050 void movptr(Register dst, intptr_t src); | |
1051 void movptr(Register dst, Register src); | |
1052 void movptr(Address dst, intptr_t src); | |
1053 | |
1054 void movptr(Address dst, Register src); | |
1055 | |
1056 void movptr(Register dst, RegisterOrConstant src) { | |
1057 if (src.is_constant()) movptr(dst, src.as_constant()); | |
1058 else movptr(dst, src.as_register()); | |
1059 } | |
1060 | |
1061 #ifdef _LP64 | |
1062 // Generally the next two are only used for moving NULL | |
1063 // Although there are situations in initializing the mark word where | |
1064 // they could be used. They are dangerous. | |
1065 | |
1066 // They only exist on LP64 so that int32_t and intptr_t are not the same | |
1067 // and we have ambiguous declarations. | |
1068 | |
1069 void movptr(Address dst, int32_t imm32); | |
1070 void movptr(Register dst, int32_t imm32); | |
1071 #endif // _LP64 | |
1072 | |
1073 // to avoid hiding movl | |
1074 void mov32(AddressLiteral dst, Register src); | |
1075 void mov32(Register dst, AddressLiteral src); | |
1076 | |
1077 // to avoid hiding movb | |
1078 void movbyte(ArrayAddress dst, int src); | |
1079 | |
1080 // Import other mov() methods from the parent class or else | |
1081 // they will be hidden by the following overriding declaration. | |
1082 using Assembler::movdl; | |
1083 using Assembler::movq; | |
1084 void movdl(XMMRegister dst, AddressLiteral src); | |
1085 void movq(XMMRegister dst, AddressLiteral src); | |
1086 | |
1087 // Can push value or effective address | |
1088 void pushptr(AddressLiteral src); | |
1089 | |
1090 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } | |
1091 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } | |
1092 | |
1093 void pushoop(jobject obj); | |
1094 void pushklass(Metadata* obj); | |
1095 | |
1096 // sign-extend an l (32-bit) value to a ptr-sized element as needed | |
1097 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } | |
1098 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } | |
1099 | |
1100 // C2 compiled method's prolog code. | |
1101 void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b); | |
1102 | |
7474 | 1103 // clear memory of size 'cnt' qwords, starting at 'base'. |
1104 void clear_mem(Register base, Register cnt, Register rtmp); |
1105 |
7199 | 1106 // IndexOf strings. |
1107 // Small strings are loaded through stack if they cross page boundary. | |
1108 void string_indexof(Register str1, Register str2, | |
1109 Register cnt1, Register cnt2, | |
1110 int int_cnt2, Register result, | |
1111 XMMRegister vec, Register tmp); | |
1112 | |
1113 // IndexOf for constant substrings with size >= 8 elements | |
1114 // which don't need to be loaded through stack. | |
1115 void string_indexofC8(Register str1, Register str2, | |
1116 Register cnt1, Register cnt2, | |
1117 int int_cnt2, Register result, | |
1118 XMMRegister vec, Register tmp); | |
1119 | |
1120 // Smallest code: we don't need to load through stack, | |
1121 // check string tail. | |
1122 | |
1123 // Compare strings. | |
1124 void string_compare(Register str1, Register str2, | |
1125 Register cnt1, Register cnt2, Register result, | |
1126 XMMRegister vec1); | |
1127 | |
1128 // Compare char[] arrays. | |
1129 void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2, | |
1130 Register limit, Register result, Register chr, | |
1131 XMMRegister vec1, XMMRegister vec2); | |
1132 | |
1133 // Fill primitive arrays | |
1134 void generate_fill(BasicType t, bool aligned, | |
1135 Register to, Register value, Register count, | |
1136 Register rtmp, XMMRegister xtmp); | |
1137 | |
7637 | 1138 void encode_iso_array(Register src, Register dst, Register len, |
1139 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, |
1140 XMMRegister tmp4, Register tmp5, Register result); |
1141 |
7199 | 1142 #undef VIRTUAL |
1143 | |
1144 }; | |
1145 | |
1146 /** | |
1147 * class SkipIfEqual: | |
1148 * | |
1149 * Instantiating this class will result in assembly code being output that will | |
1150 * jump around any code emitted between the creation of the instance and its | |
1151 * automatic destruction at the end of a scope block, depending on the value of | |
1152 * the flag passed to the constructor, which will be checked at run-time. | |
1153 */ | |
1154 class SkipIfEqual { | |
1155 private: | |
1156 MacroAssembler* _masm; | |
1157 Label _label; | |
1158 | |
1159 public: | |
1160 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); | |
1161 ~SkipIfEqual(); | |
1162 }; | |
1163 | |
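// Illustrative use of SkipIfEqual (not from the original header); the flag is assumed to be a
// bool VM flag such as DTraceMethodProbes:
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here is jumped over at run time whenever DTraceMethodProbes == false
//   }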
1164 #endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP |