Mercurial > hg > truffle
annotate src/cpu/x86/vm/nativeInst_x86.hpp @ 15847:9e172511971d
make FixedNodeProbabilityCache behave better in the presence of dead code
author | Lukas Stadler <lukas.stadler@oracle.com> |
---|---|
date | Thu, 22 May 2014 14:04:55 +0200 |
parents | 5335d65fec56 |
children | 0dac22d266d8 |
rev | line source |
---|---|
0 | 1 /* |
2404
b40d4fa697bf
6964776: c2 should ensure the polling page is reachable on 64 bit
iveresov
parents:
1972
diff
changeset
|
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
314
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
314
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
314
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef CPU_X86_VM_NATIVEINST_X86_HPP |
26 #define CPU_X86_VM_NATIVEINST_X86_HPP | |
27 | |
28 #include "asm/assembler.hpp" | |
29 #include "memory/allocation.hpp" | |
30 #include "runtime/icache.hpp" | |
31 #include "runtime/os.hpp" | |
32 #include "utilities/top.hpp" | |
33 | |
0 | 34 // We have interfaces for the following instructions: |
35 // - NativeInstruction | |
36 // - - NativeCall | |
37 // - - NativeMovConstReg | |
38 // - - NativeMovConstRegPatching | |
39 // - - NativeMovRegMem | |
40 // - - NativeMovRegMemPatching | |
41 // - - NativeJump | |
42 // - - NativeIllegalOpCode | |
43 // - - NativeGeneralJump | |
44 // - - NativeReturn | |
45 // - - NativeReturnX (return with argument) | |
46 // - - NativePushConst | |
47 // - - NativeTstRegMem | |
48 | |
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
//
// An object of this class is never constructed; a code address is cast to
// NativeInstruction* (see nativeInstruction_at), so "this" IS the address
// of the instruction's first byte.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,  // single-byte x86 NOP opcode
    nop_instruction_size        =    1
  };

  // Opcode predicates: each inspects the raw bytes at "this".
  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  bool is_dtrace_trap();
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // Address of the byte at the given offset from the instruction start.
  address addr_at(int offset) const    { return address(this) + offset; }

  // Raw accessors for signed/unsigned bytes, 32-bit ints, pointers and oops
  // embedded in the instruction stream.
  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const         { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  // Mutators; each notifies wrote() so cache invalidation has a hook point.
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};
101 | |
102 inline NativeInstruction* nativeInstruction_at(address address) { | |
103 NativeInstruction* inst = (NativeInstruction*)address; | |
104 #ifdef ASSERT | |
105 //inst->verify(); | |
106 #endif | |
107 return inst; | |
108 } | |
109 | |
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,  // call rel32 opcode
    instruction_size            =    5,  // 1 opcode byte + 4 displacement bytes
    instruction_offset          =    0,
    displacement_offset         =    1,
    return_address_offset       =    5   // address of the instruction following the call
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Rewrites the rel32 displacement so the call targets 'dest'.
  // Not MT-safe; see set_destination_mt_safe for concurrent patching.
  void  set_destination(address dest)       {
#ifdef AMD64
    // rel32 encoding: target must be within +/-2GB of the return address.
    assert((labs((intptr_t) dest - (intptr_t) return_address())  &
            0xFFFFFFFF00000000) == 0,
           "must be 32bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  // True if the byte at 'instr' is the call rel32 opcode.
  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  // True if the instruction ending at 'return_address' is a call.
  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void  insert(address code_pos, address entry);

  static void  replace_mt_safe(address instr_addr, address code_buffer);
};
168 | |
169 inline NativeCall* nativeCall_at(address address) { | |
170 NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset); | |
171 #ifdef ASSERT | |
172 call->verify(); | |
173 #endif | |
174 return call; | |
175 } | |
176 | |
177 inline NativeCall* nativeCall_before(address return_address) { | |
178 NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); | |
179 #ifdef ASSERT | |
180 call->verify(); | |
181 #endif | |
182 return call; | |
183 } | |
184 | |
// An abstraction for an indirect call through a register: call *reg
// (opcode 0xFF /2), optionally preceded by a one-byte REX prefix.
class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,  // call r/m opcode byte
    instruction_offset          =    0,
    return_address_offset_norex =    2,  // opcode + ModRM
    return_address_offset_rex   =    3   // REX prefix + opcode + ModRM
  };

  // Size depends on whether a REX prefix precedes the 0xFF opcode:
  // if byte 0 is already the opcode there is no prefix.
  int next_instruction_offset() const  {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else {
      return return_address_offset_rex;
    }
  }
};
// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
// On AMD64 the instruction carries a REX prefix and a 64-bit immediate
// (mov r64, imm64); on 32-bit it is the plain 0xB8+reg, imm32 form.
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,  // mov reg, imm (register encoded in low 3 opcode bits)
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,  // immediate follows opcode (and REX, if any)
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // The word-sized immediate operand.
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};
238 | |
239 inline NativeMovConstReg* nativeMovConstReg_at(address address) { | |
240 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset); | |
241 #ifdef ASSERT | |
242 test->verify(); | |
243 #endif | |
244 return test; | |
245 } | |
246 | |
247 inline NativeMovConstReg* nativeMovConstReg_before(address address) { | |
248 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); | |
249 #ifdef ASSERT | |
250 test->verify(); | |
251 #endif | |
252 return test; | |
253 } | |
254 | |
// Variant of NativeMovConstReg used by the patching machinery; inherits all
// behavior and exists to give the patching code its own type (and verify()).
class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
265 | |
// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  // Opcode/prefix bytes recognized when decoding the move, in addition to
  // the layout constants at the bottom.
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // The memory-operand displacement of the move.
  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
340 | |
341 inline NativeMovRegMem* nativeMovRegMem_at (address address) { | |
342 NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset); | |
343 #ifdef ASSERT | |
344 test->verify(); | |
345 #endif | |
346 return test; | |
347 } | |
348 | |
// Variant of NativeMovRegMem used by the patching machinery; inherits all
// behavior and exists to give the patching code its own type (and verify()).
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
359 | |
360 | |
361 | |
// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]
// On AMD64 the instruction may carry a REX prefix (and mov64 is also
// accepted, see mov64_instruction_code).

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
396 | |
// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,  // jmp rel32 opcode
    instruction_size            =    5,  // 1 opcode byte + 4 displacement bytes
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const           {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32bit used to encode unresolved jmp as jmp -1
     // 64bit can't produce this so it used jump to self.
     // Now 32bit and 64bit use jump to self as the unresolved address
     // which the inline cache code (and relocs) know about

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  // Patches the rel32 displacement; a 'dest' of -1 is encoded as a
  // jump-to-self (displacement -5), the canonical unresolved jump.
  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
448 | |
449 inline NativeJump* nativeJump_at(address address) { | |
450 NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset); | |
451 #ifdef ASSERT | |
452 jump->verify(); | |
453 #endif | |
454 return jump; | |
455 } | |
456 | |
// Handles all kinds of jump on Intel. Long/far, conditional/unconditional
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants does not apply, since the lengths and offsets depends on the actual jump
    // used
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  // MT-safe replacement of the jump with the code from 'code_buffer'.
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
483 | |
484 inline NativeGeneralJump* nativeGeneralJump_at(address address) { | |
485 NativeGeneralJump* jump = (NativeGeneralJump*)(address); | |
486 debug_only(jump->verify();) | |
487 return jump; | |
488 } | |
489 | |
// pop reg (opcode 0x58 + register); used when inserting a one-byte pop.
class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};
503 | |
504 | |
// ud2 — guaranteed-illegal two-byte instruction used to force a trap.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};
517 | |
// return instruction that does not pop values of the stack
// (ret, opcode 0xC3)
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};
528 | |
// return instruction that does pop values of the stack
// (ret imm16, opcode 0xC2)
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};
539 | |
// Simple test vs memory (test reg, [mem], opcode 0x85) — the instruction
// form used for safepoint polls; see is_safepoint_poll() below.
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,  // high nibble identifying a REX prefix byte
    instruction_rex_prefix      = Assembler::REX,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
551 | |
// Matches ud2 (two bytes compared as a short) and call rel32 respectively.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
// Indirect call through a register: either the 0xFF opcode directly, or a
// REX/REX_B prefix followed by 0xFF.
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
// ret (0xC3) or ret imm16 (0xC2).
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
// jmp rel32 (0xE9) or jmp rel8 (0xEB).
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
// 0x0F 0x8x jcc rel32, or 0x7x jcc rel8.
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
// Recognizes the "test rax, [polling_page]" instruction emitted for
// safepoint polls. The exact byte pattern depends on platform and on
// whether the polling page is within rip-relative (32-bit) reach.
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  if (Assembler::is_polling_page_far()) {
    // two cases, depending on the choice of the base register in the address.
    if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
         ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
         (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
        ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
        (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) {
      return true;
    } else {
      return false;
    }
  } else {
    // rip-relative form: test rax, [rip+disp32]; verify the computed
    // effective address is really the polling page.
    if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
        ubyte_at(1) == 0x05) { // 00 rax 101
      // disp32 is relative to the end of this 6-byte instruction.
      address fault = addr_at(6) + int_at(2);
      return os::is_poll_address(fault);
    } else {
      return false;
    }
  }
#else
  // 32-bit: mov or test with a disp32 absolute address equal to the poll page.
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
         (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
         (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
592 | |
// AMD64 only: REX.W (or REX.WB) prefix followed by 0xB8+reg,
// i.e. mov r64, imm64. Always false on 32-bit.
inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
1972 | 601 |
602 #endif // CPU_X86_VM_NATIVEINST_X86_HPP |