src/cpu/x86/vm/nativeInst_x86.cpp @ 18357:09550eb6ddfb
summary:  replace use of '==' with .equals()
author:   Doug Simon <doug.simon@oracle.com>
date:     Wed, 12 Nov 2014 15:33:20 +0100
parents:  52b4284cb496
children: be896a1983c0
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}


void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this.  There are only special
  // places where this can be called, but they are not automatically
  // verifiable by checking which locks are held.  The real solution is
  // true atomic patching on x86, not yet implemented.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
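// (Note, for reference: the "+ 1 + 4" below is the opcode byte plus the
//  4-byte displacement; a call's disp32 is encoded relative to the address
//  of the instruction that follows it.)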
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
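//
// Illustrative byte timeline (a sketch of the steps below, where the old
// call is E8 d0 d1 d2 d3 and the new one is E8 n0 n1 n2 n3):
//
//   before:  E8 d0 d1 d2 d3    original call
//   step 1:  EB FE EB FE d3    first word replaced by two self-jumps
//   step 2:  EB FE EB FE n3    last byte of the new instruction written
//   step 3:  E8 n0 n1 n2 n3    self-jumps replaced by the new first word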
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // First patch dummy jmp in place
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;
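  // (EB FE encodes "jmp rel8" with displacement -2, i.e. a jump to itself:
  //  any thread executing the patched word spins here until patching is done.)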

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states.  Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
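//
// Sketch of the two cases handled below (illustrative only):
//
//   disp32 within one cache line:  a single 32-bit store of the new
//     displacement is observed atomically; see set_destination().
//   first two bytes within one cache line:  park running threads on an
//     EB FE self-loop written over bytes 0-1, rewrite bytes 2-4, then
//     atomically restore bytes 0-1 from the fully patched copy.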
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;  // jmp rel8
      patch_jump[1] = 0xFE;  // jmp to self

      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate.  Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate.  Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
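  // (Encoding reference: mov r64, imm64 is a REX.W prefix -- 0x48, or 0x49
  //  when the REX.B bit extends the register field -- followed by the
  //  opcode byte B8+rd and the 8-byte immediate.)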
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }
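  // (Background, for reference: the two-byte VEX prefix is 0xC5 and the
  //  three-byte form is 0xC4.  In 32-bit mode those opcodes are LDS/LES,
  //  which is why the NOT_LP64 asserts above require the next byte to have
  //  its top two bits set -- a pattern a legal LDS/LES cannot produce.)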

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix ) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
       instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
       instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }


  if (instr_0 == instruction_extended_prefix ) {  // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:

    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem: // 0x89 (q/l)
  case instruction_code_mem2reg: // 0x8B (q/l)
  case instruction_code_reg2memb: // 0x88
  case instruction_code_mem2regb: // 0x8a

  case instruction_code_float_s: // 0xd9 fld_s a
  case instruction_code_float_d: // 0xdd fld_d a

  case instruction_code_xmm_load: // 0x10
  case instruction_code_xmm_store: // 0x11
  case instruction_code_xmm_lpd: // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;  // do not fall through into the xor case's fatal() below

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;

}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since rm == 0b100 is the
  // escape that selects an SIB byte, which shifts the nnnn displacement
  // field one byte further out.
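  // (Example encoding: mov eax, [esp + disp32] is 8B 84 24 xx xx xx xx --
  //  the modrm byte 0x84 has rm == 0b100, forcing the SIB byte 0x24 ahead
  //  of the 4-byte displacement.)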
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since rm == 0b100 is the
  // escape that selects an SIB byte, which shifts the nnnn displacement
  // field one byte further out.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    default:
      fatal ("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal ("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use.  The patching in that instance must happen only when certain
  // alignment restrictions are true.  These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jmp <dest> is a 5-byte instruction, but an atomic write can
// only cover 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: the entry point is 4 byte
// aligned, the entry point is in the same cache line as the unverified entry
// point, and the instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry
// In Graal, the restriction is enforced by HotSpotFrameContext.enter(...)
//
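// (The patch sequence below mirrors NativeCall::replace_mt_safe above:
//  spin-jumps into the first word, then the 5th byte, then the first word.)
//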
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // First patch dummy jmp in place

  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);

}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
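// (Same spin-jump protocol as NativeCall::replace_mt_safe above; see the
//  byte timeline sketched there.)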
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert (instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump

  // Temporary code
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch 4th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}



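// (Opcode reference for the decode below: E9 = jmp rel32, 5 bytes;
//  0F 8x = jcc rel32, 6 bytes; the remaining short forms, EB rel8 and
//  7x rel8, are 2 bytes.)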
address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}

bool NativeInstruction::is_dtrace_trap() {
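  // (0xCC is the single-byte int3 breakpoint opcode planted by the dtrace
  //  probe support; this predicate just recognizes it in the first byte.)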
  return (*(int32_t*)this & 0xff) == 0xcc;
}