/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_nativeInst_x86.cpp.incl"

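// Flush the instruction-cache word that was just patched so that all
// processors observe the new bytes before executing them.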
void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}


void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this. There are only special
  // places where this can be called, and they are not automatically
  // verifiable by checking which locks are held. The real solution is
  // true atomic patching on x86, which is not yet implemented.
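  // For example, a call at 0x1000 encoded as E8 FB 0F 00 00 has
  // return_address() == 0x1005 and displacement() == 0xFFB, giving a
  // destination of 0x1005 + 0xFFB == 0x2000.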
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}
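// The call emitted above is the 5-byte call rel32 form (opcode 0xE8):
//
//   code_pos: E8 xx xx xx xx       call entry
//                ^-- 32-bit displacement relative to code_pos + 5,
//                    hence the (code_pos + 1 + 4) term in disp.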

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump
// to themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
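//
// Sketch of the sequence for the 5-byte call (d0..d3 = displacement bytes):
//
//   before:  E8 d0  d1  d2  d3     old call
//   step 1:  EB FE  EB  FE  d3     spinlock: atomic 4-byte write
//   step 2:  EB FE  EB  FE  d3'    last byte of new call written
//   step 3:  E8 d0' d1' d2' d3'    new call: atomic 4-byte write
//
// Each store is followed by an icache flush (the wrote() calls below).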
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Build the dummy jmp-to-self spinlock
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint*)patch;

  // Invalidate. Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch the 5th byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint*)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}


// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.
//
// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
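//
// Sketch of the two cases handled below, for a call starting at address A:
//
//   aligned:   bytes A+1..A+4 (the disp32) share one cache line
//              -> a single 32-bit store updates the displacement
//   unaligned: bytes A..A+1 share one cache line
//              -> spinlock the first two bytes (EB FE jmp-to-self), patch
//                 bytes 2..4, then atomically restore bytes 0..1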
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked. No two threads can patch at the
  // same time, but other threads may be executing the call concurrently.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case: The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address() + 1) / cache_line_size) {
    // Tricky case: The instruction prefix lies within a single cache line.
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self

      assert(sizeof(patch_jump) == sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp) == instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate. Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible: One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}


void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}
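
// (mov r32, imm32 is encoded as B8+rd id, so masking off the low three
// register bits above recovers the 0xB8 opcode; on AMD64 a REX.W prefix
// extends it to mov r64, imm64.)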

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

//-------------------------------------------------------------------

#ifndef AMD64

void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  int inst_size = instruction_size;

  // See if there's an instruction size prefix override.
  if ( *(address(this))   == instruction_operandsize_prefix &&
       *(address(this)+1) != instruction_code_xmm_code ) { // Not an SSE instruction
    inst_size += 1;
  }
  if ( *(address(this)) == instruction_extended_prefix ) inst_size += 1;

  // Copy inst_size bytes, not instruction_size, so that any prefix byte
  // detected above is included in the copy.
  for (int i = 0; i < inst_size; i++) {
    *(new_instruction_address + i) = *(address(this) + i);
  }
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  if ( ! ( (test_byte == instruction_code_reg2memb)
      || (test_byte == instruction_code_mem2regb)
      || (test_byte == instruction_code_mem2regl)
      || (test_byte == instruction_code_reg2meml)
      || (test_byte == instruction_code_mem2reg_movzxb )
      || (test_byte == instruction_code_mem2reg_movzxw )
      || (test_byte == instruction_code_mem2reg_movsxb )
      || (test_byte == instruction_code_mem2reg_movsxw )
      || (test_byte == instruction_code_float_s)
      || (test_byte == instruction_code_float_d)
      || (test_byte == instruction_code_long_volatile) ) )
  {
    u_char byte1 = ((u_char*)instruction_address())[1];
    u_char byte2 = ((u_char*)instruction_address())[2];
    if ((test_byte != instruction_code_xmm_ss_prefix &&
         test_byte != instruction_code_xmm_sd_prefix &&
         test_byte != instruction_operandsize_prefix) ||
        byte1 != instruction_code_xmm_code ||
        (byte2 != instruction_code_xmm_load &&
         byte2 != instruction_code_xmm_lpd &&
         byte2 != instruction_code_xmm_store)) {
      fatal("not a mov [reg+offs], reg instruction");
    }
  }
}

void NativeMovRegMem::print() {
  tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
  if ( ! (test_byte == instruction_code) ) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}

void NativeLoadAddress::print() {
  tty->print_cr("0x%x: lea reg, [reg + %x]", instruction_address(), offset());
}

#endif // !AMD64

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}

void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}
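// (NativeJump::insert emits the 5-byte jmp rel32 form, opcode 0xE9, with the
// displacement again taken relative to code_pos + 5, exactly like the call
// layout sketched for NativeCall::insert above.)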
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize - 1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}

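// (Worked example for check_verified_entry_alignment, 32-bit: verified_entry
// == 0x7f001c is wordSize aligned, but 0x7f001c/32 != (0x7f001c + 4)/32, so
// the 5 patch bytes would straddle a cache line and the second guarantee
// fires.)
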
// MT-safe insertion of a jump over an unknown instruction sequence (used by
// nmethod::makeZombie).
// The problem: jmp <dest> is a 5-byte instruction, but writes are atomic
// only for 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word
// (4 bytes), thus inserting the desired jump.
// This code is mt-safe under the following conditions: the entry point is
// 4-byte aligned, the entry point is in the same cache line as the
// unverified entry point, and the instruction being patched is >= 5 bytes
// (the size of the patch).
//
// In C2 the 5+ byte instruction size is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry.
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump already exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Build the dummy jmp-to-self spinlock
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t*)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t*)code_buffer;
  // Invalidate. Opteron requires a flush after every write.
  n_jump->wrote(0);
}

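// pop r32 is the single byte 0x58+rd; register encodings 8-15 would need a
// REX.B prefix, hence the encoding() < 8 assert below.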
void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}
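// (The two bytes stored above are presumably 0x0F 0x0B -- the x86 ud2
// instruction, which raises #UD -- assuming instruction_code is 0x0B0F as
// declared in nativeInst_x86.hpp.)
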
void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump
// to themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Build the temporary jmp-to-self spinlock
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t*)patch;
  n_jump->wrote(0);

  // Patch the 5th byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint*)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}


address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  // 0xE9 is jmp rel32; 0x0F prefixes the two-byte Jcc rel32 forms.
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}