comparison src/cpu/x86/vm/nativeInst_x86.hpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories
6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn
author never
date Wed, 27 Aug 2008 00:21:55 -0700
parents d1605aabd0a1
children 3a26e9e4be71
comparing 303:fa4d1d240383 with 304:dc7f315e41f7
@@ -233 +233 @@
 #endif
     return test;
   }
 };

-#ifndef AMD64
-
 // An interface for accessing/manipulating native moves of the form:
-//      mov[b/w/l] [reg + offset], reg   (instruction_code_reg2mem)
-//      mov[b/w/l] reg, [reg+offset]     (instruction_code_mem2reg
-//      mov[s/z]x[w/b] [reg + offset], reg
+//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
+//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg
+//      mov[s/z]x[w/b/q] [reg + offset], reg
 //      fld_s  [reg+offset]
 //      fld_d  [reg+offset]
 //      fstp_s [reg + offset]
 //      fstp_d [reg + offset]
+//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
 //
 // Warning: These routines must be able to handle any instruction sequences
 // that are generated as a result of the load/store byte,word,long
 // macros.  For example: The load_unsigned_byte instruction generates
 // an xor reg,reg inst prior to generating the movb instruction.  This
 // class must skip the xor instruction.

 class NativeMovRegMem: public NativeInstruction {
  public:
   enum Intel_specific_constants {
+    instruction_prefix_wide_lo          = Assembler::REX,
+    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
     instruction_code_xor                = 0x33,
     instruction_extended_prefix         = 0x0F,
+    instruction_code_mem2reg_movslq     = 0x63,
     instruction_code_mem2reg_movzxb     = 0xB6,
     instruction_code_mem2reg_movsxb     = 0xBE,
     instruction_code_mem2reg_movzxw     = 0xB7,
     instruction_code_mem2reg_movsxw     = 0xBF,
     instruction_operandsize_prefix      = 0x66,
-    instruction_code_reg2meml           = 0x89,
-    instruction_code_mem2regl           = 0x8b,
+    instruction_code_reg2mem            = 0x89,
+    instruction_code_mem2reg            = 0x8b,
     instruction_code_reg2memb           = 0x88,
     instruction_code_mem2regb           = 0x8a,
     instruction_code_float_s            = 0xd9,
     instruction_code_float_d            = 0xdd,
     instruction_code_long_volatile      = 0xdf,
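The warning in the class comment is concrete: load_unsigned_byte first emits a two-byte xor reg,reg (opcode 0x33, the instruction_code_xor constant above) and only then the movb, so a decoder anchored on the first byte must step over the xor before it reaches the real opcode. A minimal sketch of that skip, with hypothetical names, not the HotSpot code:

    #include <cstdint>

    // Illustrative only: step over a leading two-byte "xor reg,reg"
    // (0x33 /r) so the returned pointer lands on the opcode of the
    // actual mov, the way NativeMovRegMem has to.
    static const uint8_t* skip_leading_xor(const uint8_t* insn) {
      const uint8_t instruction_code_xor = 0x33;
      return (insn[0] == instruction_code_xor) ? insn + 2 : insn;
    }

For example, on the byte sequence 33 C0 8A 46 08 (xor eax,eax; mov al,[esi+8]) it returns a pointer to the 0x8A opcode.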
@@ -280 +282 @@
     instruction_offset                  = 0,
     data_offset                         = 2,
     next_instruction_offset             = 4
   };

-  address instruction_address() const {
-    if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
-        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
-      return addr_at(instruction_offset+1); // Not SSE instructions
-    }
-    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
-      return addr_at(instruction_offset+1);
-    }
-    else if (*addr_at(instruction_offset) == instruction_code_xor) {
-      return addr_at(instruction_offset+2);
-    }
-    else return addr_at(instruction_offset);
-  }
-
-  address next_instruction_address() const {
-    switch (*addr_at(instruction_offset)) {
-    case instruction_operandsize_prefix:
-      if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
-        return instruction_address() + instruction_size; // SSE instructions
-    case instruction_extended_prefix:
-      return instruction_address() + instruction_size + 1;
-    case instruction_code_reg2meml:
-    case instruction_code_mem2regl:
-    case instruction_code_reg2memb:
-    case instruction_code_mem2regb:
-    case instruction_code_xor:
-      return instruction_address() + instruction_size + 2;
-    default:
-      return instruction_address() + instruction_size;
-    }
-  }
-  int offset() const {
-    if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
-        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
-      return int_at(data_offset+1); // Not SSE instructions
-    }
-    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
-      return int_at(data_offset+1);
-    }
-    else if (*addr_at(instruction_offset) == instruction_code_xor ||
-             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
-             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
-             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
-      return int_at(data_offset+2);
-    }
-    else return int_at(data_offset);
-  }
-
-  void set_offset(int x) {
-    if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
-        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
-      set_int_at(data_offset+1, x); // Not SSE instructions
-    }
-    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
-      set_int_at(data_offset+1, x);
-    }
-    else if (*addr_at(instruction_offset) == instruction_code_xor ||
-             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
-             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
-             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
-      set_int_at(data_offset+2, x);
-    }
-    else set_int_at(data_offset, x);
-  }
+  // helper
+  int instruction_start() const;
+
+  address instruction_address() const;
+
+  address next_instruction_address() const;
+
+  int offset() const;
+
+  void set_offset(int x);

   void  add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
-  void  copy_instruction_to(address new_instruction_address);

   void verify();
   void print ();

   // unit test stuff
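The out-of-line declarations above funnel what used to be repeated inline prefix tests through a single instruction_start() helper. On the merged 32/64-bit port such a helper has to step over an optional 0x66 operand-size prefix and, in 64-bit mode, an optional REX byte in the instruction_prefix_wide_lo..instruction_prefix_wide_hi range (0x40..0x4F) before the opcode. A sketch under those assumptions, not the actual body in nativeInst_x86.cpp:

    #include <cstdint>

    // Illustrative: return the index of the primary opcode byte by
    // skipping the prefixes named in the enum above. REX bytes only
    // occur in 64-bit mode (in 32-bit code 0x40..0x4F are inc/dec).
    static int opcode_start(const uint8_t* insn, bool is_64bit) {
      int off = 0;
      if (insn[off] == 0x66) off++;                        // operand-size prefix
      if (is_64bit && insn[off] >= 0x40 && insn[off] <= 0x4F)
        off++;                                             // REX .. REX_WRXB
      if (insn[off] == 0x0F) off++;                        // extended prefix (e.g. movzx)
      return off;
    }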
@@ -383 +330 @@

 // An interface for accessing/manipulating native leal instruction of form:
 //        leal reg, [reg + offset]

 class NativeLoadAddress: public NativeMovRegMem {
- public:
-  enum Intel_specific_constants {
-    instruction_code            = 0x8D
+#ifdef AMD64
+  static const bool has_rex = true;
+  static const int rex_size = 1;
+#else
+  static const bool has_rex = false;
+  static const int rex_size = 0;
+#endif // AMD64
+ public:
+  enum Intel_specific_constants {
+    instruction_prefix_wide             = Assembler::REX_W,
+    instruction_prefix_wide_extended    = Assembler::REX_WB,
+    lea_instruction_code                = 0x8D,
+    mov64_instruction_code              = 0xB8
   };

   void verify();
   void print ();

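The two opcodes now distinguished above encode quite different shapes: lea_instruction_code (0x8D) is an ordinary ModRM instruction, while mov64_instruction_code is the B8+rd form carrying a full 64-bit immediate, for addresses that no longer fit a 32-bit displacement. Illustrative encodings and a hypothetical recognizer (the helper name is mine, not HotSpot's):

    #include <cstdint>

    // lea rax, [rsi+16]  ->  48 8D 46 10       REX.W, 0x8D, ModRM, disp8
    // lea eax, [esi+16]  ->     8D 46 10       32-bit: same opcode, rex_size == 0
    // mov rax, imm64     ->  48 B8 <8 bytes>   REX.W, 0xB8+rd, 64-bit immediate
    static bool is_lea_or_mov64(const uint8_t* p) {
      int off = (p[0] >= 0x40 && p[0] <= 0x4F) ? 1 : 0;  // optional REX (AMD64)
      return p[off] == 0x8D || (p[off] & 0xF8) == 0xB8;  // lea | mov64 (B8..BF)
    }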
@@ -404 +361 @@
 #endif
     return test;
   }
 };

-#endif // AMD64
-
 // jump rel32off

 class NativeJump: public NativeInstruction {
  public:
   enum Intel_specific_constants {
@@ -422 +377 @@

   address instruction_address() const       { return addr_at(instruction_offset); }
   address next_instruction_address() const  { return addr_at(next_instruction_offset); }
   address jump_destination() const           {
     address dest = (int_at(data_offset)+next_instruction_address());
-#ifdef AMD64 // What is this about?
+    // 32bit used to encode unresolved jmp as jmp -1
+    // 64bit can't produce this so it used jump to self.
+    // Now 32bit and 64bit use jump to self as the unresolved address
+    // which the inline cache code (and relocs) know about
+
     // return -1 if jump to self
     dest = (dest == (address) this) ? (address) -1 : dest;
-#endif // AMD64
     return dest;
   }

   void  set_jump_destination(address dest)  {
     intptr_t val = dest - next_instruction_address();
 #ifdef AMD64
-    if (dest == (address) -1) { // can't encode jump to -1
-      val = -5; // jump to self
-    } else {
-      assert((labs(val)  & 0xFFFFFFFF00000000) == 0,
-             "must be 32bit offset");
-    }
+    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
 #endif // AMD64
     set_int_at(data_offset, (jint)val);
   }

   // Creation
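The replacement comment pins down the convention jump_destination() relies on: a five-byte jmp rel32 whose offset is -5 branches to its own first byte, and both 32- and 64-bit now use that self-jump as the unresolved marker (mapped back to -1 for callers). A standalone sketch of the arithmetic, assuming the 0xE9 form where the rel32 sits at data_offset 1:

    #include <cstdint>
    #include <cstring>

    // Illustrative rel32 decoding (not the HotSpot accessors): the
    // destination is the end of the 5-byte instruction plus the signed
    // 32-bit offset, so rel32 == -5 means "jump to self".
    static const uint8_t* jump_dest(const uint8_t* insn) {
      int32_t rel32;
      std::memcpy(&rel32, insn + 1, sizeof(rel32));  // assumed data_offset == 1
      return insn + 5 + rel32;                       // next instruction + rel32
    }

With this convention, jump_dest(p) == p exactly when the jump is still unresolved.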
@@ -566 +519 @@
                                           ubyte_at(0) == 0xEB; /* short jump */ }
 inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                           (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
 #ifdef AMD64
-  return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
-         ubyte_at(1) == 0x05 && // 00 rax 101
-         ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page();
+  if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
+       ubyte_at(1) == 0x05 ) { // 00 rax 101
+    address fault = addr_at(6) + int_at(2);
+    return os::is_poll_address(fault);
+  } else {
+    return false;
+  }
 #else
-  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl ||
+  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
            ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
          (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
          (os::is_poll_address((address)int_at(2)));
 #endif // AMD64
 }
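Both arms of is_safepoint_poll() decode a test against the polling page. On AMD64 the poll is rip-relative: opcode 0x85 with ModRM 0x05 is test dword ptr [rip+disp32], eax, so the faulting address is the end of the six-byte instruction plus the displacement, which is exactly addr_at(6) + int_at(2); the rewrite then asks os::is_poll_address() about it instead of comparing against a single os::get_polling_page(). On 32-bit, mod=00/rm=101 means an absolute disp32, so int_at(2) is already the poll address. A sketch of the 64-bit computation, assuming a little-endian host:

    #include <cstdint>
    #include <cstring>

    // Illustrative: recover the rip-relative target of the 6-byte poll
    //   85 05 <disp32>   test dword ptr [rip+disp32], eax
    static const uint8_t* poll_target(const uint8_t* insn) {
      int32_t disp32;
      std::memcpy(&disp32, insn + 2, sizeof(disp32)); // int_at(2)
      return insn + 6 + disp32;                       // addr_at(6) + disp32
    }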