diff src/cpu/x86/vm/c1_MacroAssembler_x86.cpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories
6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn
author never
date Wed, 27 Aug 2008 00:21:55 -0700
parents d1605aabd0a1
children c96bf21b756f
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Aug 26 15:49:40 2008 -0700
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Wed Aug 27 00:21:55 2008 -0700
@@ -26,18 +26,17 @@
 #include "incls/_c1_MacroAssembler_x86.cpp.incl"
 
 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;
   int null_check_offset = -1;
 
   verify_oop(obj);
 
   // save object being locked into the BasicObjectLock
-  movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
+  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
 
   if (UseBiasedLocking) {
     assert(scratch != noreg, "should have scratch register at this point");
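
The recurring edit in this change is word-size neutrality: the explicit 32-bit forms (movl, orl, testl, cmpxchg, ...) become pointer-sized wrappers (movptr, orptr, testptr, cmpxchgptr, ...), and aligned_mask is derived from BytesPerWord instead of being hard-coded to 3, so one C1 source file serves both IA-32 and x86_64. A minimal stand-alone sketch of the dispatch idea (a hypothetical mini-assembler, not HotSpot's real MacroAssembler):

    #include <cstdint>

    // Hypothetical mini-assembler: each *ptr helper forwards to the instruction
    // that matches the platform word size, selected at compile time via _LP64.
    class SketchAssembler {
     public:
      void movl(int reg, int32_t imm) { emit("movl", reg, imm); }   // 4-byte move
      void movq(int reg, int64_t imm) { emit("movq", reg, imm); }   // 8-byte move

      // One spelling at the call site, two encodings underneath.
      void movptr(int reg, intptr_t imm) {
    #ifdef _LP64
        movq(reg, imm);
    #else
        movl(reg, (int32_t)imm);
    #endif
      }

     private:
      void emit(const char*, int, int64_t) { /* a real assembler emits bytes here */ }
    };
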
@@ -47,16 +46,16 @@
   }
 
   // Load object header
-  movl(hdr, Address(obj, hdr_offset));
+  movptr(hdr, Address(obj, hdr_offset));
   // and mark it as unlocked
-  orl(hdr, markOopDesc::unlocked_value);
+  orptr(hdr, markOopDesc::unlocked_value);
   // save unlocked object header into the displaced header location on the stack
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // test if object header is still the same (i.e. unlocked), and if so, store the
   // displaced header address in the object header - if it is not the same, get the
   // object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(disp_hdr, Address(obj, hdr_offset));
+  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
   // if the object header was the same, we're done
   if (PrintBiasedLockingStatistics) {
     cond_inc32(Assembler::equal,
@@ -76,11 +75,11 @@
   //
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
-  subl(hdr, rsp);
-  andl(hdr, aligned_mask - os::vm_page_size());
+  subptr(hdr, rsp);
+  andptr(hdr, aligned_mask - os::vm_page_size());
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // otherwise we don't care about the result and handle locking via runtime call
   jcc(Assembler::notZero, slow_case);
   // done
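
Taken together, these hunks are the stack-locking fast path: read the mark word, set the unlocked bit, park that value in the BasicLock slot on the stack, then CAS the slot's address into the header; if the CAS fails, the subtract-and-mask trick decides whether the observed header is an older BasicLock on this same stack (a recursive acquire, recorded by a NULL displaced header) or a genuine contention case for the runtime. A hedged C++ sketch of that flow, with illustrative names only:

    #include <atomic>
    #include <cstdint>

    // Returns true if the lock was taken on the fast path; false means
    // "go to slow_case" (runtime call).  rsp/page_size model the stack
    // pointer register and os::vm_page_size() used by the masking trick.
    bool fast_lock_sketch(std::atomic<intptr_t>* mark_addr,   // object header word
                          intptr_t* displaced_hdr_slot,       // BasicLock slot on the stack
                          intptr_t rsp, intptr_t page_size) {
      const intptr_t aligned_mask = sizeof(void*) - 1;        // BytesPerWord - 1
      intptr_t unlocked = mark_addr->load() | 1;              // orptr(hdr, unlocked_value)
      *displaced_hdr_slot = unlocked;                         // movptr(Address(disp_hdr, 0), hdr)
      intptr_t observed = unlocked;
      if (mark_addr->compare_exchange_strong(observed,        // lock; cmpxchgptr(...)
                                             (intptr_t)displaced_hdr_slot)) {
        return true;                                          // header now points at our slot
      }
      // CAS failed: `observed` holds the current header.  A zero result from
      // the subtract-and-mask means it is a word-aligned address within one
      // page above rsp, i.e. an older BasicLock on this very stack.
      if (((observed - rsp) & (aligned_mask - page_size)) == 0) {
        *displaced_hdr_slot = 0;                              // NULL => recursive locking
        return true;
      }
      return false;                                           // slow_case
    }
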
@@ -90,35 +89,34 @@
 
 
 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;
 
   if (UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
     biased_locking_exit(obj, hdr, done);
   }
 
   // load displaced header
-  movl(hdr, Address(disp_hdr, 0));
+  movptr(hdr, Address(disp_hdr, 0));
   // if the loaded hdr is NULL we had recursive locking
-  testl(hdr, hdr);
+  testptr(hdr, hdr);
   // if we had recursive locking, we are done
   jcc(Assembler::zero, done);
   if (!UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
   }
   verify_oop(obj);
   // test if object header is pointing to the displaced header, and if so, restore
   // the displaced header in the object - if the object header is not pointing to
   // the displaced header, get the object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(hdr, Address(obj, hdr_offset));
+  cmpxchgptr(hdr, Address(obj, hdr_offset));
   // if the object header was not pointing to the displaced header,
   // we do unlocking via runtime call
   jcc(Assembler::notEqual, slow_case);
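
The unlock path mirrors this: a NULL displaced header means the exit matches a recursive enter and there is nothing to restore; otherwise the CAS swings the header back from the BasicLock address to the saved mark word, falling back to the runtime if the header no longer points at our slot (for example because the lock was inflated). A matching sketch, again with illustrative names:

    #include <atomic>
    #include <cstdint>

    // Returns true if the fast path released the lock; false means slow_case.
    bool fast_unlock_sketch(std::atomic<intptr_t>* mark_addr,
                            intptr_t* displaced_hdr_slot) {
      intptr_t displaced = *displaced_hdr_slot;               // movptr(hdr, Address(disp_hdr, 0))
      if (displaced == 0) {                                   // testptr / jcc zero
        return true;                                          // recursive exit, nothing to restore
      }
      intptr_t expected = (intptr_t)displaced_hdr_slot;
      return mark_addr->compare_exchange_strong(expected,     // lock; cmpxchgptr(...)
                                                displaced);   // put the mark word back
    }
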
@@ -141,13 +139,14 @@
   assert_different_registers(obj, klass, len);
   if (UseBiasedLocking && !len->is_valid()) {
     assert_different_registers(obj, klass, len, t1, t2);
-    movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
-    movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+    movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
   } else {
-    movl(Address(obj, oopDesc::mark_offset_in_bytes ()), (int)markOopDesc::prototype());
+    // This assumes that all prototype bits fit in an int32_t
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
 
-  movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
+  movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
   if (len->is_valid()) {
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
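
The new cast and its comment deserve a note: on x86_64 a word-sized store of a 32-bit immediate is sign-extended by the hardware, so writing (int32_t)(intptr_t)markOopDesc::prototype() only reproduces the intended 64-bit mark word while all of the prototype's significant bits sit in the low 31 bits. A small self-contained illustration of that assumption (the values are made up, not the real prototype):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Today's prototype only uses a few low bits, so sign-extending the
      // truncated 32-bit value gives the original word back.
      int64_t prototype = 0x1;                          // illustrative, not the real value
      int64_t stored    = (int64_t)(int32_t)prototype;  // what a 32-bit immediate store yields
      assert(stored == prototype);

      // A hypothetical prototype bit at or above bit 31 would be truncated or
      // smeared by sign extension, which is what the comment warns about.
      int64_t too_wide = (int64_t)1 << 32;
      assert((int64_t)(int32_t)too_wide != too_wide);
      return 0;
    }
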
@@ -160,25 +159,27 @@
   assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
   assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
   Register index = len_in_bytes;
-  subl(index, hdr_size_in_bytes);
+  // index is positive and ptr sized
+  subptr(index, hdr_size_in_bytes);
   jcc(Assembler::zero, done);
   // initialize topmost word, divide index by 2, check if odd and test if zero
   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 #ifdef ASSERT
   { Label L;
-    testl(index, BytesPerWord - 1);
+    testptr(index, BytesPerWord - 1);
     jcc(Assembler::zero, L);
     stop("index is not a multiple of BytesPerWord");
     bind(L);
   }
 #endif
-  xorl(t1, t1);      // use _zero reg to clear memory (shorter code)
+  xorptr(t1, t1);    // use _zero reg to clear memory (shorter code)
   if (UseIncDec) {
-    shrl(index, 3);  // divide by 8 and set carry flag if bit 2 was set
+    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
   } else {
-    shrl(index, 2);  // use 2 instructions to avoid partial flag stall
-    shrl(index, 1);
+    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
+    shrptr(index, 1);
   }
+#ifndef _LP64
   // index could have been not a multiple of 8 (i.e., bit 2 was set)
   { Label even;
     // note: if index was a multiple of 8, than it cannot
@@ -186,16 +187,17 @@
     //       => if it is even, we don't need to check for 0 again
     jcc(Assembler::carryClear, even);
     // clear topmost word (no jump needed if conditional assignment would work here)
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
     // index could be 0 now, need to check again
     jcc(Assembler::zero, done);
     bind(even);
   }
+#endif // !_LP64
   // initialize remaining object fields: rdx is a multiple of 2 now
   { Label loop;
     bind(loop);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
+    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
     decrement(index);
     jcc(Assembler::notZero, loop);
   }
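
The new #ifndef _LP64 guard exists because on 64-bit BytesPerWord is 8, so after `index` is shifted right by 3 the carry can never be set and the odd-word fix-up is dead code; the loop then clears one 8-byte word per iteration on x86_64 versus two 4-byte words on IA-32. A C-level sketch of what the shift, fix-up, and loop compute together (obj/index stand in for the registers; not HotSpot code):

    #include <cstddef>
    #include <cstring>

    void initialize_body_sketch(char* obj, size_t len_in_bytes, size_t hdr_size_in_bytes) {
      size_t index = len_in_bytes - hdr_size_in_bytes;  // subptr; bytes to clear (word multiple)
      if (index == 0) return;                           // jcc zero, done
      size_t n = index >> 3;                            // shrptr by 3: count of 8-byte chunks
    #ifndef _LP64
      if (index & 4) {                                  // the "carry" case: odd 4-byte word on top
        memset(obj + hdr_size_in_bytes + index - 4, 0, 4);
        if (n == 0) return;                             // index could be 0 now
      }
    #endif
      while (n != 0) {                                  // one 8-byte step per iteration:
        memset(obj + hdr_size_in_bytes + (n - 1) * 8,   //   LP64: a single movptr store,
               0, 8);                                   //   IA-32: two 4-byte stores
        n--;                                            // decrement(index); jcc notZero, loop
      }
    }
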
@@ -227,30 +229,30 @@
   const Register index = t2;
   const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
   if (var_size_in_bytes != noreg) {
-    movl(index, var_size_in_bytes);
+    mov(index, var_size_in_bytes);
     initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
   } else if (con_size_in_bytes <= threshold) {
     // use explicit null stores
     // code size = 2 + 3*n bytes (n = number of fields to clear)
-    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
     for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
-      movl(Address(obj, i), t1_zero);
+      movptr(Address(obj, i), t1_zero);
   } else if (con_size_in_bytes > hdr_size_in_bytes) {
     // use loop to null out the fields
     // code size = 16 bytes for even n (n = number of fields to clear)
     // initialize last object field first if odd number of fields
-    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
-    movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
+    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+    movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
     // initialize last object field if constant size is odd
     if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
-      movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
+      movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
     // initialize remaining object fields: rdx is a multiple of 2
     { Label loop;
       bind(loop);
-      movl(Address(obj, index, Address::times_8,
-        hdr_size_in_bytes - (1*BytesPerWord)), t1_zero);
-      movl(Address(obj, index, Address::times_8,
-        hdr_size_in_bytes - (2*BytesPerWord)), t1_zero);
+      movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
+             t1_zero);
+      NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
+             t1_zero);)
       decrement(index);
       jcc(Assembler::notZero, loop);
     }
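
The `threshold` above is a code-size estimate: each explicit "movptr [obj+i], zero" costs roughly 3 bytes plus 2 bytes to zero the register, against a fixed ~16-byte loop, which puts the crossover around five or six word-sized fields and motivates threshold = 6 * BytesPerWord. A throwaway sketch of that arithmetic:

    #include <cstdio>

    int main() {
      const int loop_bytes = 16;                        // per the comment in initialize_object
      for (int fields = 1; fields <= 8; fields++) {
        int unrolled_bytes = 2 + 3 * fields;            // xor + n explicit stores
        std::printf("fields=%d  unrolled=%dB  loop=%dB  -> %s\n",
                    fields, unrolled_bytes, loop_bytes,
                    unrolled_bytes <= loop_bytes ? "unroll" : "loop");
      }
      return 0;
    }
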
@@ -269,17 +271,17 @@
   assert_different_registers(obj, len, t1, t2, klass);
 
   // determine alignment mask
-  assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work");
+  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
 
   // check for negative or excessive length
-  cmpl(len, max_array_allocation_length);
+  cmpptr(len, (int32_t)max_array_allocation_length);
   jcc(Assembler::above, slow_case);
 
   const Register arr_size = t2; // okay to be the same
   // align object end
-  movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
-  leal(arr_size, Address(arr_size, len, f));
-  andl(arr_size, ~MinObjAlignmentInBytesMask);
+  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+  lea(arr_size, Address(arr_size, len, f));
+  andptr(arr_size, ~MinObjAlignmentInBytesMask);
 
   try_allocate(obj, arr_size, 0, t1, t2, slow_case);
 
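
The movptr/lea/andptr triple above computes the allocation size as header bytes plus scaled length, rounded up to the object alignment; loading MinObjAlignmentInBytesMask into arr_size first lets a single lea fold the addition before the mask clears the low bits. A small sketch of that arithmetic with made-up parameters:

    #include <cassert>
    #include <cstddef>

    size_t array_size_sketch(size_t header_size_words, size_t len, int elem_scale_log2,
                             size_t bytes_per_word, size_t min_obj_alignment_mask) {
      size_t arr_size = header_size_words * bytes_per_word + min_obj_alignment_mask; // movptr(arr_size, ...)
      arr_size += len << elem_scale_log2;                                            // lea(arr_size, Address(arr_size, len, f))
      arr_size &= ~min_obj_alignment_mask;                                           // andptr(arr_size, ~mask)
      return arr_size;
    }

    int main() {
      // Illustrative numbers only: a 2-word header, 4-byte elements, length 3,
      // 8-byte alignment: 16 + 12 = 28 bytes rounds up to 32.
      assert(array_size_sketch(2, 3, 2, 8, 7) == 32);
      return 0;
    }
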
@@ -305,12 +307,13 @@
   // check against inline cache
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
-  cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+  cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
   // if icache check fails, then jump to runtime routine
   // Note: RECEIVER must still contain the receiver!
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-  assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
+  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
+  assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
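
The inline-cache compare is one byte longer on x86_64, most likely the REX prefix carried by the 64-bit cmp, which is why the hard-coded 9 becomes LP64_ONLY(10) NOT_LP64(9); whatever size is emitted here has to agree with the alignment logic in emit_method_entry that the assert points at.
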
 
 
@@ -364,7 +367,7 @@
 void C1_MacroAssembler::verify_not_null_oop(Register r) {
   if (!VerifyOops) return;
   Label not_null;
-  testl(r, r);
+  testptr(r, r);
   jcc(Assembler::notZero, not_null);
   stop("non-null oop required");
   bind(not_null);
@@ -373,12 +376,12 @@
 
 void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
 #ifdef ASSERT
-  if (inv_rax) movl(rax, 0xDEAD);
-  if (inv_rbx) movl(rbx, 0xDEAD);
-  if (inv_rcx) movl(rcx, 0xDEAD);
-  if (inv_rdx) movl(rdx, 0xDEAD);
-  if (inv_rsi) movl(rsi, 0xDEAD);
-  if (inv_rdi) movl(rdi, 0xDEAD);
+  if (inv_rax) movptr(rax, 0xDEAD);
+  if (inv_rbx) movptr(rbx, 0xDEAD);
+  if (inv_rcx) movptr(rcx, 0xDEAD);
+  if (inv_rdx) movptr(rdx, 0xDEAD);
+  if (inv_rsi) movptr(rsi, 0xDEAD);
+  if (inv_rdi) movptr(rdi, 0xDEAD);
 #endif
 }