diff src/cpu/x86/vm/interp_masm_x86_64.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 1d7922586cf6
children e522a00b91aa f34d701e952e
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Aug 31 16:39:35 2012 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Sat Sep 01 13:25:18 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/markOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiThreadState.hpp"
@@ -221,10 +221,9 @@
     // Check if the secondary index definition is still ~x, otherwise
     // we have to change the following assembler code to calculate the
     // plain index.
-    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     notl(index);  // convert to plain index
   } else if (index_size == sizeof(u1)) {
-    assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
     load_unsigned_byte(index, Address(r13, bcp_offset));
   } else {
     ShouldNotReachHere();
@@ -241,6 +240,7 @@
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
+  assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
   shll(index, 2);
 }
 
@@ -254,7 +254,7 @@
   get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
   // We use a 32-bit load here since the layout of 64-bit words on
   // little-endian machines allow us that.
-  movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
   const int shift_count = (1 + byte_no) * BitsPerByte;
   assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
          (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
@@ -274,13 +274,32 @@
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
+  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
   shll(tmp, 2 + LogBytesPerWord);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   // skip past the header
-  addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+  addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
   addptr(cache, tmp);  // construct pointer to cache entry
 }
 
+// Load object from cpool->resolved_references(index)
+void InterpreterMacroAssembler::load_resolved_reference_at_index(
+                                           Register result, Register index) {
+  assert_different_registers(result, index);
+  // convert from field index to resolved_references() index and from
+  // word index to byte offset. Since this is a java object, it can be compressed
+  Register tmp = index;  // reuse
+  shll(tmp, LogBytesPerHeapOop);
+
+  get_constant_pool(result);
+  // load pointer for resolved_references[] objArray
+  movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+  // JNIHandles::resolve(obj);
+  movptr(result, Address(result, 0));
+  // Add in the index
+  addptr(result, tmp);
+  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+}
 
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 // subtype of super_klass.
@@ -426,11 +445,11 @@
     // Is a cmpl faster?
     cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
     jccb(Assembler::zero, run_compiled_code);
-    jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+    jmp(Address(method, Method::interpreter_entry_offset()));
     bind(run_compiled_code);
   }
 
-  jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+  jmp(Address(method, Method::from_interpreted_offset()));
 
 }
 
@@ -526,7 +545,7 @@
 
  // get method access flags
   movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
-  movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
+  movl(rcx, Address(rbx, Method::access_flags_offset()));
   testl(rcx, JVM_ACC_SYNCHRONIZED);
   jcc(Assembler::zero, unlocked);
 
@@ -834,7 +853,7 @@
 
   get_method(rbx);
   // Test MDO to avoid the call if it is NULL.
-  movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
   testptr(rax, rax);
   jcc(Assembler::zero, set_mdp);
   // rbx: method
@@ -842,8 +861,8 @@
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
   // rax: mdi
   // mdo is guaranteed to be non-zero here, we checked for it before the call.
-  movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
+  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
+  addptr(rbx, in_bytes(MethodData::data_offset()));
   addptr(rax, rbx);
   bind(set_mdp);
   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
@@ -866,8 +885,8 @@
   // consistent with the bcp.  The converse is highly probable also.
   load_unsigned_short(c_rarg2,
                       Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
-  addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
-  lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
+  addptr(c_rarg2, Address(rbx, Method::const_offset()));
+  lea(c_rarg2, Address(c_rarg2, ConstMethod::codes_offset()));
   cmpptr(c_rarg2, r13);
   jcc(Assembler::equal, verify_continue);
   // rbx: method