diff src/cpu/x86/vm/templateTable_x86_64.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 1d7922586cf6
children 75f33eecc1b3
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Aug 31 16:39:35 2012 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Sat Sep 01 13:25:18 2012 -0400
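
Most of this file's changes are mechanical renames from the old PermGen
oop-based metadata types to plain C++ classes:

    methodOopDesc            -> Method
    constMethodOopDesc       -> ConstMethod
    methodDataOopDesc        -> MethodData
    constantPoolOopDesc      -> ConstantPool
    constantPoolCacheOopDesc -> ConstantPoolCache
    instanceKlass            -> InstanceKlass
    typeArrayOop (cp tags)   -> Array<u1>

The substantive changes - the ldc/fast_aldc rewrite onto the
resolved-references array, the removal of the f12_oop cache-entry kind, the
static-field mirror indirection, and the quicken_io_cc result protocol - are
annotated at the hunks where they occur.
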
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/templateTable.hpp"
 #include "memory/universe.inline.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/methodHandles.hpp"
@@ -367,16 +367,12 @@
   }
 
   __ get_cpool_and_tags(rcx, rax);
-  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
-  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+  const int base_offset = ConstantPool::header_size() * wordSize;
+  const int tags_offset = Array<u1>::base_offset_in_bytes();
 
   // get type
   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 
-  // unresolved string - get the resolved string
-  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
-  __ jccb(Assembler::equal, call_ldc);
-
   // unresolved class - get the resolved class
   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
   __ jccb(Assembler::equal, call_ldc);
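
Two things change in the hunk above. The constant-pool tags are no longer a
Java typeArrayOop with an object header but a native Array<u1>, so the element
base comes from Array<u1>::base_offset_in_bytes(). And
JVM_CONSTANT_UnresolvedString drops out of the slow ldc path entirely: String
constants are rewritten to fast_aldc and resolved through the
resolved-references array (see fast_aldc below). A sketch of the Array<T>
layout that offset assumes, per this changeset:

    // sketch: metadata array with inline data, no oop header
    template <typename T> class Array {
      int _length;    // number of elements
      T   _data[1];   // elements start here;
                      // base_offset_in_bytes() == offset_of(Array<T>, _data)
    };
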
@@ -411,76 +407,46 @@
     Label L;
     __ cmpl(rdx, JVM_CONSTANT_Integer);
     __ jcc(Assembler::equal, L);
-    __ cmpl(rdx, JVM_CONSTANT_String);
-    __ jcc(Assembler::equal, L);
-    __ cmpl(rdx, JVM_CONSTANT_Object);
-    __ jcc(Assembler::equal, L);
+    // String and Object are rewritten to fast_aldc
     __ stop("unexpected tag type in ldc");
     __ bind(L);
   }
 #endif
-  // atos and itos
-  Label isOop;
-  __ cmpl(rdx, JVM_CONSTANT_Integer);
-  __ jcc(Assembler::notEqual, isOop);
+  // itos JVM_CONSTANT_Integer only
   __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
   __ push_i(rax);
-  __ jmp(Done);
-
-  __ bind(isOop);
-  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
-  __ push_ptr(rax);
-
-  if (VerifyOops) {
-    __ verify_oop(rax);
-  }
-
   __ bind(Done);
 }
 
 // Fast path for caching oop constants.
-// %%% We should use this to handle Class and String constants also.
-// %%% It will simplify the ldc/primitive path considerably.
 void TemplateTable::fast_aldc(bool wide) {
   transition(vtos, atos);
 
-  if (!EnableInvokeDynamic) {
-    // We should not encounter this bytecode if !EnableInvokeDynamic.
-    // The verifier will stop it.  However, if we get past the verifier,
-    // this will stop the thread in a reasonable way, without crashing the JVM.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                     InterpreterRuntime::throw_IncompatibleClassChangeError));
-    // the call_VM checks for exception, so we should never return here.
-    __ should_not_reach_here();
-    return;
-  }
-
-  const Register cache = rcx;
-  const Register index = rdx;
-
-  resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  Register result = rax;
+  Register tmp = rdx;
+  int index_size = wide ? sizeof(u2) : sizeof(u1);
+
+  Label resolved;
+
+  // We are resolved if the resolved reference cache entry contains a
+  // non-null object (String, MethodType, etc.)
+  assert_different_registers(result, tmp);
+  __ get_cache_index_at_bcp(tmp, 1, index_size);
+  __ load_resolved_reference_at_index(result, tmp);
+  __ testl(result, result);
+  __ jcc(Assembler::notZero, resolved);
+
+  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+
+  // first time invocation - must resolve first
+  __ movl(tmp, (int)bytecode());
+  __ call_VM(result, entry, tmp);
+
+  __ bind(resolved);
+
   if (VerifyOops) {
-    __ verify_oop(rax);
+    __ verify_oop(result);
   }
-
-  Label L_done, L_throw_exception;
-  const Register con_klass_temp = rcx;  // same as cache
-  const Register array_klass_temp = rdx;  // same as index
-  __ load_klass(con_klass_temp, rax);
-  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
-  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
-  __ jcc(Assembler::notEqual, L_done);
-  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
-  __ jcc(Assembler::notEqual, L_throw_exception);
-  __ xorptr(rax, rax);
-  __ jmp(L_done);
-
-  // Load the exception from the system-array which wraps it:
-  __ bind(L_throw_exception);
-  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
-  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
-
-  __ bind(L_done);
 }
 
 void TemplateTable::ldc2_w() {
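
fast_aldc is the core of the ldc rewrite. The old code resolved through the
constant-pool cache's f1 oop field (the f12_oop kind) and then special-cased
an exception wrapped in a system object array; the new code indexes a Java
object array hung off the ConstantPool - the resolved-references array - where
a NULL slot means "not yet resolved" and triggers the call into
InterpreterRuntime::resolve_ldc. A minimal C++ sketch of the fast path,
assuming this changeset's accessors:

    // sketch: the check fast_aldc performs before calling resolve_ldc
    oop resolved_reference_at(ConstantPool* cp, int cache_index) {
      objArrayOop refs = cp->resolved_references();  // ordinary heap array
      return refs->obj_at(cache_index);              // NULL until resolved
    }

Keeping resolved constants in an ordinary Java array is what lets the GC reach
them without PermGen: the native metadata itself holds no oops.
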
@@ -489,8 +455,8 @@
   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 
   __ get_cpool_and_tags(rcx, rax);
-  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
-  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+  const int base_offset = ConstantPool::header_size() * wordSize;
+  const int tags_offset = Array<u1>::base_offset_in_bytes();
 
   // get type
   __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
@@ -1596,9 +1562,9 @@
   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                      // holds bumped taken count
 
-  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
+  const ByteSize be_offset = Method::backedge_counter_offset() +
                              InvocationCounter::counter_offset();
-  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
+  const ByteSize inv_offset = Method::invocation_counter_offset() +
                               InvocationCounter::counter_offset();
   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
 
@@ -1620,8 +1586,8 @@
 
     // compute return address as bci in rax
     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
-                        in_bytes(constMethodOopDesc::codes_offset())));
-    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
+                        in_bytes(ConstMethod::codes_offset())));
+    __ subptr(rax, Address(rcx, Method::const_offset()));
     // Adjust the bcp in r13 by the displacement in rdx
     __ addptr(r13, rdx);
     // jsr returns atos that is not an oop
@@ -1656,18 +1622,18 @@
       int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
-        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
         __ testptr(rbx, rbx);
         __ jccb(Assembler::zero, no_mdo);
         // Increment the MDO backedge counter
-        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                            in_bytes(InvocationCounter::counter_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                    rax, false, Assembler::zero, &backedge_counter_overflow);
         __ jmp(dispatch);
       }
       __ bind(no_mdo);
-      // Increment backedge counter in methodOop
+      // Increment backedge counter in Method*
       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                  rax, false, Assembler::zero, &backedge_counter_overflow);
     } else {
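
Only the owning types change here (methodOopDesc/methodDataOopDesc become
Method*/MethodData*); the counter logic is untouched. For orientation,
increment_mask_and_jump implements roughly:

    // sketch of the counter bump and notification test
    counter += InvocationCounter::count_increment;  // bump the count bits
    if ((counter & mask) == 0)                      // notify-frequency hit?
      goto backedge_counter_overflow;               // consider compiling/OSR
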
@@ -1696,7 +1662,7 @@
           __ jcc(Assembler::below, dispatch);
 
           // When ProfileInterpreter is on, the backedge_count comes
-          // from the methodDataOop, which value does not get reset on
+          // from the MethodData*, which value does not get reset on
           // the call to frequency_counter_overflow().  To avoid
           // excessive calls to the overflow routine while the method is
           // being compiled, add a second test to make sure the overflow
@@ -1854,9 +1820,9 @@
   __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
   __ profile_ret(rbx, rcx);
   __ get_method(rax);
-  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
+  __ movptr(r13, Address(rax, Method::const_offset()));
   __ lea(r13, Address(r13, rbx, Address::times_1,
-                      constMethodOopDesc::codes_offset()));
+                      ConstMethod::codes_offset()));
   __ dispatch_next(vtos);
 }
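
ret rebuilds the bytecode pointer from the saved return bci; with ConstMethod
now a native structure the arithmetic is unchanged, only retyped. In C++
terms it computes roughly (method_const is a hypothetical name for the value
loaded via Method::const_offset()):

    // sketch: r13 = the method's ConstMethod code base + return bci
    address bcp = (address) method_const
                + in_bytes(ConstMethod::codes_offset())
                + return_bci;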
 
@@ -1866,8 +1832,8 @@
   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
   __ profile_ret(rbx, rcx);
   __ get_method(rax);
-  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
-  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
+  __ movptr(r13, Address(rax, Method::const_offset()));
+  __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
   __ dispatch_next(vtos);
 }
 
@@ -2117,31 +2083,17 @@
 }
 
 void TemplateTable::resolve_cache_and_index(int byte_no,
-                                            Register result,
                                             Register Rcache,
                                             Register index,
                                             size_t index_size) {
   const Register temp = rbx;
-  assert_different_registers(result, Rcache, index, temp);
+  assert_different_registers(Rcache, index, temp);
 
   Label resolved;
-  if (byte_no == f12_oop) {
-    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
-    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
-    // there is a 1-1 relation between bytecode type and CP entry type.
-    // The caller will also load a methodOop from f2.
-    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
-    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
-    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
-    __ testptr(result, result);
-    __ jcc(Assembler::notEqual, resolved);
-  } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
-    assert(result == noreg, "");  //else change code for setting result
     __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
     __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
-  }
 
   // resolve first time through
   address entry;
@@ -2164,12 +2116,6 @@
   case Bytecodes::_invokedynamic:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
     break;
-  case Bytecodes::_fast_aldc:
-    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
-    break;
-  case Bytecodes::_fast_aldc_w:
-    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
-    break;
   default:
     fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
     break;
@@ -2179,8 +2125,6 @@
 
   // Update registers with resolved info
   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
-  if (result != noreg)
-    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   __ bind(resolved);
 }
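
With fast_aldc resolving through the resolved-references array, the f12_oop
cache-entry kind and its special case here disappear: every remaining caller
takes the uniform f1_byte/f2_byte path, and "resolved?" is simply whether the
entry's recorded bytecode matches the current one. A sketch of that test,
using accessors that exist on ConstantPoolCacheEntry in this changeset:

    // sketch: the "have we resolved this bytecode?" test
    bool is_resolved(ConstantPoolCacheEntry* e, Bytecodes::Code code, int byte_no) {
      int bc = (byte_no == f1_byte) ? e->bytecode_1() : e->bytecode_2();
      return bc == (int) code;
    }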
 
@@ -2193,7 +2137,7 @@
                                               bool is_static = false) {
   assert_different_registers(cache, index, flags, off);
 
-  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
   // Field offset
   __ movptr(off, Address(cache, index, Address::times_ptr,
                          in_bytes(cp_base_offset +
@@ -2208,6 +2152,8 @@
     __ movptr(obj, Address(cache, index, Address::times_ptr,
                            in_bytes(cp_base_offset +
                                     ConstantPoolCacheEntry::f1_offset())));
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ movptr(obj, Address(obj, mirror_offset));
   }
 }
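
This one is a semantic change, not a rename: for a static field the cache
entry's f1 used to be the java.lang.Class mirror itself, but metadata may no
longer point at heap objects, so f1 now holds the Klass* and the interpreter
indirects through the klass's mirror field:

    // sketch: the static-field holder is now reached via the Klass
    oop static_field_base = ((Klass*) f1)->java_mirror();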
 
@@ -2228,38 +2174,27 @@
   // determine constant pool cache field offsets
   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
   const int method_offset = in_bytes(
-    constantPoolCacheOopDesc::base_offset() +
+    ConstantPoolCache::base_offset() +
       ((byte_no == f2_byte)
        ? ConstantPoolCacheEntry::f2_offset()
        : ConstantPoolCacheEntry::f1_offset()));
-  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                     ConstantPoolCacheEntry::flags_offset());
   // access constant pool cache fields
-  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                     ConstantPoolCacheEntry::f2_offset());
 
-  if (byte_no == f12_oop) {
-    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
-    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
-    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
     size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
-    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
-    __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
-    itable_index = noreg;  // hack to disable load below
-  } else {
-    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+  resolve_cache_and_index(byte_no, cache, index, index_size);
     __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
-  }
+
   if (itable_index != noreg) {
-    // pick up itable index from f2 also:
-    assert(byte_no == f1_byte, "already picked up f1");
+    // pick up itable or appendix index from f2 also:
     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   }
   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
 }
 
-
-// The registers cache and index expected to be set before call.
 // Correct values of the cache and index registers are preserved.
 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                             bool is_static, bool has_tos) {
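
The f12_oop branch in the hunk above collapses for the same reason as in
resolve_cache_and_index. The resulting invoke cache-entry layout, as recorded
by the comments in this diff:

    // f1 : Method* (invokespecial/invokestatic) or Klass* (invokeinterface)
    // f2 : vtable index, or Method* when vfinal, or - for invokehandle and
    //      invokedynamic - the resolved-references index of the appendix,
    //      hence "itable or appendix index from f2"
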
@@ -2278,7 +2213,7 @@
     __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
 
     // cache entry pointer
-    __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
+    __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
     __ shll(c_rarg3, LogBytesPerWord);
     __ addptr(c_rarg2, c_rarg3);
     if (is_static) {
@@ -2314,7 +2249,7 @@
   const Register flags = rax;
   const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
 
-  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
   jvmti_post_field_access(cache, index, is_static, false);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2453,7 +2388,7 @@
 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
   transition(vtos, vtos);
 
-  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
 
   if (JvmtiExport::can_post_field_modification()) {
     // Check to see if a field modification watch has been set before
@@ -2517,7 +2452,7 @@
   const Register flags = rax;
   const Register bc    = c_rarg3;
 
-  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
   jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
@@ -2741,7 +2676,7 @@
 void TemplateTable::fast_storefield(TosState state) {
   transition(state, vtos);
 
-  ByteSize base = constantPoolCacheOopDesc::base_offset();
+  ByteSize base = ConstantPoolCache::base_offset();
 
   jvmti_post_fast_field_mod();
 
@@ -2841,13 +2776,13 @@
   // [jk] not needed currently
   // if (os::is_MP()) {
   //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
-  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
+  //                        in_bytes(ConstantPoolCache::base_offset() +
   //                                 ConstantPoolCacheEntry::flags_offset())));
   //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
   //   __ andl(rdx, 0x1);
   // }
   __ movptr(rbx, Address(rcx, rbx, Address::times_8,
-                         in_bytes(constantPoolCacheOopDesc::base_offset() +
+                         in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
 
   // rax: object
@@ -2904,7 +2839,7 @@
   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
   __ movptr(rbx,
             Address(rcx, rdx, Address::times_8,
-                    in_bytes(constantPoolCacheOopDesc::base_offset() +
+                    in_bytes(ConstantPoolCache::base_offset() +
                              ConstantPoolCacheEntry::f2_offset())));
   // make sure exception is reported in correct bcp range (getfield is
   // next instruction)
@@ -2929,7 +2864,7 @@
   // if (os::is_MP()) {
   //   Label notVolatile;
   //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
-  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
+  //                        in_bytes(ConstantPoolCache::base_offset() +
   //                                 ConstantPoolCacheEntry::flags_offset())));
   //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
   //   __ testl(rdx, 0x1);
@@ -2984,12 +2919,15 @@
   // maybe push appendix to arguments (just before return address)
   if (is_invokedynamic || is_invokehandle) {
     Label L_no_push;
-    __ verify_oop(index);
     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
-    __ jccb(Assembler::zero, L_no_push);
+    __ jcc(Assembler::zero, L_no_push);
     // Push the appendix as a trailing parameter.
     // This must be done before we get the receiver,
     // since the parameter_size includes it.
+    __ push(rbx);
+    __ mov(rbx, index);
+    __ load_resolved_reference_at_index(index, rbx);
+    __ pop(rbx);
     __ push(index);  // push appendix (MethodType, CallSite, etc.)
     __ bind(L_no_push);
   }
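
The appendix (MethodType, CallSite, etc.) used to sit in the cache entry as an
oop; now f2 stores only its resolved-references index, so prepare_invoke must
round-trip through load_resolved_reference_at_index before pushing it. The
save/restore of rbx around that call suggests the helper needs rbx as scratch
(rbx also carries the Method* here), and the short jccb widens to jcc,
presumably because the inserted instructions push L_no_push out of
short-branch range.

    // sketch of the appendix fetch (hypothetical names):
    //   index    = resolved_references_index_from_f2;
    //   appendix = resolved_references->obj_at(index);
    //   push(appendix);   // becomes the trailing argument
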
@@ -3052,11 +2990,10 @@
 
   const Register method = index;  // method must be rbx
   assert(method == rbx,
-         "methodOop must be rbx for interpreter calling convention");
+         "Method* must be rbx for interpreter calling convention");
 
   // do the call - the index is actually the method to call
-  // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
-  __ verify_oop(method);
+  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
 
   // It's final, need a null check here!
   __ null_check(recv);
@@ -3071,12 +3008,11 @@
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
   __ load_klass(rax, recv);
-  __ verify_oop(rax);
 
   // profile this call
   __ profile_virtual_call(rax, r14, rdx);
 
-  // get target methodOop & entry point
+  // get target Method* & entry point
   __ lookup_virtual_method(rax, index, method);
   __ jump_from_interpreted(method, rdx);
 }
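
Here, and in the invokespecial/invokestatic/invokeinterface hunks below, the
__ verify_oop(method) and klass verify_oop calls are deleted rather than
moved: with VerifyOops on they would now fail, since a Method* or Klass* is
native metadata, not a heap oop with a valid header. Only values that are
still genuinely oops (receivers, the appendix) keep their verification.
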
@@ -3101,12 +3037,11 @@
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(byte_no, rbx, noreg,  // get f1 methodOop
+  prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
                  rcx);  // get receiver also for null check
   __ verify_oop(rcx);
   __ null_check(rcx);
   // do the call
-  __ verify_oop(rbx);
   __ profile_call(rax);
   __ jump_from_interpreted(rbx, rax);
 }
@@ -3115,9 +3050,8 @@
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(byte_no, rbx);  // get f1 methodOop
+  prepare_invoke(byte_no, rbx);  // get f1 Method*
   // do the call
-  __ verify_oop(rbx);
   __ profile_call(rax);
   __ jump_from_interpreted(rbx, rax);
 }
@@ -3131,7 +3065,7 @@
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(byte_no, rax, rbx,  // get f1 klassOop, f2 itable index
+  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 itable index
                  rcx, rdx); // recv, flags
 
   // rax: interface klass (from f1)
@@ -3155,7 +3089,6 @@
   __ restore_locals();  // restore r14
   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
   __ load_klass(rdx, rcx);
-  __ verify_oop(rdx);
 
   // profile this call
   __ profile_virtual_call(rdx, r13, r14);
@@ -3168,7 +3101,7 @@
                              rbx, r13,
                              no_such_interface);
 
-  // rbx: methodOop to call
+  // rbx: Method* to call
   // rcx: receiver
   // Check for abstract method error
   // Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3179,7 +3112,7 @@
 
   // do the call
   // rcx: receiver
-  // rbx,: methodOop
+  // rbx,: Method*
   __ jump_from_interpreted(rbx, rdx);
   __ should_not_reach_here();
 
@@ -3210,7 +3143,7 @@
 
 void TemplateTable::invokehandle(int byte_no) {
   transition(vtos, vtos);
-  assert(byte_no == f12_oop, "use this argument");
+  assert(byte_no == f1_byte, "use this argument");
   const Register rbx_method = rbx;  // f2
   const Register rax_mtype  = rax;  // f1
   const Register rcx_recv   = rcx;
@@ -3223,7 +3156,7 @@
   }
 
   prepare_invoke(byte_no,
-                 rbx_method, rax_mtype,  // get f2 methodOop, f1 MethodType
+                 rbx_method, rax_mtype,  // get f2 Method*, f1 MethodType
                  rcx_recv);
   __ verify_oop(rbx_method);
   __ verify_oop(rcx_recv);
@@ -3240,7 +3173,7 @@
 
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
-  assert(byte_no == f12_oop, "use this argument");
+  assert(byte_no == f1_byte, "use this argument");
 
   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3258,7 +3191,7 @@
 
   prepare_invoke(byte_no, rbx_method, rax_callsite);
 
-  // rax: CallSite object (from f1)
+  // rax: CallSite object (from cpool->resolved_references[])
   // rbx: MH.linkToCallSite method (from f2)
 
   // Note:  rax_callsite is already pushed by prepare_invoke
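
invokedynamic now asserts f1_byte instead of the deleted f12_oop kind, and
the updated comment records where the CallSite actually comes from:
cpool->resolved_references[], rather than the cache entry's f1 oop. The
MH.linkToCallSite adapter Method* still arrives via f2.
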
@@ -3287,25 +3220,25 @@
 
   __ get_cpool_and_tags(rsi, rax);
   // Make sure the class we're about to instantiate has been resolved.
-  // This is done before loading instanceKlass to be consistent with the order
-  // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
-  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+  // This is done before loading InstanceKlass to be consistent with the order
+  // how Constant Pool is updated (see ConstantPool::klass_at_put)
+  const int tags_offset = Array<u1>::base_offset_in_bytes();
   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
           JVM_CONSTANT_Class);
   __ jcc(Assembler::notEqual, slow_case);
 
-  // get instanceKlass
+  // get InstanceKlass
   __ movptr(rsi, Address(rsi, rdx,
-            Address::times_8, sizeof(constantPoolOopDesc)));
+            Address::times_8, sizeof(ConstantPool)));
 
   // make sure klass is initialized & doesn't have finalizer
   // make sure klass is fully initialized
   __ cmpb(Address(rsi,
-                  instanceKlass::init_state_offset()),
-          instanceKlass::fully_initialized);
+                  InstanceKlass::init_state_offset()),
+          InstanceKlass::fully_initialized);
   __ jcc(Assembler::notEqual, slow_case);
 
-  // get instance_size in instanceKlass (scaled to a count of bytes)
+  // get instance_size in InstanceKlass (scaled to a count of bytes)
   __ movl(rdx,
           Address(rsi,
                   Klass::layout_helper_offset()));
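
_new reads the resolved InstanceKlass straight out of the ConstantPool: the
resolved-klass slots begin immediately after the native ConstantPool header,
so only the displacement changes, from sizeof(constantPoolOopDesc) to
sizeof(ConstantPool), with the same times_8 scaling. Roughly:

    // sketch: resolved Klass* slot for constant-pool index i
    Klass* k = *(Klass**)((address) cp + sizeof(ConstantPool) + i * wordSize);
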
@@ -3470,11 +3403,13 @@
   // See if bytecode has already been quicked
   __ cmpb(Address(rdx, rbx,
                   Address::times_1,
-                  typeArrayOopDesc::header_size(T_BYTE) * wordSize),
+                  Array<u1>::base_offset_in_bytes()),
           JVM_CONSTANT_Class);
   __ jcc(Assembler::equal, quicked);
   __ push(atos); // save receiver for result, and for GC
-  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  // vm_result_2 has metadata result
+  __ get_vm_result_2(rax, r15_thread);
   __ pop_ptr(rdx); // restore receiver
   __ jmpb(resolved);
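
quicken_io_cc used to return the quickened klass as an oop in the thread's
vm_result, which call_VM(rax, ...) copied into rax. A Klass* can no longer
travel through the oop-result slot (GC would treat it as a heap pointer), so
the call now discards vm_result and the Klass* comes back through the
metadata slot; the same pattern repeats in the instanceof hunk below.
Conceptually:

    // sketch: metadata results bypass the oop-result protocol
    call_VM(noreg, quicken_io_cc);     // vm_result (oop) deliberately unused
    // get_vm_result_2(rax, r15_thread) then loads the thread's vm_result_2
    // slot into rax and clears the slot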
 
@@ -3482,7 +3417,7 @@
   __ bind(quicked);
   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
   __ movptr(rax, Address(rcx, rbx,
-                       Address::times_8, sizeof(constantPoolOopDesc)));
+                       Address::times_8, sizeof(ConstantPool)));
 
   __ bind(resolved);
   __ load_klass(rbx, rdx);
@@ -3523,12 +3458,14 @@
   // See if bytecode has already been quicked
   __ cmpb(Address(rdx, rbx,
                   Address::times_1,
-                  typeArrayOopDesc::header_size(T_BYTE) * wordSize),
+                  Array<u1>::base_offset_in_bytes()),
           JVM_CONSTANT_Class);
   __ jcc(Assembler::equal, quicked);
 
   __ push(atos); // save receiver for result, and for GC
-  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  // vm_result_2 has metadata result
+  __ get_vm_result_2(rax, r15_thread);
   __ pop_ptr(rdx); // restore receiver
   __ verify_oop(rdx);
   __ load_klass(rdx, rdx);
@@ -3538,7 +3475,7 @@
   __ bind(quicked);
   __ load_klass(rdx, rax);
   __ movptr(rax, Address(rcx, rbx,
-                         Address::times_8, sizeof(constantPoolOopDesc)));
+                         Address::times_8, sizeof(ConstantPool)));
 
   __ bind(resolved);