changeset 1523:8bfe9058ca46 hs19-b01

Merge
author jcoomes
date Thu, 13 May 2010 13:05:47 -0700
parents 67d74f7a15d9 (current diff) ef1a1d051971 (diff)
children 093432aa7573 cc387008223e
files src/share/vm/runtime/globals.hpp
diffstat 105 files changed, 2680 insertions(+), 2397 deletions(-)
--- a/.hgtags	Wed May 12 10:28:13 2010 -0700
+++ b/.hgtags	Thu May 13 13:05:47 2010 -0700
@@ -89,3 +89,8 @@
 4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
 15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
 4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
+605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
+e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
+25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
+3221d1887d30341bedfdac1dbf365ea41beff20f jdk7-b92
+310cdbc355355a13aa53c002b6bde4a8c5ba67ff hs18-b04
--- a/make/hotspot_distro	Wed May 12 10:28:13 2010 -0700
+++ b/make/hotspot_distro	Thu May 13 13:05:47 2010 -0700
@@ -28,5 +28,5 @@
 
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_DISTRO=Java HotSpot(TM)
-COMPANY_NAME=Sun Microsystems, Inc.
+COMPANY_NAME=Oracle Corporation
 PRODUCT_NAME=Java(TM) Platform SE
--- a/make/hotspot_version	Wed May 12 10:28:13 2010 -0700
+++ b/make/hotspot_version	Thu May 13 13:05:47 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=18
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.properties	Wed May 12 10:28:13 2010 -0700
+++ b/make/jprt.properties	Thu May 13 13:05:47 2010 -0700
@@ -51,6 +51,8 @@
 jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
+jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
+jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
@@ -58,6 +60,8 @@
 jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
+jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
+jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
@@ -65,6 +69,8 @@
 jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
+jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
+jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
@@ -72,6 +78,8 @@
 jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
+jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
+jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk7=linux_i586_2.6
@@ -79,6 +87,8 @@
 jprt.my.linux.i586.jdk6perf=linux_i586_2.4
 jprt.my.linux.i586.jdk6u10=linux_i586_2.4
 jprt.my.linux.i586.jdk6u14=linux_i586_2.4
+jprt.my.linux.i586.jdk6u18=linux_i586_2.4
+jprt.my.linux.i586.jdk6u20=linux_i586_2.4
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk7=linux_x64_2.6
@@ -86,6 +96,8 @@
 jprt.my.linux.x64.jdk6perf=linux_x64_2.4
 jprt.my.linux.x64.jdk6u10=linux_x64_2.4
 jprt.my.linux.x64.jdk6u14=linux_x64_2.4
+jprt.my.linux.x64.jdk6u18=linux_x64_2.4
+jprt.my.linux.x64.jdk6u20=linux_x64_2.4
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.windows.i586.jdk7=windows_i586_5.0
@@ -93,6 +105,8 @@
 jprt.my.windows.i586.jdk6perf=windows_i586_5.0
 jprt.my.windows.i586.jdk6u10=windows_i586_5.0
 jprt.my.windows.i586.jdk6u14=windows_i586_5.0
+jprt.my.windows.i586.jdk6u18=windows_i586_5.0
+jprt.my.windows.i586.jdk6u20=windows_i586_5.0
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk7=windows_x64_5.2
@@ -100,6 +114,8 @@
 jprt.my.windows.x64.jdk6perf=windows_x64_5.2
 jprt.my.windows.x64.jdk6u10=windows_x64_5.2
 jprt.my.windows.x64.jdk6u14=windows_x64_5.2
+jprt.my.windows.x64.jdk6u18=windows_x64_5.2
+jprt.my.windows.x64.jdk6u20=windows_x64_5.2
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2333,6 +2333,18 @@
 #endif
 
 
+void MacroAssembler::load_sized_value(Address src, Register dst,
+                                      size_t size_in_bytes, bool is_signed) {
+  switch (size_in_bytes) {
+  case  8: ldx(src, dst); break;
+  case  4: ld( src, dst); break;
+  case  2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
+  case  1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
+  default: ShouldNotReachHere();
+  }
+}
+
+
 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                 FloatRegister Fa, FloatRegister Fb,
                                 Register Rresult) {
@@ -2625,40 +2637,103 @@
 }
 
 
-void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-  assert(dest.register_or_noreg() != G0, "lost side effect");
-  if ((src.is_constant() && src.as_constant() == 0) ||
-      (src.is_register() && src.as_register() == G0)) {
-    // do nothing
-  } else if (dest.is_register()) {
-    add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
-  } else if (src.is_constant()) {
-    intptr_t res = dest.as_constant() + src.as_constant();
-    dest = RegisterOrConstant(res); // side effect seen by caller
+RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
   } else {
-    assert(temp != noreg, "cannot handle constant += register");
-    add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
-    dest = RegisterOrConstant(temp); // side effect seen by caller
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      set(s1.as_constant(), temp);
+      andn(temp, s2.as_register(), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() & ~s2.as_constant();
+      return res;
+    }
   }
 }
 
-void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-  assert(dest.register_or_noreg() != G0, "lost side effect");
-  if (!is_simm13(src.constant_or_zero()))
-    src = (src.as_constant() & 0xFF);
-  if ((src.is_constant() && src.as_constant() == 0) ||
-      (src.is_register() && src.as_register() == G0)) {
-    // do nothing
-  } else if (dest.is_register()) {
-    sll_ptr(dest.as_register(), src, dest.as_register());
-  } else if (src.is_constant()) {
-    intptr_t res = dest.as_constant() << src.as_constant();
-    dest = RegisterOrConstant(res); // side effect seen by caller
+RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
   } else {
-    assert(temp != noreg, "cannot handle constant <<= register");
-    set(dest.as_constant(), temp);
-    sll_ptr(temp, src, temp);
-    dest = RegisterOrConstant(temp); // side effect seen by caller
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() + s2.as_constant();
+      return res;
+    }
+  }
+}
+
+RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if (!is_simm13(s2.constant_or_zero()))
+    s2 = (s2.as_constant() & 0xFF);
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
+  } else {
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      set(s1.as_constant(), temp);
+      sll_ptr(temp, s2.as_register(), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() << s2.as_constant();
+      return res;
+    }
   }
 }
 
@@ -2708,8 +2783,8 @@
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
   RegisterOrConstant itable_offset = itable_index;
-  regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
-  regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
+  itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
+  itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
   add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
@@ -2805,7 +2880,7 @@
 
   assert_different_registers(sub_klass, super_klass, temp_reg);
   if (super_check_offset.is_register()) {
-    assert_different_registers(sub_klass, super_klass,
+    assert_different_registers(sub_klass, super_klass, temp_reg,
                                super_check_offset.as_register());
   } else if (must_load_sco) {
     assert(temp2_reg != noreg, "supply either a temp or a register offset");
@@ -2855,6 +2930,8 @@
     // The super check offset is always positive...
     lduw(super_klass, sco_offset, temp2_reg);
     super_check_offset = RegisterOrConstant(temp2_reg);
+    // super_check_offset is register.
+    assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
   }
   ld_ptr(sub_klass, super_check_offset, temp_reg);
   cmp(super_klass, temp_reg);
@@ -3014,11 +3091,10 @@
 }
 
 
-
-
 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                               Register temp_reg,
                                               Label& wrong_method_type) {
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   assert_different_registers(mtype_reg, mh_reg, temp_reg);
   // compare method type against that of the receiver
   RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
@@ -3029,10 +3105,33 @@
 }
 
 
-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
+// A method handle has a "vmslots" field which gives the size of its
+// argument list in JVM stack slots.  This field is either located directly
+// in every method handle, or else is indirectly accessed through the
+// method handle's MethodType.  This macro hides the distinction.
+void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+                                                Register temp_reg) {
+  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
+  // load mh.type.form.vmslots
+  if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
+    // hoist vmslots into every mh to avoid dependent load chain
+    ld(    Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
+  } else {
+    Register temp2_reg = vmslots_reg;
+    ld_ptr(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
+    ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
+    ld(    Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
+  }
+}
+
+
+void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
   assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
   assert_different_registers(mh_reg, temp_reg);
 
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
+
   // pick out the interpreted side of the handler
   ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
 
@@ -3043,17 +3142,18 @@
   // for the various stubs which take control at this point,
   // see MethodHandles::generate_method_handle_stub
 
-  // (Can any caller use this delay slot?  If so, add an option for supression.)
-  delayed()->nop();
+  // Some callers can fill the delay slot.
+  if (emit_delayed_nop) {
+    delayed()->nop();
+  }
 }
 
+
 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                    int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
-  int stackElementSize = Interpreter::stackElementWords() * wordSize;
-  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
-  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
-  assert(offset1 - offset == stackElementSize, "correct arithmetic");
+  int stackElementSize = Interpreter::stackElementSize;
+  int offset = extra_slot_offset * stackElementSize;
   if (arg_slot.is_constant()) {
     offset += arg_slot.as_constant() * stackElementSize;
     return offset;
@@ -3067,6 +3167,11 @@
 }
 
 
+Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+                                         int extra_slot_offset) {
+  return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
+}
+
 
 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
                                           Register temp_reg,
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1380,24 +1380,25 @@
 
   // pp 181
 
-  void and3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | rs2(s2) ); }
-  void and3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void and3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | rs2(s2) ); }
+  void and3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void andcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void andcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void andn(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | rs2(s2) ); }
   void andn(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void andn(    Register s1, RegisterOrConstant s2, Register d);
   void andncc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void andncc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void or3(      Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
-  void or3(      Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void or3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
+  void or3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void orcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orn(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
   void orn(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orncc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void orncc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void xor3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
-  void xor3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void xor3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
+  void xor3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void xorcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void xorcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void xnor(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | rs2(s2) ); }
@@ -2026,8 +2027,8 @@
   inline void st_ptr(Register d, Register s1, ByteSize simm13a);
 #endif
 
-  // ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
-  // st_long will perform st for 32 bit VM's and stx for 64 bit VM's
+  // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
+  // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
   inline void ld_long(Register s1, Register s2, Register d);
   inline void ld_long(Register s1, int simm13a, Register d);
   inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
@@ -2038,23 +2039,19 @@
   inline void st_long(Register d, const Address& a, int offset = 0);
 
   // Helpers for address formation.
-  // They update the dest in place, whether it is a register or constant.
-  // They emit no code at all if src is a constant zero.
-  // If dest is a constant and src is a register, the temp argument
-  // is required, and becomes the result.
-  // If dest is a register and src is a non-simm13 constant,
-  // the temp argument is required, and is used to materialize the constant.
-  void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
-                       Register temp = noreg );
-  void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
-                       Register temp = noreg );
-
-  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
-    guarantee(Rtemp != noreg, "constant offset overflow");
-    if (is_simm13(roc.constant_or_zero()))
-      return roc;               // register or short constant
-    set(roc.as_constant(), Rtemp);
-    return RegisterOrConstant(Rtemp);
+  // - They emit only a move if s2 is a constant zero.
+  // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
+  // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
+  RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+  RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+  RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+
+  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
+    if (is_simm13(src.constant_or_zero()))
+      return src;               // register or short constant
+    guarantee(temp != noreg, "constant offset overflow");
+    set(src.as_constant(), temp);
+    return temp;
   }
 
   // --------------------------------------------------
@@ -2303,6 +2300,9 @@
   void lcmp( Register Ra, Register Rb, Register Rresult);
 #endif
 
+  // Loading values by size and signed-ness
+  void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
+
   void float_cmp( bool is_float, int unordered_result,
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);
@@ -2421,12 +2421,16 @@
   void check_method_handle_type(Register mtype_reg, Register mh_reg,
                                 Register temp_reg,
                                 Label& wrong_method_type);
-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
+  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+                                  Register temp_reg);
+  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
   // offset relative to Gargs of argument at tos[arg_slot].
   // (arg_slot == 0 means the last argument, not the first).
   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
                                      int extra_slot_offset = 0);
-
+  // Address of Gargs and argument_offset.
+  Address            argument_address(RegisterOrConstant arg_slot,
+                                      int extra_slot_offset = 0);
 
   // Stack overflow checking
 
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu May 13 13:05:47 2010 -0700
@@ -206,12 +206,17 @@
 inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
 
 // form effective addresses this way:
-inline void Assembler::add(   Register s1, RegisterOrConstant s2, Register d, int offset) {
-  if (s2.is_register())  add(s1, s2.as_register(), d);
+inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
+  if (s2.is_register())  add(s1, s2.as_register(),          d);
   else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
   if (offset != 0)       add(d,  offset,                    d);
 }
 
+inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
+  if (s2.is_register())  andn(s1, s2.as_register(), d);
+  else                   andn(s1, s2.as_constant(), d);
+}
+
 inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
   // fail with a guarantee ("not enough space for interpreter generation");
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Max size with JVMTI
 
   // QQQ this is proably way too large for c++ interpreter
 
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -620,7 +620,7 @@
 
   // stack frames shouldn't be much larger than max_stack elements
 
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
+  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }
 
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -50,7 +50,6 @@
   // Any changes should also be applied to CodeEmitter::emit_osr_entry().
   assert_different_registers(args_size, locals_size);
   // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
-  if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
   subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words
   // Use br/mov combination because it works on both V8 and V9 and is
   // faster.
@@ -319,7 +318,7 @@
   ldf(FloatRegisterImpl::D, r1, offset, d);
 #else
   ldf(FloatRegisterImpl::S, r1, offset, d);
-  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
+  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
 #endif
 }
 
@@ -330,10 +329,10 @@
 #ifdef _LP64
   stf(FloatRegisterImpl::D, d, r1, offset);
   // store something more useful here
-  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
+  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
 #else
   stf(FloatRegisterImpl::S, d, r1, offset);
-  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
+  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
 #endif
 }
 
@@ -345,7 +344,7 @@
   ldx(r1, offset, rd);
 #else
   ld(r1, offset, rd);
-  ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
+  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
 #endif
 }
 
@@ -356,138 +355,62 @@
 #ifdef _LP64
   stx(l, r1, offset);
   // store something more useful here
-  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
+  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
 #else
   st(l, r1, offset);
-  st(l->successor(), r1, offset + Interpreter::stackElementSize());
+  st(l->successor(), r1, offset + Interpreter::stackElementSize);
 #endif
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
-                                                 Register r,
-                                                 Register scratch) {
-  if (TaggedStackInterpreter) {
-    Label ok, long_ok;
-    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
-    if (t == frame::TagCategory2) {
-      cmp(r, G0);
-      brx(Assembler::equal, false, Assembler::pt, long_ok);
-      delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
-      stop("stack long/double tag value bad");
-      bind(long_ok);
-      cmp(r, G0);
-    } else if (t == frame::TagValue) {
-      cmp(r, G0);
-    } else {
-      assert_different_registers(r, scratch);
-      mov(t, scratch);
-      cmp(r, scratch);
-    }
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-    cmp(r, G0);
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    stop("Stack tag value is bad");
-    bind(ok);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_i(Register r) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagValue, r));
   ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagReference, r, scratch));
   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 void InterpreterMacroAssembler::pop_l(Register r) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagCategory2, r));
   load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, 2*Interpreter::stackElementSize());
+  inc(Lesp, 2*Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
   assert_not_delayed();
-  debug_only(verify_stack_tag(frame::TagValue, scratch));
   ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
   assert_not_delayed();
-  debug_only(verify_stack_tag(frame::TagCategory2, scratch));
   load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
-  inc(Lesp, 2*Interpreter::stackElementSize());
+  inc(Lesp, 2*Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
-// (Note use register first, then decrement so dec can be done during store stall)
-void InterpreterMacroAssembler::tag_stack(Register r) {
-  if (TaggedStackInterpreter) {
-    st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
-  }
-}
-
-void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
-  if (TaggedStackInterpreter) {
-    assert (frame::TagValue == 0, "TagValue must be zero");
-    if (t == frame::TagValue) {
-      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
-    } else if (t == frame::TagCategory2) {
-      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
-      // Tag next slot down too
-      st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
-    } else {
-      assert_different_registers(r, O3);
-      mov(t, O3);
-      st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
-    }
-  }
-}
-
 void InterpreterMacroAssembler::push_i(Register r) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagValue, r);
-  st(  r,    Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
+  st(r, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
   assert_not_delayed();
-  tag_stack(frame::TagReference, r);
-  st_ptr(  r,    Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  assert_not_delayed();
-  tag_stack(tag);
-  st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
+  st_ptr(r, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 // remember: our convention for longs in SPARC is:
@@ -497,33 +420,28 @@
 void InterpreterMacroAssembler::push_l(Register r) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagCategory2, r);
-  // Longs are in stored in memory-correct order, even if unaligned.
-  // and may be separated by stack tags.
-  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+  // Longs are stored in memory-correct order, even if unaligned.
+  int offset = -Interpreter::stackElementSize;
   store_unaligned_long(r, Lesp, offset);
-  dec(Lesp, 2 * Interpreter::stackElementSize());
+  dec(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
 void InterpreterMacroAssembler::push_f(FloatRegister f) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagValue, Otos_i);
-  stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
-  dec(Lesp, Interpreter::stackElementSize());
+  stf(FloatRegisterImpl::S, f, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 
 void InterpreterMacroAssembler::push_d(FloatRegister d)   {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagCategory2, Otos_i);
-  // Longs are in stored in memory-correct order, even if unaligned.
-  // and may be separated by stack tags.
-  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+  // Longs are stored in memory-correct order, even if unaligned.
+  int offset = -Interpreter::stackElementSize;
   store_unaligned_double(d, Lesp, offset);
-  dec(Lesp, 2 * Interpreter::stackElementSize());
+  dec(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
@@ -561,30 +479,18 @@
 }
 
 
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
-  if (TaggedStackInterpreter) {
-    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
-  }
 }
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
   st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
-  }
 }
 
 
 void InterpreterMacroAssembler::load_receiver(Register param_count,
                                               Register recv) {
-
-  sll(param_count, Interpreter::logStackElementSize(), param_count);
-  if (TaggedStackInterpreter) {
-    add(param_count, Interpreter::value_offset_in_bytes(), param_count);  // get obj address
-  }
+  sll(param_count, Interpreter::logStackElementSize, param_count);
   ld_ptr(Lesp, param_count, recv);                      // gets receiver Oop
 }
 
@@ -605,7 +511,6 @@
 
   // Compute max expression stack+register save area
   lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
-  if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size);  // max_stack * 2 for TAGS
   add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
 
   //
@@ -814,22 +719,39 @@
 }
 
 
-void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
+                                                       int bcp_offset, bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    xor3(tmp, -1, tmp);  // convert to plain index
+  }
+}
+
+
+void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
+                                                           int bcp_offset, bool giant_index) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
-  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
-              // convert from field index to ConstantPoolCacheEntry index
-              // and from word index to byte offset
+  get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
+  // convert from field index to ConstantPoolCacheEntry index and from
+  // word index to byte offset
   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
   add(LcpoolCache, tmp, cache);
 }
 
 
-void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
+void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
+                                                               int bcp_offset, bool giant_index) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
+  assert(!giant_index,"NYI");
   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
               // convert from field index to ConstantPoolCacheEntry index
               // and from word index to byte offset
@@ -1675,15 +1597,31 @@
 // Count a virtual call in the bytecodes.
 
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
-                                                     Register scratch) {
+                                                     Register scratch,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;
 
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(profile_continue);
 
+
+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      Label not_null;
+      tst(receiver);
+      brx(Assembler::notZero, false, Assembler::pt, not_null);
+      delayed()->nop();
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+      ba(false, skip_receiver_profile);
+      delayed()->nop();
+      bind(not_null);
+    }
+
     // Record the receiver type.
     record_klass_in_profile(receiver, scratch, true);
+    bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@@ -1985,51 +1923,11 @@
 }
 
 // Locals
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag t,
-                                                 Register base,
-                                                 Register scratch,
-                                                 int n) {
-  if (TaggedStackInterpreter) {
-    Label ok, long_ok;
-    // Use dst for scratch
-    assert_different_registers(base, scratch);
-    ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch);
-    if (t == frame::TagCategory2) {
-      cmp(scratch, G0);
-      brx(Assembler::equal, false, Assembler::pt, long_ok);
-      delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch);
-      stop("local long/double tag value bad");
-      bind(long_ok);
-      // compare second half tag
-      cmp(scratch, G0);
-    } else if (t == frame::TagValue) {
-      cmp(scratch, G0);
-    } else {
-      assert_different_registers(O3, base, scratch);
-      mov(t, O3);
-      cmp(scratch, O3);
-    }
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch);
-    cmp(scratch, G0);
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    stop("Local tag value is bad");
-    bind(ok);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagReference, index, dst));
-  ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
+  ld_ptr(index, 0, dst);
   // Note:  index must hold the effective address--the iinc template uses it
 }
 
@@ -2037,27 +1935,24 @@
 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                            Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, dst));
-  ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
+  ld_ptr(index, 0, dst);
 }
 
 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, dst));
-  ld(index, Interpreter::value_offset_in_bytes(), dst);
+  ld(index, 0, dst);
   // Note:  index must hold the effective address--the iinc template uses it
 }
 
 
 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagCategory2, index, dst));
   // First half stored at index n+1 (which grows down from Llocals[n])
   load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
 }
@@ -2065,18 +1960,16 @@
 
 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, G1_scratch));
-  ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst);
+  ldf(FloatRegisterImpl::S, index, 0, dst);
 }
 
 
 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch));
   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
 }
 
@@ -2102,94 +1995,60 @@
 }
 #endif // ASSERT
 
-void InterpreterMacroAssembler::tag_local(frame::Tag t,
-                                          Register base,
-                                          Register src,
-                                          int n) {
-  if (TaggedStackInterpreter) {
-    // have to store zero because local slots can be reused (rats!)
-    if (t == frame::TagValue) {
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
-    } else if (t == frame::TagCategory2) {
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1));
-    } else {
-      // assert that we don't stomp the value in 'src'
-      // O3 is arbitrary because it's not used.
-      assert_different_registers(src, base, O3);
-      mov( t, O3);
-      st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n));
-    }
-  }
-}
-
 
 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);)
-  tag_local(frame::TagValue, index, src);
-  st(src, index, Interpreter::value_offset_in_bytes());
+  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
+  st(src, index, 0);
 }
 
-void InterpreterMacroAssembler::store_local_ptr( Register index, Register src,
-                                                 Register tag ) {
+void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
-  check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
-  #endif
-  st_ptr(src, index, Interpreter::value_offset_in_bytes());
-  // Store tag register directly
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, index, Interpreter::tag_offset_in_bytes());
-  }
+#ifdef ASSERT
+  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
+#endif
+  st_ptr(src, index, 0);
 }
 
 
 
-void InterpreterMacroAssembler::store_local_ptr( int n, Register src,
-                                                 Register tag ) {
-  st_ptr(src,  Llocals, Interpreter::local_offset_in_bytes(n));
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n));
-  }
+void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
+  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
+#ifdef ASSERT
   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagCategory2, index, src);
+#endif
   store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
 }
 
 
 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
-  check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagValue, index, G1_scratch);
-  stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes());
+#ifdef ASSERT
+  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
+#endif
+  stf(FloatRegisterImpl::S, src, index, 0);
 }
 
 
 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
+#ifdef ASSERT
   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagCategory2, index, G1_scratch);
+#endif
   store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
 }
 
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu May 13 13:05:47 2010 -0700
@@ -149,7 +149,6 @@
 
   void push_i(     Register r = Otos_i);
   void push_ptr(   Register r = Otos_i);
-  void push_ptr(   Register r, Register tag);
   void push_l(     Register r = Otos_l1);
   void push_f(FloatRegister f = Ftos_f);
   void push_d(FloatRegister f = Ftos_d1);
@@ -159,17 +158,9 @@
   void push(TosState state);           // transition state -> vtos
   void empty_expression_stack();       // resets both Lesp and SP
 
-  // Support for Tagged Stacks
-  void tag_stack(frame::Tag t, Register r);
-  void tag_stack(Register tag);
-  void tag_local(frame::Tag t, Register src, Register base, int n = 0);
-
 #ifdef ASSERT
   void verify_sp(Register Rsp, Register Rtemp);
   void verify_esp(Register Resp);      // verify that Lesp points to a word in the temp stack
-
-  void verify_stack_tag(frame::Tag t, Register r, Register scratch = G0);
-  void verify_local_tag(frame::Tag t, Register base, Register scr, int n = 0);
 #endif // ASSERT
 
  public:
@@ -191,8 +182,9 @@
                                   Register   Rdst,
                                   setCCOrNot should_set_CC = dont_set_CC );
 
-  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
+  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
 
 
   // common code
@@ -241,17 +233,17 @@
   void check_for_regarea_stomp( Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1);
 #endif // ASSERT
   void store_local_int( Register index, Register src );
-  void store_local_ptr( Register index, Register src, Register tag = Otos_l2 );
-  void store_local_ptr( int n, Register src, Register tag = Otos_l2 );
+  void store_local_ptr( Register index, Register src );
+  void store_local_ptr( int n, Register src );
   void store_local_long( Register index, Register src );
   void store_local_float( Register index, FloatRegister src );
   void store_local_double( Register index, FloatRegister src );
 
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
 
-  // Tagged stack helper for getting receiver in register.
+  // Helper for getting receiver in register.
   void load_receiver(Register param_count, Register recv);
 
   static int top_most_monitor_byte_offset(); // offset in bytes to top of monitor block
@@ -304,7 +296,7 @@
   void profile_not_taken_branch(Register scratch);
   void profile_call(Register scratch);
   void profile_final_call(Register scratch);
-  void profile_virtual_call(Register receiver, Register scratch);
+  void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
   void profile_ret(TosState state, Register return_bci, Register scratch);
   void profile_null_seen(Register scratch);
   void profile_typecheck(Register klass, Register scratch);
--- a/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,19 +43,6 @@
   Argument  jni_arg(jni_offset(), false);
   Register  Rtmp = O0;
 
-#ifdef ASSERT
-  if (TaggedStackInterpreter) {
-    // check at least one tag is okay
-    Label ok;
-    __ ld_ptr(Llocals, Interpreter::local_tag_offset_in_bytes(offset() + 1), Rtmp);
-    __ cmp(Rtmp, G0);
-    __ brx(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("Native object has bad tag value");
-    __ bind(ok);
-  }
-#endif // ASSERT
-
 #ifdef _LP64
   __ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
   __ store_long_argument(Rtmp, jni_arg);
@@ -107,18 +94,6 @@
 
   Address     h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
   __ ld_ptr(h_arg, Rtmp1);
-#ifdef ASSERT
-  if (TaggedStackInterpreter) {
-    // check we have the obj and not the tag
-    Label ok;
-    __ mov(frame::TagReference, Rtmp3);
-    __ cmp(Rtmp1, Rtmp3);
-    __ brx(Assembler::notEqual, true, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("Native object passed tag by mistake");
-    __ bind(ok);
-  }
-#endif // ASSERT
   if (!do_NULL_check) {
     __ add(h_arg.base(), h_arg.disp(), Rtmp2);
   } else {
@@ -168,17 +143,9 @@
     long_sig   = 3
   };
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int() {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( non_float );
   }
 
@@ -186,31 +153,27 @@
     // pass address of from
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
     *_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( non_float );
    }
 
 #ifdef _LP64
   virtual void pass_float()  {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( float_sig );
    }
 
   virtual void pass_double() {
     *_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
    add_signature( double_sig );
    }
 
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
     _to += 1;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
     add_signature( long_sig );
   }
 #else
@@ -218,9 +181,8 @@
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
     _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
     _to += 2;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
     add_signature( non_float );
   }
 #endif // _LP64
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -235,19 +235,17 @@
 }
 
 
-
 // Method handle invoker
 // Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
 address InterpreterGenerator::generate_method_handle_entry(void) {
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  return MethodHandles::generate_method_handle_interpreter_entry(_masm);
 }
 
 
-
-
 //----------------------------------------------------------------------------------------------------
 // Entry points & stack frame layout
 //
--- a/src/cpu/sparc/vm/interpreter_sparc.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/interpreter_sparc.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,33 +24,13 @@
 
  public:
 
-  // Support for Tagged Stacks
+  static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
 
   // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)     {
-    return stackElementWords() * i;
-  }
-
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    // tag is one word above java stack element
-    return stackElementWords() * i + 1;
-  }
-
-  static int expr_offset_in_bytes(int i) { return stackElementSize()*i + wordSize; }
-  static int expr_tag_offset_in_bytes (int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return expr_offset_in_bytes(i) + wordSize;
-  }
+  static int expr_index_at(int i)        { return stackElementWords * i; }
 
   // Already negated by c++ interpreter
-  static int local_index_at(int i)     {
-    assert(i<=0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
   }
-
-  static int local_tag_index_at(int i) {
-    assert(i<=0, "local direction already negated");
-    assert(TaggedStackInterpreter, "should not call this");
-    return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
-  }
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,9 @@
 
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
+  // Just before the actual machine code entry point, allocate space
+  // for a MethodHandleEntry::Data record, so that we can manage everything
+  // from one base pointer.
   __ align(wordSize);
   address target = __ pc() + sizeof(Data);
   while (__ pc() < target) {
@@ -59,12 +62,891 @@
 
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
-  ShouldNotReachHere(); //NYI, 6815692
-  return NULL;
+  // I5_savedSP: sender SP (must preserve)
+  // G4 (Gargs): incoming argument list (must preserve)
+  // G5_method:  invoke methodOop; becomes method type.
+  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
+  // O0, O1: garbage temps, blown away
+  Register O0_argslot = O0;
+  Register O1_scratch = O1;
+
+  // emit WrongMethodType path first, to enable back-branch from main path
+  Label wrong_method_type;
+  __ bind(wrong_method_type);
+  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
+  __ delayed()->nop();
+
+  // here's where control starts out:
+  __ align(CodeEntryAlignment);
+  address entry_point = __ pc();
+
+  // fetch the MethodType from the method handle into G5_method_type
+  {
+    Register tem = G5_method;
+    assert(tem == G5_method_type, "yes, it's the same register");
+    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
+      __ ld_ptr(Address(tem, *pchase), G5_method_type);
+    }
+  }
+
+  // given the MethodType, find out where the MH argument is buried
+  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
+  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
+  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+
+  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
+  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+
+  return entry_point;
 }
 
+
+#ifdef ASSERT
+static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
+  // Verify that argslot lies within (Gargs, FP].
+  Label L_ok, L_bad;
+#ifdef _LP64
+  __ add(FP, STACK_BIAS, temp_reg);
+  __ cmp(argslot_reg, temp_reg);
+#else
+  __ cmp(argslot_reg, FP);
+#endif
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ cmp(Gargs, argslot_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ bind(L_bad);
+  __ stop(error_message);
+  __ bind(L_ok);
+}
+#endif
+
+
+// Helper to insert argument slots into the stack.
+// arg_slots must be a multiple of stack_move_unit() and <= 0
+void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
+                                     RegisterOrConstant arg_slots,
+                                     int arg_mask,
+                                     Register argslot_reg,
+                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
+  assert(temp3_reg != noreg, "temp3 required");
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
+                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
+
+#ifdef ASSERT
+  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    __ br(Assembler::greater, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
+    __ br(Assembler::zero, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ bind(L_bad);
+    __ stop("assert arg_slots <= 0 and clear low bits");
+    __ bind(L_ok);
+  } else {
+    assert(arg_slots.as_constant() <= 0, "");
+    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
+  }
+#endif // ASSERT
+
+#ifdef _LP64
+  if (arg_slots.is_register()) {
+    // Was arg_slots register loaded as signed int?
+    Label L_ok;
+    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
+    __ sra(temp_reg, BitsPerInt, temp_reg);
+    __ cmp(arg_slots.as_register(), temp_reg);
+    __ br(Assembler::equal, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ stop("arg_slots register not loaded as signed int");
+    __ bind(L_ok);
+  }
+#endif
+
+  // Make space on the stack for the inserted argument(s).
+  // Then pull down everything shallower than argslot_reg.
+  // The stacked return address gets pulled down with everything else.
+  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
+  //   sp -= size;
+  //   for (temp = sp + size; temp < argslot; temp++)
+  //     temp[-size] = temp[0]
+  //   argslot -= size;
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
+  __ add(SP, masked_offset, SP);
+
+  __ mov(Gargs, temp_reg);  // source pointer for copy
+  __ add(Gargs, offset, Gargs);
+
+  {
+    Label loop;
+    __ bind(loop);
+    // pull one word down each time through the loop
+    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ add(temp_reg, wordSize, temp_reg);
+    __ cmp(temp_reg, argslot_reg);
+    __ brx(Assembler::less, false, Assembler::pt, loop);
+    __ delayed()->nop();  // FILLME
+  }
+
+  // Now move the argslot down, to point to the opened-up space.
+  __ add(argslot_reg, offset, argslot_reg);
+}
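
    The pseudo-code comment above describes the whole data movement. Purely as an
    illustrative model (not VM code), the net effect on the Gargs-relative argument
    array, with index 0 as the shallowest slot, is that k cleared slots open up at
    the insertion index; a minimal standalone sketch:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative model only: v[0] is the shallowest argument word (the slot
    // Gargs points at), higher indices lie deeper in the frame.  Opening k
    // slots at slot index p mirrors the word-by-word copy loop generated above.
    static void insert_arg_slots_model(std::vector<intptr_t>& v, size_t p, size_t k) {
      v.resize(v.size() + k);                       // frame grows (SP moves down)
      for (size_t i = v.size(); i-- > p + k; )      // slide the deeper words over...
        v[i] = v[i - k];
      for (size_t i = 0; i < k; ++i)                // ...leaving a cleared gap at [p, p+k)
        v[p + i] = 0;
    }

    int main() {
      std::vector<intptr_t> args = {10, 20, 30};
      insert_arg_slots_model(args, 1, 1);           // open one slot between 10 and 20
      for (intptr_t a : args) std::printf("%ld ", (long)a);
      std::printf("\n");                            // prints: 10 0 20 30
    }
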
+
+
+// Helper to remove argument slots from the stack.
+// arg_slots must be a multiple of stack_move_unit() and >= 0
+void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
+                                     RegisterOrConstant arg_slots,
+                                     Register argslot_reg,
+                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
+  assert(temp3_reg != noreg, "temp3 required");
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
+                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
+
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+
+#ifdef ASSERT
+  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
+  __ add(argslot_reg, offset, temp2_reg);
+  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    __ br(Assembler::less, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
+    __ br(Assembler::zero, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ bind(L_bad);
+    __ stop("assert arg_slots >= 0 and clear low bits");
+    __ bind(L_ok);
+  } else {
+    assert(arg_slots.as_constant() >= 0, "");
+    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
+  }
+#endif // ASSERT
+
+  // Pull up everything shallower than argslot.
+  // Then remove the excess space on the stack.
+  // The stacked return address gets pulled up with everything else.
+  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
+  //   for (temp = argslot-1; temp >= sp; --temp)
+  //     temp[size] = temp[0]
+  //   argslot += size;
+  //   sp += size;
+  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
+  {
+    Label loop;
+    __ bind(loop);
+    // pull one word up each time through the loop
+    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ sub(temp_reg, wordSize, temp_reg);
+    __ cmp(temp_reg, Gargs);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
+    __ delayed()->nop();  // FILLME
+  }
+
+  // Now move the argslot up, to point to the just-copied block.
+  __ add(Gargs, offset, Gargs);
+  // And adjust the argslot address to point at the deletion point.
+  __ add(argslot_reg, offset, argslot_reg);
+
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
+  __ add(SP, masked_offset, SP);
+}
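
    The mirror image for this helper, under the same illustrative model as the
    insertion sketch above (slot index 0 is the shallowest word; not VM code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative model only: dropping k slots starting at slot index p pulls
    // the deeper words up over the gap, then the frame shrinks (SP moves up),
    // mirroring the copy loop generated above.
    static void remove_arg_slots_model(std::vector<intptr_t>& v, size_t p, size_t k) {
      for (size_t i = p; i + k < v.size(); ++i)
        v[i] = v[i + k];
      v.resize(v.size() - k);
    }

    int main() {
      std::vector<intptr_t> args = {10, 0, 20, 30};
      remove_arg_slots_model(args, 1, 1);           // drop the slot holding 0
      for (intptr_t a : args) std::printf("%ld ", (long)a);
      std::printf("\n");                            // prints: 10 20 30
    }
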
+
+
+#ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
+void trace_method_handle_stub(const char* adaptername,
+                              oop mh) {
+#if 0
+                              intptr_t* entry_sp,
+                              intptr_t* saved_sp,
+                              intptr_t* saved_bp) {
+  // called as a leaf from native code: do not block the JVM!
+  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
+  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
+  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
+         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
+  if (last_sp != saved_sp)
+    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+#endif
+
+  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
+  print_method_handle(mh);
+}
+#endif // PRODUCT
+
+// which conversion op types are implemented here?
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
+         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+         );
+  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+}
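
    For reference, a per-op bitmask like the one returned above is consumed by
    simple bit tests; a minimal, self-contained sketch in which the OP_* numbering
    is invented for illustration (the real constants live in
    sun_dyn_AdapterMethodHandle):

    #include <cstdio>

    // Hypothetical op numbering, for illustration only.
    enum { OP_RETYPE_ONLY = 0, OP_CHECK_CAST = 2, OP_SPREAD_ARGS = 10 };

    static bool conversion_op_supported(int mask, int op) {
      return ((mask >> op) & 1) != 0;               // test the op's bit
    }

    int main() {
      int mask = (1 << OP_RETYPE_ONLY) | (1 << OP_CHECK_CAST);  // OP_SPREAD_ARGS left out
      std::printf("check_cast:  %d\n", conversion_op_supported(mask, OP_CHECK_CAST));   // 1
      std::printf("spread_args: %d\n", conversion_op_supported(mask, OP_SPREAD_ARGS));  // 0
    }
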
+
+//------------------------------------------------------------------------------
+// MethodHandles::generate_method_handle_stub
+//
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
-  ShouldNotReachHere(); //NYI, 6815692
+  // Here is the register state during an interpreted call,
+  // as set up by generate_method_handle_interpreter_entry():
+  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
+  // - G3: receiver method handle
+  // - O5_savedSP: sender SP (must preserve)
+
+  Register O0_argslot = O0;
+  Register O1_scratch = O1;
+  Register O2_scratch = O2;
+  Register O3_scratch = O3;
+  Register G5_index   = G5;
+
+  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
+
+  // Some handy addresses:
+  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
+
+  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
+
+  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
+
+  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
+  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
+
+  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
+  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
+  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
+
+  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
+
+  if (have_entry(ek)) {
+    __ nop();  // empty stubs make SG sick
+    return;
+  }
+
+  address interp_entry = __ pc();
+  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
+
+#ifndef PRODUCT
+  if (TraceMethodHandles) {
+    // save: Gargs, O5_savedSP
+    __ save(SP, -16*wordSize, SP);
+    __ set((intptr_t) entry_name(ek), O0);
+    __ mov(G3_method_handle, O1);
+    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+    __ restore(SP, 16*wordSize, SP);
+  }
+#endif // PRODUCT
+
+  switch ((int) ek) {
+  case _raise_exception:
+    {
+      // Not a real MH entry, but rather shared code for raising an
+      // exception.  Extra local arguments are passed in scratch
+      // registers: the required type in O3, the failing object (or NULL)
+      // in O2, and the failing bytecode type in O1.
+
+      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
+
+      // Push arguments as if coming from the interpreter.
+      Register O0_scratch = O0_argslot;
+      int stackElementSize = Interpreter::stackElementSize;
+
+      // Make space on the stack for the arguments.
+      __ sub(SP,    4*stackElementSize, SP);
+      __ sub(Gargs, 3*stackElementSize, Gargs);
+      //__ sub(Lesp,  3*stackElementSize, Lesp);
+
+      // void raiseException(int code, Object actual, Object required)
+      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
+      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
+      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
+
+      Label no_method;
+      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
+      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
+      __ ld_ptr(Address(G5_method, 0), G5_method);
+      __ tst(G5_method);
+      __ brx(Assembler::zero, false, Assembler::pn, no_method);
+      __ delayed()->nop();
+
+      int jobject_oop_offset = 0;
+      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
+      __ tst(G5_method);
+      __ brx(Assembler::zero, false, Assembler::pn, no_method);
+      __ delayed()->nop();
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+
+      // If we get here, the Java runtime did not do its job of creating the exception.
+      // Do something that at least causes a valid throw from the interpreter.
+      __ bind(no_method);
+      __ unimplemented("_raise_exception no method");
+    }
+    break;
+
+  case _invokestatic_mh:
+  case _invokespecial_mh:
+    {
+      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+      __ verify_oop(G5_method);
+      // Same as TemplateTable::invokestatic or invokespecial,
+      // minus the CP setup and profiling:
+      if (ek == _invokespecial_mh) {
+        // Must load & check the first argument before entering the target method.
+        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+        __ null_check(G3_method_handle);
+        __ verify_oop(G3_method_handle);
+      }
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _invokevirtual_mh:
+    {
+      // Same as TemplateTable::invokevirtual,
+      // minus the CP setup and profiling:
+
+      // Pick out the vtable index and receiver offset from the MH,
+      // and then we can discard it:
+      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+      __ ldsw(G3_dmh_vmindex, G5_index);
+      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
+      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
+
+      // Get receiver klass:
+      Register O0_klass = O0_argslot;
+      __ load_klass(G3_method_handle, O0_klass);
+      __ verify_oop(O0_klass);
+
+      // Get target methodOop & entry point:
+      const int base = instanceKlass::vtable_start_offset() * wordSize;
+      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+
+      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
+      __ add(O0_klass, G5_index, O0_klass);
+      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
+      __ ld_ptr(vtable_entry_addr, G5_method);
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _invokeinterface_mh:
+    {
+      // Same as TemplateTable::invokeinterface,
+      // minus the CP setup and profiling:
+      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+      Register O1_intf  = O1_scratch;
+      __ ld_ptr(G3_mh_vmtarget, O1_intf);
+      __ ldsw(G3_dmh_vmindex, G5_index);
+      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
+
+      // Get receiver klass:
+      Register O0_klass = O0_argslot;
+      __ load_klass(G3_method_handle, O0_klass);
+      __ verify_oop(O0_klass);
+
+      // Get interface:
+      Label no_such_interface;
+      __ verify_oop(O1_intf);
+      __ lookup_interface_method(O0_klass, O1_intf,
+                                 // Note: next two args must be the same:
+                                 G5_index, G5_method,
+                                 O2_scratch,
+                                 O3_scratch,
+                                 no_such_interface);
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+
+      __ bind(no_such_interface);
+      // Throw an exception.
+      // For historical reasons, it will be IncompatibleClassChangeError.
+      __ unimplemented("not tested yet");
+      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
+      __ mov(O0_klass, O2_scratch);  // bad receiver
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
+      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
+    }
+    break;
+
+  case _bound_ref_mh:
+  case _bound_int_mh:
+  case _bound_long_mh:
+  case _bound_ref_direct_mh:
+  case _bound_int_direct_mh:
+  case _bound_long_direct_mh:
+    {
+      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
+      BasicType arg_type  = T_ILLEGAL;
+      int       arg_mask  = _INSERT_NO_MASK;
+      int       arg_slots = -1;
+      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
+
+      // Make room for the new argument:
+      __ ldsw(G3_bmh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
+
+      // Store bound argument into the new stack slot:
+      __ ld_ptr(G3_bmh_argument, O1_scratch);
+      if (arg_type == T_OBJECT) {
+        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
+      } else {
+        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
+        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
+        if (arg_slots == 2) {
+          __ unimplemented("not yet tested");
+#ifndef _LP64
+          __ signx(O2_scratch, O3_scratch);  // Sign extend
+#endif
+          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
+        } else {
+          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
+        }
+      }
+
+      if (direct_to_method) {
+        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+        __ verify_oop(G5_method);
+        __ jump_indirect_to(G5_method_fie, O1_scratch);
+        __ delayed()->nop();
+      } else {
+        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
+        __ verify_oop(G3_method_handle);
+        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+      }
+    }
+    break;
+
+  case _adapter_retype_only:
+  case _adapter_retype_raw:
+    // Immediately jump to the next MH layer:
+    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    // This is OK when all parameter types widen.
+    // It is also OK when a return type narrows.
+    break;
+
+  case _adapter_check_cast:
+    {
+      // Temps:
+      Register G5_klass = G5_index;  // Interesting AMH data.
+
+      // Check a reference argument before jumping to the next layer of MH:
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot);
+
+      // What class are we casting to?
+      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
+      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
+
+      Label done;
+      __ ld_ptr(vmarg, O1_scratch);
+      __ tst(O1_scratch);
+      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
+      __ delayed()->nop();
+      __ load_klass(O1_scratch, O1_scratch);
+
+      // Live at this point:
+      // - G5_klass        :  klass required by the target method
+      // - O1_scratch      :  argument klass to test
+      // - G3_method_handle:  adapter method handle
+      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
+
+      // If we get here, the type check failed!
+      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
+      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
+      __ ld_ptr(vmarg, O2_scratch);  // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
+      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
+
+      __ bind(done);
+      // Get the new MH:
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_prim_to_prim:
+  case _adapter_ref_to_prim:
+    // Handled completely by optimized cases.
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
+//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
+    {
+      // Perform an in-place conversion to int or an int subword.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot);
+      Address value;
+      bool value_left_justified = false;
+
+      switch (ek) {
+      case _adapter_opt_i2i:
+      case _adapter_opt_l2i:
+        __ unimplemented(entry_name(ek));
+        value = vmarg;
+        break;
+      case _adapter_opt_unboxi:
+        {
+          // Load the value up from the heap.
+          __ ld_ptr(vmarg, O1_scratch);
+          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
+#ifdef ASSERT
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt)))
+              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
+          }
+#endif
+          __ null_check(O1_scratch, value_offset);
+          value = Address(O1_scratch, value_offset);
+#ifdef _BIG_ENDIAN
+          // Values stored in objects are packed.
+          value_left_justified = true;
+#endif
+        }
+        break;
+      default:
+        ShouldNotReachHere();
+      }
+
+      // This check is required on _BIG_ENDIAN
+      Register G5_vminfo = G5_index;
+      __ ldsw(G3_amh_conversion, G5_vminfo);
+      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+
+      // Original 32-bit vmdata word must be of this form:
+      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+      __ lduw(value, O1_scratch);
+      if (!value_left_justified)
+        __ sll(O1_scratch, G5_vminfo, O1_scratch);
+      Label zero_extend, done;
+      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
+      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
+      __ delayed()->nop();
+
+      // this path is taken for int->byte, int->short
+      __ sra(O1_scratch, G5_vminfo, O1_scratch);
+      __ ba(false, done);
+      __ delayed()->nop();
+
+      __ bind(zero_extend);
+      // this is taken for int->char
+      __ srl(O1_scratch, G5_vminfo, O1_scratch);
+
+      __ bind(done);
+      __ st(O1_scratch, vmarg);
+
+      // Get the new MH:
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
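
    The shift-left-then-shift-right sequence above is the entire conversion. A
    standalone sketch of the same narrowing, where 'shift' stands in for the
    vminfo-derived shift count (24 for int->byte, 16 for int->short/char) and the
    boolean chooses sra versus srl; illustrative only, not the VM's encoding:

    #include <cstdint>
    #include <cstdio>

    // Left-justify the wanted field, then shift back arithmetically (signed
    // byte/short) or logically (char), as the sra/srl paths above do.
    static int32_t narrow(int32_t x, int shift, bool is_signed) {
      int32_t left = (int32_t)((uint32_t)x << shift);
      return is_signed ? (left >> shift)                      // sign extend
                       : (int32_t)((uint32_t)left >> shift);  // zero extend
    }

    int main() {
      std::printf("%d\n", narrow(0x181, 24, true));     // int -> byte : -127
      std::printf("%d\n", narrow(0x18001, 16, true));   // int -> short: -32767
      std::printf("%d\n", narrow(0x18001, 16, false));  // int -> char : 32769
    }
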
+
+  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
+    {
+      // Perform an in-place int-to-long or ref-to-long conversion.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+
+      // On a big-endian machine we duplicate the slot and store the MSW
+      // in the first slot.
+      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
+
+      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
+
+      Address arg_lsw(O0_argslot, 0);
+      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
+
+      switch (ek) {
+      case _adapter_opt_i2l:
+        {
+          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
+#ifndef _LP64
+          __ signx(O2_scratch, O3_scratch);  // Sign extend
+#endif
+          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
+        }
+        break;
+      case _adapter_opt_unboxl:
+        {
+          // Load the value up from the heap.
+          __ ld_ptr(arg_lsw, O1_scratch);
+          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
+          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
+          __ null_check(O1_scratch, value_offset);
+          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
+          __ st_long(O2_scratch, arg_msw);
+        }
+        break;
+      default:
+        ShouldNotReachHere();
+      }
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
+    {
+      // perform an in-place floating primitive conversion
+      __ unimplemented(entry_name(ek));
+    }
+    break;
+
+  case _adapter_prim_to_ref:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  case _adapter_swap_args:
+  case _adapter_rot_args:
+    // handled completely by optimized cases
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_swap_1:
+  case _adapter_opt_swap_2:
+  case _adapter_opt_rot_1_up:
+  case _adapter_opt_rot_1_down:
+  case _adapter_opt_rot_2_up:
+  case _adapter_opt_rot_2_down:
+    {
+      int swap_bytes = 0, rotate = 0;
+      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
+
+      // 'argslot' is the position of the first argument to swap.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'vminfo' is the second.
+      Register O1_destslot = O1_scratch;
+      __ ldsw(G3_amh_conversion, O1_destslot);
+      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
+      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
+
+      if (!rotate) {
+        for (int i = 0; i < swap_bytes; i += wordSize) {
+          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
+          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
+          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
+          __ st_ptr(O2_scratch, Address(O1_destslot, i));
+        }
+      } else {
+        // Save the first chunk, which is going to get overwritten.
+        switch (swap_bytes) {
+        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
+        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
+        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
+        default: ShouldNotReachHere();
+        }
+
+        if (rotate > 0) {
+          // Rotate upward.
+          __ sub(O0_argslot, swap_bytes, O0_argslot);
+#ifdef ASSERT
+          {
+            // Verify that argslot > destslot, by at least swap_bytes.
+            Label L_ok;
+            __ cmp(O0_argslot, O1_destslot);
+            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
+            __ delayed()->nop();
+            __ stop("source must be above destination (upward rotation)");
+            __ bind(L_ok);
+          }
+#endif
+          // Work argslot down to destslot, copying contiguous data upwards.
+          // Pseudo-code:
+          //   argslot  = src_addr - swap_bytes
+          //   destslot = dest_addr
+          //   while (argslot >= destslot) {
+          //     *(argslot + swap_bytes) = *(argslot + 0);
+          //     argslot--;
+          //   }
+          Label loop;
+          __ bind(loop);
+          __ ld_ptr(Address(O0_argslot, 0), G5_index);
+          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
+          __ sub(O0_argslot, wordSize, O0_argslot);
+          __ cmp(O0_argslot, O1_destslot);
+          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
+          __ delayed()->nop();  // FILLME
+        } else {
+          __ add(O0_argslot, swap_bytes, O0_argslot);
+#ifdef ASSERT
+          {
+            // Verify that argslot < destslot, by at least swap_bytes.
+            Label L_ok;
+            __ cmp(O0_argslot, O1_destslot);
+            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+            __ delayed()->nop();
+            __ stop("source must be above destination (upward rotation)");
+            __ bind(L_ok);
+          }
+#endif
+          // Work argslot up to destslot, copying contiguous data downwards.
+          // Pseudo-code:
+          //   argslot  = src_addr + swap_bytes
+          //   destslot = dest_addr
+          //   while (argslot <= destslot) {
+          //     *(argslot - swap_bytes) = *(argslot + 0);
+          //     argslot++;
+          //   }
+          Label loop;
+          __ bind(loop);
+          __ ld_ptr(Address(O0_argslot, 0), G5_index);
+          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
+          __ add(O0_argslot, wordSize, O0_argslot);
+          __ cmp(O0_argslot, O1_destslot);
+          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
+          __ delayed()->nop();  // FILLME
+        }
+
+        // Store the original first chunk into the destination slot, now free.
+        switch (swap_bytes) {
+        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
+        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
+        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
+        default: ShouldNotReachHere();
+        }
+      }
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
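
    Viewed on a plain argument array (slot index 0 is the shallowest word), the
    rotate branches above, taking the case where the source chunk sits deeper than
    the destination, amount to a range rotation; an illustrative sketch, not VM code:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Move the chunk of 'chunk' slots starting at 'src' so it lands at 'dst'
    // (dst < src); the words in between slide one chunk-width the other way.
    static void rotate_args_model(std::vector<long>& args, size_t dst, size_t src, size_t chunk) {
      std::rotate(args.begin() + dst, args.begin() + src, args.begin() + src + chunk);
    }

    int main() {
      std::vector<long> a = {1, 2, 3, 4};
      rotate_args_model(a, 0, 3, 1);        // rotate the deepest slot to the front
      for (long v : a) std::printf("%ld ", v);
      std::printf("\n");                    // prints: 4 1 2 3
    }
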
+
+  case _adapter_dup_args:
+    {
+      // 'argslot' is the position of the first argument to duplicate.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'stack_move' is negative number of words to duplicate.
+      Register G5_stack_move = G5_index;
+      __ ldsw(G3_amh_conversion, G5_stack_move);
+      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+
+      // Remember the old Gargs (argslot[0]).
+      Register O1_oldarg = O1_scratch;
+      __ mov(Gargs, O1_oldarg);
+
+      // Move Gargs down to make room for dups.
+      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
+      __ add(Gargs, G5_stack_move, Gargs);
+
+      // Compute the new Gargs (argslot[0]).
+      Register O2_newarg = O2_scratch;
+      __ mov(Gargs, O2_newarg);
+
+      // Copy from oldarg[0...] down to newarg[0...]
+      // Pseudo-code:
+      //   O1_oldarg  = old-Gargs
+      //   O2_newarg  = new-Gargs
+      //   O0_argslot = argslot
+      //   while (O2_newarg < O1_oldarg) *O2_newarg++ = *O0_argslot++
+      Label loop;
+      __ bind(loop);
+      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
+      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
+      __ add(O0_argslot, wordSize, O0_argslot);
+      __ add(O2_newarg,  wordSize, O2_newarg);
+      __ cmp(O2_newarg, O1_oldarg);
+      __ brx(Assembler::less, false, Assembler::pt, loop);
+      __ delayed()->nop();  // FILLME
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
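
    The net effect of the copy loop above, again on an illustrative Gargs-relative
    array (slot 0 is the shallowest word), is that the duplicated words are
    prepended as new shallow slots. A minimal sketch, not VM code:

    #include <cstdio>
    #include <vector>

    // Duplicate k slots starting at slot index p into k newly opened slots at
    // the shallow end of the argument list (the chunk is copied out first to
    // avoid inserting a range of a vector into itself).
    static void dup_args_model(std::vector<long>& args, size_t p, size_t k) {
      std::vector<long> chunk(args.begin() + p, args.begin() + p + k);
      args.insert(args.begin(), chunk.begin(), chunk.end());
    }

    int main() {
      std::vector<long> a = {1, 2, 3};
      dup_args_model(a, 1, 2);              // duplicate slots {2, 3} in front
      for (long v : a) std::printf("%ld ", v);
      std::printf("\n");                    // prints: 2 3 1 2 3
    }
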
+
+  case _adapter_drop_args:
+    {
+      // 'argslot' is the position of the first argument to nuke.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'stack_move' is number of words to drop.
+      Register G5_stack_move = G5_index;
+      __ ldsw(G3_amh_conversion, G5_stack_move);
+      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+
+      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_collect_args:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  case _adapter_spread_args:
+    // Handled completely by optimized cases.
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_spread_0:
+  case _adapter_opt_spread_1:
+  case _adapter_opt_spread_more:
+    {
+      // spread an array out into a group of arguments
+      __ unimplemented(entry_name(ek));
+    }
+    break;
+
+  case _adapter_flyby:
+  case _adapter_ricochet:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
+  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+
+  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
 }
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -547,17 +547,11 @@
   void set_Rdisp(Register r)  { Rdisp = r; }
 
   void patch_callers_callsite();
-  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
 
   // base+st_off points to top of argument
-  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
+  int arg_offset(const int st_off) { return st_off; }
   int next_arg_offset(const int st_off) {
-    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
-  }
-
-  int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
-  int next_tag_offset(const int st_off) {
-    return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
+    return st_off - Interpreter::stackElementSize;
   }
 
   // Argument slot values may be loaded first into a register because
@@ -565,9 +559,6 @@
   RegisterOrConstant arg_slot(const int st_off);
   RegisterOrConstant next_arg_slot(const int st_off);
 
-  RegisterOrConstant tag_slot(const int st_off);
-  RegisterOrConstant next_tag_slot(const int st_off);
-
   // Stores long into offset pointed to by base
   void store_c2i_long(Register r, Register base,
                       const int st_off, bool is_stack);
@@ -653,23 +644,6 @@
   __ bind(L);
 }
 
-void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
-                 Register scratch) {
-  if (TaggedStackInterpreter) {
-    RegisterOrConstant slot = tag_slot(st_off);
-    // have to store zero because local slots can be reused (rats!)
-    if (t == frame::TagValue) {
-      __ st_ptr(G0, base, slot);
-    } else if (t == frame::TagCategory2) {
-      __ st_ptr(G0, base, slot);
-      __ st_ptr(G0, base, next_tag_slot(st_off));
-    } else {
-      __ mov(t, scratch);
-      __ st_ptr(scratch, base, slot);
-    }
-  }
-}
-
 
 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
   RegisterOrConstant roc(arg_offset(st_off));
@@ -682,17 +656,6 @@
 }
 
 
-RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
-  RegisterOrConstant roc(tag_offset(st_off));
-  return __ ensure_simm13_or_reg(roc, Rdisp);
-}
-
-RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
-  RegisterOrConstant roc(next_tag_offset(st_off));
-  return __ ensure_simm13_or_reg(roc, Rdisp);
-}
-
-
 // Stores long into offset pointed to by base
 void AdapterGenerator::store_c2i_long(Register r, Register base,
                                       const int st_off, bool is_stack) {
@@ -718,19 +681,16 @@
   }
 #endif // COMPILER2
 #endif // _LP64
-  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
 }
 
 void AdapterGenerator::store_c2i_object(Register r, Register base,
                       const int st_off) {
   __ st_ptr (r, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagReference, base, st_off, r);
 }
 
 void AdapterGenerator::store_c2i_int(Register r, Register base,
                    const int st_off) {
   __ st (r, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagValue, base, st_off, r);
 }
 
 // Stores into offset pointed to by base
@@ -745,13 +705,11 @@
   __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
   __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
 #endif
-  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
 }
 
 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                        const int st_off) {
   __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
 }
 
 void AdapterGenerator::gen_c2i_adapter(
@@ -786,14 +744,14 @@
   // Since all args are passed on the stack, total_args_passed*wordSize is the
   // space we need.  Add in varargs area needed by the interpreter. Round up
   // to stack alignment.
-  const int arg_size = total_args_passed * Interpreter::stackElementSize();
+  const int arg_size = total_args_passed * Interpreter::stackElementSize;
   const int varargs_area =
                  (frame::varargs_offset - frame::register_save_words)*wordSize;
   const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
 
   int bias = STACK_BIAS;
   const int interp_arg_offset = frame::varargs_offset*wordSize +
-                        (total_args_passed-1)*Interpreter::stackElementSize();
+                        (total_args_passed-1)*Interpreter::stackElementSize;
 
   Register base = SP;
 
@@ -814,7 +772,7 @@
 
   // First write G1 (if used) to where ever it must go
   for (int i=0; i<total_args_passed; i++) {
-    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
+    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
     if (r_1 == G1_scratch->as_VMReg()) {
@@ -831,7 +789,7 @@
 
   // Now write the args into the outgoing interpreter space
   for (int i=0; i<total_args_passed; i++) {
-    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
+    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
     if (!r_1->is_valid()) {
@@ -900,7 +858,7 @@
 #endif // _LP64
 
   __ mov((frame::varargs_offset)*wordSize -
-         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
+         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
   // Jump to the interpreter just as if interpreter was doing it.
   __ jmpl(G3_scratch, 0, G0);
   // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
@@ -1051,7 +1009,7 @@
     // ldx/lddf optimizations.
 
     // Load in argument order going down.
-    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
+    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
     set_Rdisp(G1_scratch);
 
     VMReg r_1 = regs[i].first();
@@ -1120,7 +1078,7 @@
   for (int i=0; i<total_args_passed; i++) {
     if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
       // Load in argument order going down
-      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
+      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
       // Need to marshal 64-bit value from misaligned Lesp loads
       Register r = regs[i].first()->as_Register()->after_restore();
       if (r == G1 || r == G4) {
@@ -3062,7 +3020,7 @@
           "test and remove; got more parms than locals");
   if (callee_locals < callee_parameters)
     return 0;                   // No adjustment for negative locals
-  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
   return round_to(diff, WordsPerLong);
 }
 
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -139,7 +139,7 @@
       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
-      __ sll(t, Interpreter::logStackElementSize(), t);                    // compute number of bytes
+      __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
       __ neg(t);                                                // negate so it can be used with save
       __ save(SP, t, SP);                                       // setup new frame
     }
@@ -191,19 +191,13 @@
       // copy parameters if any
       Label loop;
       __ BIND(loop);
-      // Store tag first.
-      if (TaggedStackInterpreter) {
-        __ ld_ptr(src, 0, tmp);
-        __ add(src, BytesPerWord, src);  // get next
-        __ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
-      }
       // Store parameter value
       __ ld_ptr(src, 0, tmp);
       __ add(src, BytesPerWord, src);
-      __ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
+      __ st_ptr(tmp, dst, 0);
       __ deccc(cnt);
       __ br(Assembler::greater, false, Assembler::pt, loop);
-      __ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
+      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
 
       // done
       __ BIND(exit);
@@ -220,7 +214,7 @@
     // setup parameters
     const Register t = G3_scratch;
     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
-    __ sll(t, Interpreter::logStackElementSize(), t);            // compute number of bytes
+    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
     __ sub(FP, t, Gargs);                              // setup parameter pointer
 #ifdef _LP64
     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
@@ -2917,6 +2911,16 @@
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
+
     // Don't initialize the platform math functions since sparc
     // doesn't have intrinsics for these operations.
   }
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -151,8 +151,10 @@
 
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
+  TosState incoming_state = state;
+
+  Label cont;
   address compiled_entry = __ pc();
-  Label cont;
 
   address entry = __ pc();
 #if !defined(_LP64) && defined(COMPILER2)
@@ -165,12 +167,11 @@
   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 
-  if( state == ltos ) {
-    __ srl (G1, 0,O1);
-    __ srlx(G1,32,O0);
+  if (incoming_state == ltos) {
+    __ srl (G1,  0, O1);
+    __ srlx(G1, 32, O0);
   }
-#endif /* !_LP64 && COMPILER2 */
-
+#endif // !_LP64 && COMPILER2
 
   __ bind(cont);
 
@@ -182,17 +183,32 @@
 
   __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
 
-
+  Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
   const Register size  = G1_scratch;
+  if (EnableInvokeDynamic) {
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
+    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
+    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
+    __ delayed()->nop();
+  }
   __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
+  __ bind(L_got_cache);
   __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                    ConstantPoolCacheEntry::flags_offset(), size);
   __ and3(size, 0xFF, size);                   // argument size in words
-  __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
+  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
   __ add(Lesp, size, Lesp);                    // pop arguments
   __ dispatch_next(state, step);
 
+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
+    __ ba(false, L_got_cache);
+    __ delayed()->nop();
+  }
+
   return entry;
 }
 
@@ -479,7 +495,7 @@
   // Set the saved SP after the register window save
   //
   assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
-  __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
+  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
   __ add(Gargs, Otmp1, Gargs);
 
   if (native_call) {
@@ -495,7 +511,7 @@
     __ lduh( size_of_locals, Otmp1 );
     __ sub( Otmp1, Glocals_size, Glocals_size );
     __ round_to( Glocals_size, WordsPerLong );
-    __ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );
+    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
 
     // see if the frame is greater than one page in size. If so,
     // then we need to verify there is enough stack space remaining
@@ -503,7 +519,7 @@
     __ lduh( max_stack, Gframe_size );
     __ add( Gframe_size, extra_space, Gframe_size );
     __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);
+    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
 
     // Add in java locals size for stack overflow check only
     __ add( Gframe_size, Glocals_size, Gframe_size );
@@ -1218,8 +1234,8 @@
   // be updated!
   __ lduh( size_of_locals, O2 );
   __ lduh( size_of_parameters, O1 );
-  __ sll( O2, Interpreter::logStackElementSize(), O2);
-  __ sll( O1, Interpreter::logStackElementSize(), O1 );
+  __ sll( O2, Interpreter::logStackElementSize, O2);
+  __ sll( O1, Interpreter::logStackElementSize, O1 );
   __ sub( Llocals, O2, O2 );
   __ sub( Llocals, O1, O1 );
 
@@ -1454,8 +1470,8 @@
        round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
   // callee_locals and max_stack are counts, not the size in frame.
   const int locals_size =
-       round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
-  const int max_stack_words = max_stack * Interpreter::stackElementWords();
+       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
+  const int max_stack_words = max_stack * Interpreter::stackElementWords;
   return (round_to((max_stack_words
                    //6815692//+ methodOopDesc::extra_stack_words()
                    + rounded_vm_local_words
@@ -1554,11 +1570,11 @@
 
     // preallocate stack space
     intptr_t*  esp = monitors - 1 -
-                     (tempcount * Interpreter::stackElementWords()) -
+                     (tempcount * Interpreter::stackElementWords) -
                      popframe_extra_args;
 
-    int local_words = method->max_locals() * Interpreter::stackElementWords();
-    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords();
+    int local_words = method->max_locals() * Interpreter::stackElementWords;
+    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords;
     NEEDS_CLEANUP;
     intptr_t* locals;
     if (caller->is_interpreted_frame()) {
@@ -1646,7 +1662,7 @@
     BasicObjectLock* mp = (BasicObjectLock*)monitors;
 
     assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
-    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
+    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
     assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
     assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
     assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
@@ -1742,7 +1758,7 @@
 
     // Compute size of arguments for saving when returning to deoptimized caller
     __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
-    __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
+    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
     __ sub(Llocals, Gtmp1, Gtmp2);
     __ add(Gtmp2, wordSize, Gtmp2);
     // Save these arguments
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,8 @@
   // fail with a guarantee ("not enough space for interpreter generation");
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Max size with JVMTI
+
 #ifdef _LP64
   // The sethi() instruction generates lots more instructions when shell
   // stack limit is unlimited, so that's why this is much bigger.
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -580,7 +580,6 @@
 
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
-  debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
   __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
 }
 
@@ -588,7 +587,6 @@
 void TemplateTable::lload(int n) {
   transition(vtos, ltos);
   assert(n+1 < Argument::n_register_parameters, "would need more code");
-  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
   __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
 }
 
@@ -596,7 +594,6 @@
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   assert(n < Argument::n_register_parameters, "would need more code");
-  debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
   __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n),     Ftos_f );
 }
 
@@ -604,14 +601,12 @@
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
   FloatRegister dst = Ftos_d;
-  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
   __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
 }
 
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
-  debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
   __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
 }
 
@@ -707,12 +702,11 @@
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
   locals_index(G3_scratch);
-  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
+  __ store_local_ptr(G3_scratch, Otos_i);
 }
 
 
@@ -750,12 +744,11 @@
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
   locals_index_wide(G3_scratch);
-  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
+  __ store_local_ptr(G3_scratch, Otos_i);
 }
 
 
@@ -845,13 +838,13 @@
   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
 
   __ ba(false,done);
-  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
+  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
 
   __ bind(is_null);
   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
 
   __ profile_null_seen(G3_scratch);
-  __ inc(Lesp, 3* Interpreter::stackElementSize());     // adj sp (pops array, index and value)
+  __ inc(Lesp, 3* Interpreter::stackElementSize);     // adj sp (pops array, index and value)
   __ bind(done);
 }
 
@@ -884,7 +877,6 @@
 
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
-  __ tag_local(frame::TagValue, Llocals, Otos_i, n);
   __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
@@ -892,7 +884,6 @@
 void TemplateTable::lstore(int n) {
   transition(ltos, vtos);
   assert(n+1 < Argument::n_register_parameters, "only handle register cases");
-  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
   __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
 
 }
@@ -901,7 +892,6 @@
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   assert(n < Argument::n_register_parameters, "only handle register cases");
-  __ tag_local(frame::TagValue, Llocals, Otos_l, n);
   __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
@@ -909,30 +899,28 @@
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
   FloatRegister src = Ftos_d;
-  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
   __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
 }
 
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
-  __ store_local_ptr( n, Otos_i, Otos_l2 );
+  __ store_local_ptr(n, Otos_i);
 }
 
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ inc(Lesp, Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ inc(Lesp, 2 * Interpreter::stackElementSize());
+  __ inc(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
@@ -940,8 +928,8 @@
   transition(vtos, vtos);
   // stack: ..., a
   // load a and tag
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ push_ptr(Otos_i, Otos_l2);
+  __ load_ptr(0, Otos_i);
+  __ push_ptr(Otos_i);
   // stack: ..., a, a
 }
 
@@ -949,11 +937,11 @@
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put b
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put a - like swap
-  __ push_ptr(Otos_l1, Otos_l2);                    // push b
+  __ load_ptr( 1, G3_scratch);  // get a
+  __ load_ptr( 0, Otos_l1);     // get b
+  __ store_ptr(1, Otos_l1);     // put b
+  __ store_ptr(0, G3_scratch);  // put a - like swap
+  __ push_ptr(Otos_l1);         // push b
   // stack: ..., b, a, b
 }
 
@@ -962,27 +950,27 @@
   transition(vtos, vtos);
   // stack: ..., a, b, c
   // get c and push on stack, reuse registers
-  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
-  __ push_ptr(G3_scratch, G4_scratch);               // push c with tag
+  __ load_ptr( 0, G3_scratch);  // get c
+  __ push_ptr(G3_scratch);      // push c
   // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
   // (stack offsets n+1 now)
-  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);          // get a
-  __ store_ptr_and_tag(3, G3_scratch, G4_scratch);   // put c at 3
+  __ load_ptr( 3, Otos_l1);     // get a
+  __ store_ptr(3, G3_scratch);  // put c at 3
   // stack: ..., c, b, c, c  (a in reg)
-  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);    // get b
-  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);         // put a at 2
+  __ load_ptr( 2, G3_scratch);  // get b
+  __ store_ptr(2, Otos_l1);     // put a at 2
   // stack: ..., c, a, c, c  (b in reg)
-  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);   // put b at 1
+  __ store_ptr(1, G3_scratch);  // put b at 1
   // stack: ..., c, a, b, c
 }
 
 
 void TemplateTable::dup2() {
   transition(vtos, vtos);
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
-  __ push_ptr(G3_scratch, G4_scratch);                // push a
-  __ push_ptr(Otos_l1, Otos_l2);                      // push b
+  __ load_ptr(1, G3_scratch);  // get a
+  __ load_ptr(0, Otos_l1);     // get b
+  __ push_ptr(G3_scratch);     // push a
+  __ push_ptr(Otos_l1);        // push b
   // stack: ..., a, b, a, b
 }
 
@@ -990,17 +978,17 @@
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get b
-  __ load_ptr_and_tag(2, Otos_l1, Otos_l2);           // get a
-  __ store_ptr_and_tag(2, Lscratch, G1_scratch);      // put b at a
+  __ load_ptr( 1, Lscratch);    // get b
+  __ load_ptr( 2, Otos_l1);     // get a
+  __ store_ptr(2, Lscratch);    // put b at a
   // stack: ..., b, b, c
-  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
-  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);    // put c at b
+  __ load_ptr( 0, G3_scratch);  // get c
+  __ store_ptr(1, G3_scratch);  // put c at b
   // stack: ..., b, c, c
-  __ store_ptr_and_tag(0, Otos_l1, Otos_l2);          // put a at c
+  __ store_ptr(0, Otos_l1);     // put a at c
   // stack: ..., b, c, a
-  __ push_ptr(Lscratch, G1_scratch);                  // push b
-  __ push_ptr(G3_scratch, G4_scratch);                // push c
+  __ push_ptr(Lscratch);        // push b
+  __ push_ptr(G3_scratch);      // push c
   // stack: ..., b, c, a, b, c
 }
 
@@ -1010,18 +998,18 @@
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get c
-  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);           // get a
-  __ store_ptr_and_tag(3, Lscratch, G1_scratch);      // put c at 3
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a at 1
+  __ load_ptr( 1, Lscratch);    // get c
+  __ load_ptr( 3, Otos_l1);     // get a
+  __ store_ptr(3, Lscratch);    // put c at 3
+  __ store_ptr(1, Otos_l1);     // put a at 1
   // stack: ..., c, b, a, d
-  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);     // get b
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get d
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b at 0
-  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);          // put d at 2
+  __ load_ptr( 2, G3_scratch);  // get b
+  __ load_ptr( 0, Otos_l1);     // get d
+  __ store_ptr(0, G3_scratch);  // put b at 0
+  __ store_ptr(2, Otos_l1);     // put d at 2
   // stack: ..., c, d, a, b
-  __ push_ptr(Lscratch, G1_scratch);                  // push c
-  __ push_ptr(Otos_l1, Otos_l2);                      // push d
+  __ push_ptr(Lscratch);        // push c
+  __ push_ptr(Otos_l1);         // push d
   // stack: ..., c, d, a, b, c, d
 }
 
@@ -1029,10 +1017,10 @@
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a
+  __ load_ptr( 1, G3_scratch);  // get a
+  __ load_ptr( 0, Otos_l1);     // get b
+  __ store_ptr(0, G3_scratch);  // put b
+  __ store_ptr(1, Otos_l1);     // put a
   // stack: ..., b, a
 }
 
@@ -1045,9 +1033,9 @@
    case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
      // %%%%% Mul may not exist: better to call .mul?
    case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
-   case _and:  __  and3(O1, Otos_i, Otos_i);  break;
-   case  _or:  __   or3(O1, Otos_i, Otos_i);  break;
-   case _xor:  __  xor3(O1, Otos_i, Otos_i);  break;
+   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
+   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
+   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
    case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
    case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
    case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
@@ -1061,17 +1049,17 @@
   __ pop_l(O2);
   switch (op) {
 #ifdef _LP64
-   case  add:  __ add(O2, Otos_l, Otos_l);  break;
-   case  sub:  __ sub(O2, Otos_l, Otos_l);  break;
-   case _and:  __ and3( O2, Otos_l, Otos_l);  break;
-   case  _or:  __  or3( O2, Otos_l, Otos_l);  break;
-   case _xor:  __ xor3( O2, Otos_l, Otos_l);  break;
+   case  add:  __  add(O2, Otos_l, Otos_l);  break;
+   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
+   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
+   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
+   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
 #else
    case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
    case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
-   case _and:  __ and3(  O3, Otos_l2, Otos_l2);  __ and3( O2, Otos_l1, Otos_l1);  break;
-   case  _or:  __  or3(  O3, Otos_l2, Otos_l2);  __  or3( O2, Otos_l1, Otos_l1);  break;
-   case _xor:  __ xor3(  O3, Otos_l2, Otos_l2);  __ xor3( O2, Otos_l1, Otos_l1);  break;
+   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
+   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
+   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
 #endif
    default: ShouldNotReachHere();
   }
@@ -1307,7 +1295,7 @@
   __ ldsb(Lbcp, 2, O2);  // load constant
   __ access_local_int(G3_scratch, Otos_i);
   __ add(Otos_i, O2, Otos_i);
-  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
+  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
 }
 
 
@@ -1317,7 +1305,7 @@
   __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
   __ access_local_int(G3_scratch, Otos_i);
   __ add(Otos_i, O3, Otos_i);
-  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
+  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
 }
 
 
@@ -1555,7 +1543,7 @@
     // Bump Lbcp to target of JSR
     __ add(Lbcp, O1_disp, Lbcp);
     // Push returnAddress for "ret" on stack
-    __ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
+    __ push_ptr(Otos_i);
     // And away we go!
     __ dispatch_next(vtos);
     return;
@@ -1963,19 +1951,30 @@
 // ----------------------------------------------------------------------------
 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
+
   // Depends on cpCacheOop layout!
   const int shift_count = (1 + byte_no)*BitsPerByte;
   Label resolved;
 
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-                    ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
-
-  __ srl(  Lbyte_code, shift_count, Lbyte_code );
-  __ and3( Lbyte_code,        0xFF, Lbyte_code );
-  __ cmp(  Lbyte_code, (int)bytecode());
-  __ br(   Assembler::equal, false, Assembler::pt, resolved);
-  __ delayed()->set((int)bytecode(), O1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // We are resolved if the f1 field contains a non-null CallSite object.
+    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
+    __ tst(Lbyte_code);
+    __ br(Assembler::notEqual, false, Assembler::pt, resolved);
+    __ delayed()->set((int)bytecode(), O1);
+  } else {
+    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
+
+    __ srl(  Lbyte_code, shift_count, Lbyte_code );
+    __ and3( Lbyte_code,        0xFF, Lbyte_code );
+    __ cmp(  Lbyte_code, (int)bytecode());
+    __ br(   Assembler::equal, false, Assembler::pt, resolved);
+    __ delayed()->set((int)bytecode(), O1);
+  }
 
   address entry;
   switch (bytecode()) {
@@ -1987,12 +1986,13 @@
     case Bytecodes::_invokespecial  : // fall through
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
+    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
     default                         : ShouldNotReachHere();                                 break;
   }
   // first time invocation - must resolve first
   __ call_VM(noreg, entry, O1);
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
 
@@ -2742,7 +2742,7 @@
   Register Rflags  = G4_scratch;
   Register Rreceiver = Lscratch;
 
-  __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
+  __ ld_ptr(Llocals, 0, Rreceiver);
 
   // access constant pool cache  (is resolved)
   __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
@@ -3130,7 +3130,42 @@
     return;
   }
 
-  __ stop("invokedynamic NYI");//6815692//
+  // G5: CallSite object (f1)
+  // XX: unused (f2)
+  // G3: receiver address
+  // XX: flags (unused)
+
+  Register G5_callsite = G5_method;
+  Register Rscratch    = G3_scratch;
+  Register Rtemp       = G1_scratch;
+  Register Rret        = Lscratch;
+
+  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
+  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
+
+  __ verify_oop(G5_callsite);
+
+  // profile this call
+  __ profile_call(O4);
+
+  // get return address
+  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
+  __ set(table, Rtemp);
+  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
+  // Make sure we don't need to mask Rret for tosBits after the above shift
+  ConstantPoolCacheEntry::verify_tosBits();
+  __ sll(Rret, LogBytesPerWord, Rret);
+  __ ld_ptr(Rtemp, Rret, Rret);  // get return address
+
+  __ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
+  __ null_check(G3_method_handle);
+
+  // Adjust Rret first so Llast_SP can be same as Rret
+  __ add(Rret, -frame::pc_return_offset, O7);
+  __ add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
+  __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
+  // Record SP so we can remove any stack space allocated by adapter transition
+  __ delayed()->mov(SP, Llast_SP);
 }
 
 
@@ -3649,7 +3684,7 @@
   transition(vtos, atos);
      // put ndims * wordSize into Lscratch
   __ ldub( Lbcp,     3,               Lscratch);
-  __ sll(  Lscratch, Interpreter::logStackElementSize(), Lscratch);
+  __ sll(  Lscratch, Interpreter::logStackElementSize, Lscratch);
      // Lesp points past last_dim, so set to O1 to first_dim address
   __ add(  Lesp,     Lscratch,        O1);
      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
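
The resolve_cache_and_index change in this file treats an invokedynamic site as resolved once the cache entry's f1 field holds a non-null CallSite, while the other invoke bytecodes still compare the bytecode recorded in the entry. A simplified standalone model of that test; the struct and field names are stand-ins, the opcode value is only an example, and the shift/mask of the indices word done by the real code is omitted:

#include <cstdio>

struct CpCacheEntry {
  void* f1;               // for invokedynamic: the CallSite oop once resolved
  int   cached_bytecode;  // for other invokes: the bytecode recorded at resolve time
};

bool is_resolved(const CpCacheEntry& e, int bytecode, bool is_invokedynamic) {
  if (is_invokedynamic) {
    // Resolved exactly when f1 is non-null, as the new SPARC path checks.
    return e.f1 != 0;
  }
  // Otherwise resolved when the recorded bytecode matches the current one.
  return e.cached_bytecode == bytecode;
}

int main() {
  CpCacheEntry fresh    = { 0, 0 };
  CpCacheEntry resolved = { (void*)&fresh, 0 };
  printf("indy, fresh entry:    %d\n", is_resolved(fresh,    186, true));  // 0
  printf("indy, resolved entry: %d\n", is_resolved(resolved, 186, true));  // 1
  return 0;
}
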
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -6492,24 +6492,19 @@
 }
 
 void MacroAssembler::load_sized_value(Register dst, Address src,
-                                      int size_in_bytes, bool is_signed) {
-  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
+                                      size_t size_in_bytes, bool is_signed) {
+  switch (size_in_bytes) {
 #ifndef _LP64
   // For case 8, caller is responsible for manually loading
   // the second word into another register.
-  case ~8:  // fall through:
-  case  8:  movl(                dst, src ); break;
+  case  8: movl(dst, src); break;
 #else
-  case ~8:  // fall through:
-  case  8:  movq(                dst, src ); break;
+  case  8: movq(dst, src); break;
 #endif
-  case ~4:  // fall through:
-  case  4:  movl(                dst, src ); break;
-  case ~2:  load_signed_short(   dst, src ); break;
-  case  2:  load_unsigned_short( dst, src ); break;
-  case ~1:  load_signed_byte(    dst, src ); break;
-  case  1:  load_unsigned_byte(  dst, src ); break;
-  default:  ShouldNotReachHere();
+  case  4: movl(dst, src); break;
+  case  2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
+  case  1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
+  default: ShouldNotReachHere();
   }
 }
 
@@ -7706,6 +7701,7 @@
 // method handle's MethodType.  This macro hides the distinction.
 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                                 Register temp_reg) {
+  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   if (UseCompressedOops)  unimplemented();  // field accesses must decode
   // load mh.type.form.vmslots
   if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
@@ -7744,7 +7740,7 @@
 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                          int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
-  int stackElementSize = Interpreter::stackElementSize();
+  int stackElementSize = Interpreter::stackElementSize;
   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 #ifdef ASSERT
   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
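
The load_sized_value rewrite above drops a switch keyed on size_in_bytes XOR-ed with -1, which mapped the signed variants onto the bitwise complement of their size, in favor of a switch on the size plus an explicit signed/unsigned choice. A small standalone comparison of the two shapes; the 8-byte case is left out for brevity and the returned strings are mnemonics for illustration only:

#include <cstdio>

const char* old_style(int size_in_bytes, bool is_signed) {
  // Old trick: XOR with -1 flips all bits, so a signed size n selects case ~n.
  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
    case ~4: case  4: return "movl";
    case ~2:          return "load_signed_short";
    case  2:          return "load_unsigned_short";
    case ~1:          return "load_signed_byte";
    case  1:          return "load_unsigned_byte";
    default:          return "ShouldNotReachHere";
  }
}

const char* new_style(int size_in_bytes, bool is_signed) {
  // New form: switch on the size, then pick the signed or unsigned load directly.
  switch (size_in_bytes) {
    case 4: return "movl";
    case 2: return is_signed ? "load_signed_short" : "load_unsigned_short";
    case 1: return is_signed ? "load_signed_byte"  : "load_unsigned_byte";
    default: return "ShouldNotReachHere";
  }
}

int main() {
  printf("%s == %s\n", old_style(2, true),  new_style(2, true));
  printf("%s == %s\n", old_style(1, false), new_style(1, false));
  return 0;
}
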
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1511,7 +1511,7 @@
   void extend_sign(Register hi, Register lo);
 
   // Loading values by size and signed-ness
-  void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
+  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
 
   // Support for inc/dec with optimal instruction selection depending on value
 
--- a/src/cpu/x86/vm/cppInterpreter_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/cppInterpreter_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,6 @@
   // Size of interpreter code.  Increase if too small.  Interpreter will
   // fail with a guarantee ("not enough space for interpreter generation");
   // if too small.
-  // Run with +PrintInterpreterSize to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Run with +PrintInterpreter to get the VM to print out the size.
+  // Max size with JVMTI
   const static int InterpreterCodeSize = 168 * 1024;
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/frame_x86.cpp	Thu May 13 13:05:47 2010 -0700
@@ -502,7 +502,7 @@
   // When unpacking an optimized frame the frame pointer is
   // adjusted with:
   int diff = (method->max_locals() - method->size_of_parameters()) *
-             Interpreter::stackElementWords();
+             Interpreter::stackElementWords;
   return _fp == (fp - diff);
 }
 
@@ -542,7 +542,7 @@
 
   // stack frames shouldn't be much larger than max_stack elements
 
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
+  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }
 
@@ -594,7 +594,7 @@
 #ifdef AMD64
       // This is times two because we do a push(ltos) after pushing XMM0
       // and that takes two interpreter stack slots.
-      tos_addr += 2 * Interpreter::stackElementWords();
+      tos_addr += 2 * Interpreter::stackElementWords;
 #else
       tos_addr += 2;
 #endif // AMD64
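
Two pieces of slot arithmetic survive in frame_x86.cpp: the frame-pointer adjustment of (max_locals - size_of_parameters) * stackElementWords used when unpacking an optimized frame, and the fp - sp sanity bound of roughly max_stack * stackElementSize plus slack. A tiny sketch of both computations with made-up method numbers and a word size assumed to be 8:

#include <cstdio>

const int wordSize          = 8;   // assumed 64-bit
const int stackElementWords = 1;   // one word per interpreter slot after this change
const int stackElementSize  = stackElementWords * wordSize;

int main() {
  int max_locals         = 10;     // example values, not from a real method
  int size_of_parameters = 3;
  int max_stack          = 6;

  // Locals that are not parameters push the expected fp away from the nominal
  // one by this many words.
  printf("fp adjustment: %d words\n",
         (max_locals - size_of_parameters) * stackElementWords);

  // Upper bound in the sanity check: a frame should not be much larger than
  // its expression stack plus some slack.
  printf("fp - sp limit: %d bytes\n", 1024 + max_stack * stackElementSize);
  return 0;
}
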
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -265,89 +265,30 @@
 
 // Java Expression Stack
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
-  if (TaggedStackInterpreter) {
-    Label okay;
-    cmpptr(Address(rsp, wordSize), (int32_t)t);
-    jcc(Assembler::equal, okay);
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rsp, 0), 0);
-    jcc(Assembler::equal, okay);
-    stop("Java Expression stack tag value is bad");
-    bind(okay);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_ptr(Register r) {
-  debug_only(verify_stack_tag(frame::TagReference));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-}
-
-void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
-  pop(r);
-  // Tag may not be reference for jsr, can be returnAddress
-  if (TaggedStackInterpreter) pop(tag);
 }
 
 void InterpreterMacroAssembler::pop_i(Register r) {
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(lo);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(hi);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_f() {
-  debug_only(verify_stack_tag(frame::TagValue));
   fld_s(Address(rsp, 0));
   addptr(rsp, 1 * wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_d() {
-  // Write double to stack contiguously and load into ST0
-  pop_dtos_to_rsp();
   fld_d(Address(rsp, 0));
   addptr(rsp, 2 * wordSize);
 }
 
 
-// Pop the top of the java expression stack to execution stack (which
-// happens to be the same place).
-void InterpreterMacroAssembler::pop_dtos_to_rsp() {
-  if (TaggedStackInterpreter) {
-    // Pop double value into scratch registers
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rax);
-    addptr(rsp, 1* wordSize);
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rdx);
-    addptr(rsp, 1* wordSize);
-    push(rdx);
-    push(rax);
-  }
-}
-
-void InterpreterMacroAssembler::pop_ftos_to_rsp() {
-  if (TaggedStackInterpreter) {
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rax);
-    addptr(rsp, 1 * wordSize);
-    push(rax);  // ftos is at rsp
-  }
-}
-
 void InterpreterMacroAssembler::pop(TosState state) {
   switch (state) {
     case atos: pop_ptr(rax);                                 break;
@@ -365,54 +306,28 @@
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagReference);
-  push(r);
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  if (TaggedStackInterpreter) push(tag);  // tag first
   push(r);
 }
 
 void InterpreterMacroAssembler::push_i(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(hi);
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(lo);
 }
 
 void InterpreterMacroAssembler::push_f() {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   // Do not schedule for no AGI! Never write beyond rsp!
   subptr(rsp, 1 * wordSize);
   fstp_s(Address(rsp, 0));
 }
 
 void InterpreterMacroAssembler::push_d(Register r) {
-  if (TaggedStackInterpreter) {
-    // Double values are stored as:
-    //   tag
-    //   high
-    //   tag
-    //   low
-    push(frame::TagValue);
-    subptr(rsp, 3 * wordSize);
-    fstp_d(Address(rsp, 0));
-    // move high word up to slot n-1
-    movl(r, Address(rsp, 1*wordSize));
-    movl(Address(rsp, 2*wordSize), r);
-    // move tag
-    movl(Address(rsp, 1*wordSize), frame::TagValue);
-  } else {
-    // Do not schedule for no AGI! Never write beyond rsp!
-    subptr(rsp, 2 * wordSize);
-    fstp_d(Address(rsp, 0));
-  }
+  // Do not schedule for no AGI! Never write beyond rsp!
+  subptr(rsp, 2 * wordSize);
+  fstp_d(Address(rsp, 0));
 }
 
 
@@ -433,118 +348,15 @@
 }
 
 
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
-  if (TaggedStackInterpreter) {
-    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
-  }
-}
-
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
-  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
-  if (TaggedStackInterpreter) {
-    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
-  }
-}
-
-
-// Tagged local support
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                    Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                               Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                           Interpreter::local_tag_offset_in_bytes(0)), tag);
-  }
 }
 
-
-void InterpreterMacroAssembler::tag_local(Register tag, int n) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
-  }
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-     frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_offset_in_bytes(0)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 }
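
The pop_* and push_* helpers above lose their tag handling: previously each expression-stack slot was a tag word plus a value word, so every pop also had to skip a word, while now a slot is a single word. A standalone toy model of the two layouts, with a std::vector standing in for the machine stack and an arbitrary tag constant:

#include <cstdio>
#include <vector>

struct TaggedStack {                 // old layout: a tag word pushed before the value
  std::vector<long> words;
  void push_ptr(long v) { words.push_back(1 /* tag */); words.push_back(v); }
  long pop_ptr() {
    long v = words.back(); words.pop_back();
    words.pop_back();                // discard the tag word, like addptr(rsp, wordSize)
    return v;
  }
};

struct PlainStack {                  // new layout: one word per slot
  std::vector<long> words;
  void push_ptr(long v) { words.push_back(v); }
  long pop_ptr()        { long v = words.back(); words.pop_back(); return v; }
};

int main() {
  TaggedStack t;  t.push_ptr(42);
  PlainStack  p;  p.push_ptr(42);
  printf("tagged slot uses %zu words, plain slot uses %zu\n",
         t.words.size(), p.words.size());           // 2 vs 1
  printf("popped %ld and %ld\n", t.pop_ptr(), p.pop_ptr());
  return 0;
}
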
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,16 +85,12 @@
   void d2ieee();                                           // truncate dtos to 64bits
 
   void pop_ptr(Register r = rax);
-  void pop_ptr(Register r, Register tag);
   void pop_i(Register r = rax);
   void pop_l(Register lo = rax, Register hi = rdx);
   void pop_f();
   void pop_d();
-  void pop_ftos_to_rsp();
-  void pop_dtos_to_rsp();
 
   void push_ptr(Register r = rax);
-  void push_ptr(Register r, Register tag);
   void push_i(Register r = rax);
   void push_l(Register lo = rax, Register hi = rdx);
   void push_d(Register r = rax);
@@ -112,33 +108,15 @@
   void pop(void* v ); // Add unimplemented ambiguous method
   void push(void* v );   // Add unimplemented ambiguous method
 
-  DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
-
-#endif // CC_INTERP
-
-#ifndef CC_INTERP
-
-  void empty_expression_stack()                            {
-       movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
-      // NULL last_sp until next java call
-      movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  void empty_expression_stack() {
+    movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
+    // NULL last_sp until next java call
+    movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   }
 
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
-
-  // Tagged Local support
-
-  void tag_local(frame::Tag tag, int n);
-  void tag_local(Register tag, int n);
-  void tag_local(frame::Tag tag, Register idx);
-  void tag_local(Register tag, Register idx);
-
-#ifdef ASSERT
-  void verify_local_tag(frame::Tag tag, int n);
-  void verify_local_tag(frame::Tag tag, Register idx);
-#endif // ASSERT
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
 
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
   void super_call_VM_leaf(address entry_point);
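
load_ptr and store_ptr declared above address expression-stack slots by their distance from the top, which is all the dup*/swap templates need once tags are gone. A standalone sketch of dup_x1 and swap written against that interface, with a vector whose back() is the top of stack; the results match the // stack: comments in the SPARC templates earlier in this changeset:

#include <cstdio>
#include <string>
#include <vector>

typedef std::vector<std::string> Stack;               // back() is the top slot

std::string load_ptr(const Stack& s, int n)           { return s[s.size() - 1 - n]; }
void store_ptr(Stack& s, int n, const std::string& v) { s[s.size() - 1 - n] = v; }
void push_ptr(Stack& s, const std::string& v)         { s.push_back(v); }

void dup_x1(Stack& s) {                            // ..., a, b  ->  ..., b, a, b
  std::string a = load_ptr(s, 1);
  std::string b = load_ptr(s, 0);
  store_ptr(s, 1, b);
  store_ptr(s, 0, a);
  push_ptr(s, b);
}

void swap_top(Stack& s) {                          // ..., a, b  ->  ..., b, a
  std::string a = load_ptr(s, 1);
  std::string b = load_ptr(s, 0);
  store_ptr(s, 0, a);
  store_ptr(s, 1, b);
}

void print(const char* tag, const Stack& s) {
  printf("%s:", tag);
  for (size_t i = 0; i < s.size(); i++) printf(" %s", s[i].c_str());
  printf("\n");
}

int main() {
  Stack s; s.push_back("a"); s.push_back("b");
  dup_x1(s);   print("dup_x1", s);                 // expect: b a b
  Stack t; t.push_back("a"); t.push_back("b");
  swap_top(t); print("swap  ", t);                 // expect: b a
  return 0;
}
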
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -264,113 +264,51 @@
 
 // Java Expression Stack
 
-#ifdef ASSERT
-// Verifies that the stack tag matches.  Must be called before the stack
-// value is popped off the stack.
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
-  if (TaggedStackInterpreter) {
-    frame::Tag tag = t;
-    if (t == frame::TagCategory2) {
-      tag = frame::TagValue;
-      Label hokay;
-      cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
-      jcc(Assembler::equal, hokay);
-      stop("Java Expression stack tag high value is bad");
-      bind(hokay);
-    }
-    Label okay;
-    cmpptr(Address(rsp, wordSize), (int32_t)tag);
-    jcc(Assembler::equal, okay);
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rsp, 0), 0);
-    jcc(Assembler::equal, okay);
-    stop("Java Expression stack tag value is bad");
-    bind(okay);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_ptr(Register r) {
-  debug_only(verify_stack_tag(frame::TagReference));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-}
-
-void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
-  pop(r);
-  if (TaggedStackInterpreter) pop(tag);
 }
 
 void InterpreterMacroAssembler::pop_i(Register r) {
   // XXX can't use pop currently, upper half non clean
-  debug_only(verify_stack_tag(frame::TagValue));
   movl(r, Address(rsp, 0));
   addptr(rsp, wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_l(Register r) {
-  debug_only(verify_stack_tag(frame::TagCategory2));
   movq(r, Address(rsp, 0));
-  addptr(rsp, 2 * Interpreter::stackElementSize());
+  addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
-  debug_only(verify_stack_tag(frame::TagValue));
   movflt(r, Address(rsp, 0));
   addptr(rsp, wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
-  debug_only(verify_stack_tag(frame::TagCategory2));
   movdbl(r, Address(rsp, 0));
-  addptr(rsp, 2 * Interpreter::stackElementSize());
+  addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagReference);
-  push(r);
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  if (TaggedStackInterpreter) push(tag);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_i(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_l(Register r) {
-  if (TaggedStackInterpreter) {
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-  } else {
-    subptr(rsp, 2 * wordSize);
-  }
+  subptr(rsp, 2 * wordSize);
   movq(Address(rsp, 0), r);
 }
 
 void InterpreterMacroAssembler::push_f(XMMRegister r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   subptr(rsp, wordSize);
   movflt(Address(rsp, 0), r);
 }
 
 void InterpreterMacroAssembler::push_d(XMMRegister r) {
-  if (TaggedStackInterpreter) {
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-  } else {
-    subptr(rsp, 2 * wordSize);
-  }
+  subptr(rsp, 2 * wordSize);
   movdbl(Address(rsp, 0), r);
 }
 
@@ -407,117 +345,15 @@
 }
 
 
-
-
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
-  if (TaggedStackInterpreter) {
-    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
-  }
-}
-
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
-  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
-  if (TaggedStackInterpreter) {
-    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
-  }
-}
-
-
-// Tagged local support
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
-           (int32_t)frame::TagValue);
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
-           (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(r14, idx, Address::times_8,
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
-      movptr(Address(r14, idx, Address::times_8,
-                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
-           (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
-  }
 }
 
-
-void InterpreterMacroAssembler::tag_local(Register tag, int n) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
-  }
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-     frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-#endif // ASSERT
-
 
 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
   MacroAssembler::call_VM_leaf_base(entry_point, 0);
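
On the 64-bit side, push_l/push_d and pop_l/pop_d above still move rsp by two expression-stack slots even though the long or double value fits in one machine word, preserving the bytecode-level two-slot accounting for category-2 values. A small sketch of that layout; the buffer size, value, and 8-byte word size are arbitrary choices for the example:

#include <cstdint>
#include <cstdio>
#include <cstring>

const int stackElementSize = 8;        // one untagged word per slot, assumed 64-bit

int main() {
  unsigned char stack[64] = {0};
  int sp = sizeof(stack);              // byte index; grows downward like rsp

  // push_l: reserve two slots, store the 64-bit value in the lower one.
  int64_t value = 123456789012345LL;
  sp -= 2 * stackElementSize;
  std::memcpy(stack + sp, &value, sizeof value);

  // pop_l: read the value back, then release both slots at once.
  int64_t v;
  std::memcpy(&v, stack + sp, sizeof v);
  sp += 2 * stackElementSize;

  printf("value %lld, sp back at %d\n", (long long)v, sp);
  return 0;
}
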
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,38 +120,16 @@
   void pop(TosState state); // transition vtos -> state
   void push(TosState state); // transition state -> vtos
 
-  // Tagged stack support, pop and push both tag and value.
-  void pop_ptr(Register r, Register tag);
-  void push_ptr(Register r, Register tag);
-#endif // CC_INTERP
-
-  DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
-
-#ifndef CC_INTERP
-
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
-
-  // Tagged Local support
-  void tag_local(frame::Tag tag, int n);
-  void tag_local(Register tag, int n);
-  void tag_local(frame::Tag tag, Register idx);
-  void tag_local(Register tag, Register idx);
-
-#ifdef ASSERT
-  void verify_local_tag(frame::Tag tag, int n);
-  void verify_local_tag(frame::Tag tag, Register idx);
-#endif // ASSERT
-
-
-  void empty_expression_stack()
-  {
+  void empty_expression_stack() {
     movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
     // NULL last_sp until next java call
     movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
   }
 
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
+
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
   void super_call_VM_leaf(address entry_point);
   void super_call_VM_leaf(address entry_point, Register arg_1);
--- a/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,33 +86,23 @@
   address   _from;
   intptr_t* _to;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int() {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
   }
 
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
     _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
     _to += 2;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
   }
 
   virtual void pass_object() {
     // pass address of from
     intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
     *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
    }
 
  public:
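
With the tags gone, the 32-bit signature handler above steps its _from cursor by one stackElementSize per category-1 argument and by two for a long, with no tag check in between. A minimal sketch of that bookkeeping only; the starting offset and 4-byte slot size are assumptions, and the value copying done by the real handler is reduced to a print:

#include <cstdio>

const int stackElementSize = 4;        // assumed 32-bit slot size

struct SlotWalker {
  int from;                            // byte cursor into the argument area
  void pass_int()    { printf("int    slot at %d\n", from); from -= stackElementSize; }
  void pass_object() { printf("object slot at %d\n", from); from -= stackElementSize; }
  void pass_long()   { printf("long   slots at %d\n", from); from -= 2 * stackElementSize; }
};

int main() {
  SlotWalker w = { 100 };              // arbitrary starting offset
  w.pass_long();                       // example signature: (J I Ljava/lang/Object;)
  w.pass_int();
  w.pass_object();
  printf("cursor moved %d bytes for 4 slots\n", 100 - w.from);   // 16
  return 0;
}
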
--- a/src/cpu/x86/vm/interpreterRT_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interpreterRT_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -293,18 +293,10 @@
   intptr_t* _fp_identifiers;
   unsigned int _num_args;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -317,8 +309,7 @@
   virtual void pass_long()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -331,8 +322,7 @@
   virtual void pass_object()
   {
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
       _num_args++;
@@ -344,8 +334,7 @@
   virtual void pass_float()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_float_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -359,8 +348,7 @@
   virtual void pass_double()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_float_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -397,18 +385,10 @@
   unsigned int _num_int_args;
   unsigned int _num_fp_args;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = from_obj;
@@ -421,8 +401,7 @@
   virtual void pass_long()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = from_obj;
@@ -435,8 +414,7 @@
   virtual void pass_object()
   {
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
@@ -449,8 +427,7 @@
   virtual void pass_float()
   {
     jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_fp_args < Argument::n_float_register_parameters_c) {
       *_fp_args++ = from_obj;
@@ -463,7 +440,7 @@
   virtual void pass_double()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_fp_args < Argument::n_float_register_parameters_c) {
       *_fp_args++ = from_obj;
--- a/src/cpu/x86/vm/interpreter_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interpreter_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,39 +31,16 @@
   // the fpu stack.
   static const int return_sentinel;
 
-
-  static Address::ScaleFactor stackElementScale() {
-    return TaggedStackInterpreter? Address::times_8 : Address::times_4;
-  }
+  static Address::ScaleFactor stackElementScale() { return Address::times_4; }
 
   // Offset from rsp (which points to the last stack element)
-  static int expr_offset_in_bytes(int i) { return stackElementSize()*i ; }
-  static int expr_tag_offset_in_bytes(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return expr_offset_in_bytes(i) + wordSize;
-  }
-
-  // Support for Tagged Stacks
+  static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
 
   // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)     {
-    return stackElementWords() * i;
-  }
-
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    // tag is one word above java stack element
-    return stackElementWords() * i + 1;
-  }
+  static int expr_index_at(int i)        { return stackElementWords * i; }
 
   // Already negated by c++ interpreter
-  static int local_index_at(int i)     {
-    assert(i<=0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
   }
-
-  static int local_tag_index_at(int i) {
-    assert(i<=0, "local direction already negated");
-    assert(TaggedStackInterpreter, "should not call this");
-    return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
-  }
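
The helpers in interpreter_x86.hpp reduce to plain multiples of stackElementSize and stackElementWords once the tag slots disappear. A standalone restatement of them as free functions, assuming the 4-byte word that this header's times_4 scale factor implies for the 32-bit build:

#include <cassert>
#include <cstdio>

const int wordSize          = 4;       // assumed 32-bit build
const int stackElementWords = 1;
const int stackElementSize  = stackElementWords * wordSize;

// Byte offset of expression-stack slot i, counted from the last stack element.
int expr_offset_in_bytes(int i) { return stackElementSize * i; }

// Word index of expression-stack slot i relative to tos.
int expr_index_at(int i)        { return stackElementWords * i; }

// Word index of local i; the caller has already negated the direction.
int local_index_at(int i) {
  assert(i <= 0 && "local direction already negated");
  return stackElementWords * i;
}

int main() {
  printf("expr slot 2: %d bytes, %d words\n",
         expr_offset_in_bytes(2), expr_index_at(2));   // 8 bytes, 2 words
  printf("local -3: %d words\n", local_index_at(-3));  // -3
  return 0;
}
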
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,14 +131,7 @@
   //       java methods.  Interpreter::method_kind(...) will select
   //       this entry point for the corresponding methods in JDK 1.3.
   // get argument
-  if (TaggedStackInterpreter) {
-    __ pushl(Address(rsp, 3*wordSize));  // push hi (and note rsp -= wordSize)
-    __ pushl(Address(rsp, 2*wordSize));  // push lo
-    __ fld_d(Address(rsp, 0));           // get double in ST0
-    __ addptr(rsp, 2*wordSize);
-  } else {
-    __ fld_d(Address(rsp, 1*wordSize));
-  }
+  __ fld_d(Address(rsp, 1*wordSize));
   switch (kind) {
     case Interpreter::java_lang_math_sin :
         __ trigfunc('s');
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu May 13 13:05:47 2010 -0700
@@ -127,7 +127,8 @@
                                      RegisterOrConstant arg_slots,
                                      int arg_mask,
                                      Register rax_argslot,
-                                     Register rbx_temp, Register rdx_temp) {
+                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
+  assert(temp3_reg == noreg, "temp3 not required");
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 
@@ -185,7 +186,8 @@
 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register rax_argslot,
-                                    Register rbx_temp, Register rdx_temp) {
+                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
+  assert(temp3_reg == noreg, "temp3 not required");
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 
@@ -260,6 +262,22 @@
 }
 #endif //PRODUCT
 
+// which conversion op types are implemented here?
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
+         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+         );
+  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+}
+
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
@@ -498,7 +516,7 @@
 #ifndef _LP64
         if (arg_slots == 2) {
           __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
-          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
+          __ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
         }
 #endif //_LP64
       }
@@ -594,7 +612,7 @@
           __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
           remove_arg_slots(_masm, -stack_move_unit(),
                            rax_argslot, rbx_temp, rdx_temp);
-          vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
+          vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
           __ movl(rdx_temp, vmarg);
         }
         break;
@@ -663,8 +681,8 @@
       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                        rax_argslot, rbx_temp, rdx_temp);
-      Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
-      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
+      Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
+      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
 
       switch (ek) {
       case _adapter_opt_i2l:
@@ -716,7 +734,7 @@
         insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                          rax_argslot, rbx_temp, rdx_temp);
       }
-      Address vmarg(rax_argslot, -Interpreter::stackElementSize());
+      Address vmarg(rax_argslot, -Interpreter::stackElementSize);
 
 #ifdef _LP64
       if (ek == _adapter_opt_f2d) {
@@ -1014,7 +1032,7 @@
       // Array length checks out.  Now insert any required stack slots.
       if (length_constant == -1) {
         // Form a pointer to the end of the affected region.
-        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
+        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
         // 'stack_move' is negative number of words to insert
         Register rdi_stack_move = rdi;
         __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
@@ -1052,7 +1070,7 @@
         __ movptr(rbx_temp, Address(rsi_source, 0));
         __ movptr(Address(rax_argslot, 0), rbx_temp);
         __ addptr(rsi_source, type2aelembytes(elem_type));
-        __ addptr(rax_argslot, Interpreter::stackElementSize());
+        __ addptr(rax_argslot, Interpreter::stackElementSize);
         __ cmpptr(rax_argslot, rdx_argslot_limit);
         __ jccb(Assembler::less, loop);
       } else if (length_constant == 0) {
@@ -1065,7 +1083,7 @@
           __ movptr(rbx_temp, Address(rsi_array, elem_offset));
           __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
           elem_offset += type2aelembytes(elem_type);
-           slot_offset += Interpreter::stackElementSize();
+           slot_offset += Interpreter::stackElementSize;
         }
       }
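For orientation, the new adapter_conversion_ops_supported_mask() in the hunk above simply publishes a bitmask of the sun_dyn_AdapterMethodHandle OP_* codes this port generates adapter stubs for. A minimal sketch of how such a mask could be probed; conversion_op_supported is a hypothetical helper, not part of the patch:

// Hypothetical helper, not part of the patch: test one OP_* code against the mask above.
static bool conversion_op_supported(int op) {
  return ((1 << op) & MethodHandles::adapter_conversion_ops_supported_mask()) != 0;
}
// With the mask as written, OP_SWAP_ARGS would report as supported, while OP_SPREAD_ARGS
// stays off until the crash noted in the FIXME is resolved.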
 
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -503,34 +503,9 @@
 }
 
 
-// Helper function to put tags in interpreter stack.
-static void  tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
-  if (TaggedStackInterpreter) {
-    int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
-    if (sig == T_OBJECT || sig == T_ARRAY) {
-      __ movptr(Address(rsp, tag_offset), frame::TagReference);
-    } else if (sig == T_LONG || sig == T_DOUBLE) {
-      int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
-      __ movptr(Address(rsp, next_tag_offset), frame::TagValue);
-      __ movptr(Address(rsp, tag_offset), frame::TagValue);
-    } else {
-      __ movptr(Address(rsp, tag_offset), frame::TagValue);
-    }
-  }
-}
-
-// Double and long values with Tagged stacks are not contiguous.
 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
-  int next_off = st_off - Interpreter::stackElementSize();
-  if (TaggedStackInterpreter) {
-   __ movdbl(Address(rsp, next_off), r);
-   // Move top half up and put tag in the middle.
-   __ movl(rdi, Address(rsp, next_off+wordSize));
-   __ movl(Address(rsp, st_off), rdi);
-   tag_stack(masm, T_DOUBLE, next_off);
-  } else {
-   __ movdbl(Address(rsp, next_off), r);
-  }
+  int next_off = st_off - Interpreter::stackElementSize;
+  __ movdbl(Address(rsp, next_off), r);
 }
 
 static void gen_c2i_adapter(MacroAssembler *masm,
@@ -560,7 +535,7 @@
   // Since all args are passed on the stack, total_args_passed * interpreter_
   // stack_element_size  is the
   // space we need.
-  int extraspace = total_args_passed * Interpreter::stackElementSize();
+  int extraspace = total_args_passed * Interpreter::stackElementSize;
 
   // Get return address
   __ pop(rax);
@@ -578,8 +553,8 @@
     }
 
     // st_off points to lowest address on stack.
-    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
-    int next_off = st_off - Interpreter::stackElementSize();
+    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
+    int next_off = st_off - Interpreter::stackElementSize;
 
     // Say 4 args:
     // i   st_off
@@ -601,7 +576,6 @@
       if (!r_2->is_valid()) {
         __ movl(rdi, Address(rsp, ld_off));
         __ movptr(Address(rsp, st_off), rdi);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
 
         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
@@ -619,13 +593,11 @@
         __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
 #endif // _LP64
-        tag_stack(masm, sig_bt[i], next_off);
       }
     } else if (r_1->is_Register()) {
       Register r = r_1->as_Register();
       if (!r_2->is_valid()) {
         __ movl(Address(rsp, st_off), r);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
         // long/double in gpr
         NOT_LP64(ShouldNotReachHere());
@@ -639,17 +611,14 @@
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
           __ movptr(Address(rsp, next_off), r);
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movptr(Address(rsp, st_off), r);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else {
       assert(r_1->is_XMMRegister(), "");
       if (!r_2->is_valid()) {
         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
@@ -665,20 +634,9 @@
 }
 
 
-// For tagged stacks, double or long value aren't contiguous on the stack
-// so get them contiguous for the xmm load
 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
-  int next_val_off = ld_off - Interpreter::stackElementSize();
-  if (TaggedStackInterpreter) {
-    // use tag slot temporarily for MSW
-    __ movptr(rsi, Address(saved_sp, ld_off));
-    __ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
-    __ movdbl(r, Address(saved_sp, next_val_off));
-    // restore tag
-    __ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
-  } else {
-    __ movdbl(r, Address(saved_sp, next_val_off));
-  }
+  int next_val_off = ld_off - Interpreter::stackElementSize;
+  __ movdbl(r, Address(saved_sp, next_val_off));
 }
 
 static void gen_i2c_adapter(MacroAssembler *masm,
@@ -797,9 +755,9 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
     // Point to interpreter value (vs. tag)
-    int next_off = ld_off - Interpreter::stackElementSize();
+    int next_off = ld_off - Interpreter::stackElementSize;
     //
     //
     //
@@ -2322,7 +2280,7 @@
 // this function returns the adjust size (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
-  return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
 }
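The c2i adapter above now assumes one untagged machine word per interpreter stack slot, so the slot offsets reduce to plain multiples of Interpreter::stackElementSize. A rough sketch of that arithmetic, with the element size passed in explicitly and the helper name invented for illustration:

// Hypothetical illustration of the st_off computation in gen_c2i_adapter above.
static int c2i_slot_offset(int total_args_passed, int i, int stack_element_size) {
  // Argument 0 sits at the highest offset, the last argument at the lowest; a long or
  // double additionally fills the adjacent slot at st_off - stack_element_size (next_off).
  return ((total_args_passed - 1) - i) * stack_element_size;
}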
 
 
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -452,22 +452,6 @@
   __ bind(L);
 }
 
-// Helper function to put tags in interpreter stack.
-static void  tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
-  if (TaggedStackInterpreter) {
-    int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
-    if (sig == T_OBJECT || sig == T_ARRAY) {
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
-    } else if (sig == T_LONG || sig == T_DOUBLE) {
-      int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
-      __ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
-    } else {
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
-    }
-  }
-}
-
 
 static void gen_c2i_adapter(MacroAssembler *masm,
                             int total_args_passed,
@@ -489,7 +473,7 @@
   // we also account for the return address location since
   // we store it first rather than hold it in rax across all the shuffling
 
-  int extraspace = (total_args_passed * Interpreter::stackElementSize()) + wordSize;
+  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
 
   // stack is aligned, keep it that way
   extraspace = round_to(extraspace, 2*wordSize);
@@ -513,9 +497,8 @@
     }
 
     // offset to start parameters
-    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize() +
-                   Interpreter::value_offset_in_bytes();
-    int next_off = st_off - Interpreter::stackElementSize();
+    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
+    int next_off = st_off - Interpreter::stackElementSize;
 
     // Say 4 args:
     // i   st_off
@@ -543,7 +526,6 @@
         // sign extend??
         __ movl(rax, Address(rsp, ld_off));
         __ movptr(Address(rsp, st_off), rax);
-        tag_stack(masm, sig_bt[i], st_off);
 
       } else {
 
@@ -560,10 +542,8 @@
           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movq(Address(rsp, st_off), rax);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else if (r_1->is_Register()) {
@@ -572,7 +552,6 @@
         // must be only an int (or less ) so move only 32bits to slot
         // why not sign extend??
         __ movl(Address(rsp, st_off), r);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
         // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
         // T_DOUBLE and T_LONG use two slots in the interpreter
@@ -584,10 +563,8 @@
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
           __ movq(Address(rsp, next_off), r);
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movptr(Address(rsp, st_off), r);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else {
@@ -595,7 +572,6 @@
       if (!r_2->is_valid()) {
         // only a float use just part of the slot
         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
 #ifdef ASSERT
         // Overwrite the unused slot with known junk
@@ -603,7 +579,6 @@
         __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], next_off);
       }
     }
   }
@@ -688,9 +663,9 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
     // Point to interpreter value (vs. tag)
-    int next_off = ld_off - Interpreter::stackElementSize();
+    int next_off = ld_off - Interpreter::stackElementSize;
     //
     //
     //
@@ -2535,7 +2510,7 @@
 // this function returns the adjust size (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
-  return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
 }
 
 
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -139,7 +139,7 @@
     // stub code
     __ enter();
     __ movptr(rcx, parameter_size);              // parameter counter
-    __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
+    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
     __ addptr(rcx, locals_count_in_bytes);       // reserve space for register saves
     __ subptr(rsp, rcx);
     __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
@@ -194,12 +194,6 @@
     __ xorptr(rbx, rbx);
 
     __ BIND(loop);
-    if (TaggedStackInterpreter) {
-      __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
-                      -2*wordSize));                          // get tag
-      __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
-                      Interpreter::expr_tag_offset_in_bytes(0)), rax);     // store tag
-    }
 
     // get parameter
     __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -278,11 +278,6 @@
     __ movptr(c_rarg2, parameters);       // parameter pointer
     __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
     __ BIND(loop);
-    if (TaggedStackInterpreter) {
-      __ movl(rax, Address(c_rarg2, 0)); // get tag
-      __ addptr(c_rarg2, wordSize);      // advance to next tag
-      __ push(rax);                      // pass tag
-    }
     __ movptr(rax, Address(c_rarg2, 0));// get parameter
     __ addptr(c_rarg2, wordSize);       // advance to next parameter
     __ decrementl(c_rarg1);             // decrement counter
--- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
   // Size of interpreter code.  Increase if too small.  Interpreter will
   // fail with a guarantee ("not enough space for interpreter generation");
   // if too small.
-  // Run with +PrintInterpreterSize to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Run with +PrintInterpreter to get the VM to print out the size.
+  // Max size with JVMTI
 #ifdef AMD64
   const static int InterpreterCodeSize = 200 * 1024;
 #else
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -305,7 +305,6 @@
     case T_FLOAT  :
       { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
         __ pop(t);                            // remove return address first
-        __ pop_dtos_to_rsp();
         // Must return a result for interpreter or compiler. In SSE
         // mode, results are returned in xmm0 and the FPU stack must
         // be empty.
@@ -468,7 +467,7 @@
   // see if the frame is greater than one page in size. If so,
   // then we need to verify there is enough stack space remaining
   // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize());
+  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
   __ jcc(Assembler::belowEqual, after_frame_check);
 
   // compute rsp as if this were going to be the last frame on
@@ -882,7 +881,7 @@
   __ get_method(method);
   __ verify_oop(method);
   __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
-  __ shlptr(t, Interpreter::logStackElementSize());
+  __ shlptr(t, Interpreter::logStackElementSize);
   __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
   __ subptr(rsp, t);
   __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
@@ -1225,9 +1224,6 @@
     __ testl(rdx, rdx);
     __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
     __ bind(loop);
-    if (TaggedStackInterpreter) {
-      __ push((int32_t)NULL_WORD);                    // push tag
-    }
     __ push((int32_t)NULL_WORD);                      // initialize local variables
     __ decrement(rdx);                                // until everything initialized
     __ jcc(Assembler::greater, loop);
@@ -1463,7 +1459,7 @@
 
   const int extra_stack = methodOopDesc::extra_stack_entries();
   const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
-                           Interpreter::stackElementWords();
+                           Interpreter::stackElementWords;
   return overhead_size + method_stack + stub_code;
 }
 
@@ -1487,9 +1483,9 @@
   // NOTE: return size is in words not bytes
 
   // fixed size of an interpreter frame:
-  int max_locals = method->max_locals() * Interpreter::stackElementWords();
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-                     Interpreter::stackElementWords();
+                     Interpreter::stackElementWords;
 
   int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
 
@@ -1499,9 +1495,9 @@
 
 
   int size = overhead +
-         ((callee_locals - callee_param_count)*Interpreter::stackElementWords()) +
+         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
          (moncount*frame::interpreter_frame_monitor_size()) +
-         tempcount*Interpreter::stackElementWords() + popframe_extra_args;
+         tempcount*Interpreter::stackElementWords + popframe_extra_args;
 
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
@@ -1525,7 +1521,7 @@
 
     // Set last_sp
     intptr_t*  rsp = (intptr_t*) monbot  -
-                     tempcount*Interpreter::stackElementWords() -
+                     tempcount*Interpreter::stackElementWords -
                      popframe_extra_args;
     interpreter_frame->interpreter_frame_set_last_sp(rsp);
 
@@ -1625,7 +1621,7 @@
     __ get_method(rax);
     __ verify_oop(rax);
     __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
-    __ shlptr(rax, Interpreter::logStackElementSize());
+    __ shlptr(rax, Interpreter::logStackElementSize);
     __ restore_locals();
     __ subptr(rdi, rax);
     __ addptr(rdi, wordSize);
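The frame-size bookkeeping earlier in this file's diff now multiplies by Interpreter::stackElementWords as a plain constant. A sketch of the same sum with hypothetical parameter names, assuming one word per stack element as on this port:

// Hypothetical restatement of the size computation in the hunk above; not part of the patch.
static int interpreter_frame_size_in_words(int overhead, int callee_locals,
                                           int callee_param_count, int moncount,
                                           int monitor_size_in_words, int tempcount,
                                           int popframe_extra_args) {
  const int stack_element_words = 1;  // stand-in for Interpreter::stackElementWords on x86
  return overhead
       + (callee_locals - callee_param_count) * stack_element_words
       + moncount * monitor_size_in_words
       + tempcount * stack_element_words
       + popframe_extra_args;
}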
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -199,7 +199,6 @@
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
-  if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);
 
@@ -417,7 +416,7 @@
   // see if the frame is greater than one page in size. If so,
   // then we need to verify there is enough stack space remaining
   // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
+  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
   __ jcc(Assembler::belowEqual, after_frame_check);
 
   // compute rsp as if this were going to be the last frame on
@@ -428,7 +427,7 @@
 
   // locals + overhead, in bytes
   __ mov(rax, rdx);
-  __ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
+  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
   __ addptr(rax, overhead_size);
 
 #ifdef ASSERT
@@ -759,7 +758,6 @@
   // for natives the size of locals is zero
 
   // compute beginning of parameters (r14)
-  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
   __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
 
   // add 2 zero-initialized slots for native calls
@@ -865,7 +863,7 @@
   __ load_unsigned_short(t,
                          Address(method,
                                  methodOopDesc::size_of_parameters_offset()));
-  __ shll(t, Interpreter::logStackElementSize());
+  __ shll(t, Interpreter::logStackElementSize);
 
   __ subptr(rsp, t);
   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@@ -1228,7 +1226,6 @@
   __ pop(rax);
 
   // compute beginning of parameters (r14)
-  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
   __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
 
   // rdx - # of additional locals
@@ -1239,7 +1236,6 @@
     __ testl(rdx, rdx);
     __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
     __ bind(loop);
-    if (TaggedStackInterpreter) __ push((int) NULL_WORD);  // push tag
     __ push((int) NULL_WORD); // initialize local variables
     __ decrementl(rdx); // until everything initialized
     __ jcc(Assembler::greater, loop);
@@ -1486,7 +1482,7 @@
   const int stub_code = frame::entry_frame_after_call_words;
   const int extra_stack = methodOopDesc::extra_stack_entries();
   const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
-                           Interpreter::stackElementWords();
+                           Interpreter::stackElementWords;
   return (overhead_size + method_stack + stub_code);
 }
 
@@ -1507,9 +1503,9 @@
   // It is also guaranteed to be walkable even though it is in a skeletal state
 
   // fixed size of an interpreter frame:
-  int max_locals = method->max_locals() * Interpreter::stackElementWords();
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-                     Interpreter::stackElementWords();
+                     Interpreter::stackElementWords;
 
   int overhead = frame::sender_sp_offset -
                  frame::interpreter_frame_initial_sp_offset;
@@ -1518,9 +1514,9 @@
   // for the callee's params we only need to account for the extra
   // locals.
   int size = overhead +
-         (callee_locals - callee_param_count)*Interpreter::stackElementWords() +
+         (callee_locals - callee_param_count)*Interpreter::stackElementWords +
          moncount * frame::interpreter_frame_monitor_size() +
-         tempcount* Interpreter::stackElementWords() + popframe_extra_args;
+         tempcount* Interpreter::stackElementWords + popframe_extra_args;
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
     if (!EnableMethodHandles)
@@ -1544,7 +1540,7 @@
 
     // Set last_sp
     intptr_t*  esp = (intptr_t*) monbot -
-                     tempcount*Interpreter::stackElementWords() -
+                     tempcount*Interpreter::stackElementWords -
                      popframe_extra_args;
     interpreter_frame->interpreter_frame_set_last_sp(esp);
 
@@ -1650,7 +1646,7 @@
     __ get_method(rax);
     __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
                                                 size_of_parameters_offset())));
-    __ shll(rax, Interpreter::logStackElementSize());
+    __ shll(rax, Interpreter::logStackElementSize);
     __ restore_locals(); // XXX do we need this?
     __ subptr(r14, rax);
     __ addptr(r14, wordSize);
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu May 13 13:05:47 2010 -0700
@@ -50,7 +50,7 @@
 static inline Address aaddress(int n)            { return iaddress(n); }
 
 static inline Address iaddress(Register r)       {
-  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
+  return Address(rdi, r, Interpreter::stackElementScale());
 }
 static inline Address laddress(Register r)       {
   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
@@ -59,12 +59,9 @@
   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
 }
 
-static inline Address faddress(Register r)       { return iaddress(r); };
-static inline Address daddress(Register r)       {
-  assert(!TaggedStackInterpreter, "This doesn't work");
-  return laddress(r);
-};
-static inline Address aaddress(Register r)       { return iaddress(r); };
+static inline Address faddress(Register r)       { return iaddress(r); }
+static inline Address daddress(Register r)       { return laddress(r); }
+static inline Address aaddress(Register r)       { return iaddress(r); }
 
 // expression stack
 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
@@ -448,7 +445,6 @@
   // Get the local value into tos
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -456,18 +452,15 @@
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
   __ push(itos);
   locals_index(rbx, 3);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -476,7 +469,6 @@
   locals_index(rbx);
   __ movptr(rax, laddress(rbx));
   NOT_LP64(__ movl(rdx, haddress(rbx)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 
@@ -484,26 +476,13 @@
   transition(vtos, ftos);
   locals_index(rbx);
   __ fld_s(faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
 void TemplateTable::dload() {
   transition(vtos, dtos);
   locals_index(rbx);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(rbx));
-    __ movl(rdx, haddress(rbx));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);
-    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
-  } else {
-    __ fld_d(daddress(rbx));
-  }
+  __ fld_d(daddress(rbx));
 }
 
 
@@ -511,7 +490,6 @@
   transition(vtos, atos);
   locals_index(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 
@@ -527,7 +505,6 @@
   transition(vtos, itos);
   locals_index_wide(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -536,7 +513,6 @@
   locals_index_wide(rbx);
   __ movptr(rax, laddress(rbx));
   NOT_LP64(__ movl(rdx, haddress(rbx)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 
@@ -544,26 +520,13 @@
   transition(vtos, ftos);
   locals_index_wide(rbx);
   __ fld_s(faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
 void TemplateTable::wide_dload() {
   transition(vtos, dtos);
   locals_index_wide(rbx);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(rbx));
-    __ movl(rdx, haddress(rbx));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addl(rsp, 2*wordSize);
-    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
-  } else {
-    __ fld_d(daddress(rbx));
-  }
+  __ fld_d(daddress(rbx));
 }
 
 
@@ -571,7 +534,6 @@
   transition(vtos, atos);
   locals_index_wide(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::index_check(Register array, Register index) {
@@ -672,7 +634,6 @@
   // load index out of locals
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 
   // rdx: array
   index_check(rdx, rax);
@@ -695,7 +656,6 @@
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
   __ movl(rax, iaddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 
@@ -703,39 +663,24 @@
   transition(vtos, ltos);
   __ movptr(rax, laddress(n));
   NOT_LP64(__ movptr(rdx, haddress(n)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   __ fld_s(faddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(n));
-    __ movl(rdx, haddress(n));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);  // reset rsp
-    debug_only(__ verify_local_tag(frame::TagCategory2, n));
-  } else {
-    __ fld_d(daddress(n));
-  }
+  __ fld_d(daddress(n));
 }
 
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
   __ movptr(rax, aaddress(n));
-  debug_only(__ verify_local_tag(frame::TagReference, n));
 }
 
 
@@ -809,7 +754,6 @@
   transition(itos, vtos);
   locals_index(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
@@ -818,7 +762,6 @@
   locals_index(rbx);
   __ movptr(laddress(rbx), rax);
   NOT_LP64(__ movptr(haddress(rbx), rdx));
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 
@@ -826,34 +769,21 @@
   transition(ftos, vtos);
   locals_index(rbx);
   __ fstp_s(faddress(rbx));
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
 void TemplateTable::dstore() {
   transition(dtos, vtos);
   locals_index(rbx);
-  if (TaggedStackInterpreter) {
-    // Store double on stack and reload into locals nonadjacently
-    __ subptr(rsp, 2 * wordSize);
-    __ fstp_d(Address(rsp, 0));
-    __ pop(rax);
-    __ pop(rdx);
-    __ movptr(laddress(rbx), rax);
-    __ movptr(haddress(rbx), rdx);
-    __ tag_local(frame::TagCategory2, rbx);
-  } else {
-    __ fstp_d(daddress(rbx));
-  }
+  __ fstp_d(daddress(rbx));
 }
 
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);   // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);    // need to store same tag in local may be returnAddr
 }
 
 
@@ -862,7 +792,6 @@
   __ pop_i(rax);
   locals_index_wide(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
@@ -872,7 +801,6 @@
   locals_index_wide(rbx);
   __ movptr(laddress(rbx), rax);
   NOT_LP64(__ movl(haddress(rbx), rdx));
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 
@@ -888,10 +816,9 @@
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   locals_index_wide(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);
 }
 
 
@@ -990,7 +917,7 @@
 
   // Pop stack arguments
   __ bind(done);
-  __ addptr(rsp, 3 * Interpreter::stackElementSize());
+  __ addptr(rsp, 3 * Interpreter::stackElementSize);
 }
 
 
@@ -1024,7 +951,6 @@
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
   __ movl(iaddress(n), rax);
-  __ tag_local(frame::TagValue, n);
 }
 
 
@@ -1032,58 +958,45 @@
   transition(ltos, vtos);
   __ movptr(laddress(n), rax);
   NOT_LP64(__ movptr(haddress(n), rdx));
-  __ tag_local(frame::TagCategory2, n);
 }
 
 
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   __ fstp_s(faddress(n));
-  __ tag_local(frame::TagValue, n);
 }
 
 
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
-  if (TaggedStackInterpreter) {
-    __ subptr(rsp, 2 * wordSize);
-    __ fstp_d(Address(rsp, 0));
-    __ pop(rax);
-    __ pop(rdx);
-    __ movl(laddress(n), rax);
-    __ movl(haddress(n), rdx);
-    __ tag_local(frame::TagCategory2, n);
-  } else {
-    __ fstp_d(daddress(n));
-  }
+  __ fstp_d(daddress(n));
 }
 
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   __ movptr(aaddress(n), rax);
-  __ tag_local(rdx, n);
 }
 
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ addptr(rsp, Interpreter::stackElementSize());
+  __ addptr(rsp, Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ addptr(rsp, 2*Interpreter::stackElementSize());
+  __ addptr(rsp, 2*Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::dup() {
   transition(vtos, vtos);
   // stack: ..., a
-  __ load_ptr_and_tag(0, rax, rdx);
-  __ push_ptr(rax, rdx);
+  __ load_ptr(0, rax);
+  __ push_ptr(rax);
   // stack: ..., a, a
 }
 
@@ -1091,11 +1004,11 @@
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ store_ptr_and_tag(1, rax, rdx); // store b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr( 0, rax);  // load b
+  __ load_ptr( 1, rcx);  // load a
+  __ store_ptr(1, rax);  // store b
+  __ store_ptr(0, rcx);  // store a
+  __ push_ptr(rax);      // push b
   // stack: ..., b, a, b
 }
 
@@ -1103,15 +1016,15 @@
 void TemplateTable::dup_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rax, rdx);  // load c
-  __ load_ptr_and_tag(2, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rax, rdx); // store c in a
-  __ push_ptr(rax, rdx);             // push c
+  __ load_ptr( 0, rax);  // load c
+  __ load_ptr( 2, rcx);  // load a
+  __ store_ptr(2, rax);  // store c in a
+  __ push_ptr(rax);      // push c
   // stack: ..., c, b, c, c
-  __ load_ptr_and_tag(2, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in b
+  __ load_ptr( 2, rax);  // load b
+  __ store_ptr(2, rcx);  // store a in b
   // stack: ..., c, a, c, c
-  __ store_ptr_and_tag(1, rax, rdx); // store b in c
+  __ store_ptr(1, rax);  // store b in c
   // stack: ..., c, a, b, c
 }
 
@@ -1119,10 +1032,10 @@
 void TemplateTable::dup2() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rax, rdx);  // load a
-  __ push_ptr(rax, rdx);             // push a
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr(1, rax);  // load a
+  __ push_ptr(rax);     // push a
+  __ load_ptr(1, rax);  // load b
+  __ push_ptr(rax);     // push b
   // stack: ..., a, b, a, b
 }
 
@@ -1130,17 +1043,17 @@
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rcx, rbx);  // load c
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
-  __ push_ptr(rcx, rbx);             // push c
+  __ load_ptr( 0, rcx);  // load c
+  __ load_ptr( 1, rax);  // load b
+  __ push_ptr(rax);      // push b
+  __ push_ptr(rcx);      // push c
   // stack: ..., a, b, c, b, c
-  __ store_ptr_and_tag(3, rcx, rbx); // store c in b
+  __ store_ptr(3, rcx);  // store c in b
   // stack: ..., a, c, c, b, c
-  __ load_ptr_and_tag(4, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
+  __ load_ptr( 4, rcx);  // load a
+  __ store_ptr(2, rcx);  // store a in 2nd c
   // stack: ..., a, c, a, b, c
-  __ store_ptr_and_tag(4, rax, rdx); // store b in a
+  __ store_ptr(4, rax);  // store b in a
   // stack: ..., b, c, a, b, c
   // stack: ..., b, c, a, b, c
 }
@@ -1149,19 +1062,19 @@
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(0, rcx, rbx);  // load d
-  __ load_ptr_and_tag(1, rax, rdx);  // load c
-  __ push_ptr(rax, rdx);             // push c
-  __ push_ptr(rcx, rbx);             // push d
+  __ load_ptr( 0, rcx);  // load d
+  __ load_ptr( 1, rax);  // load c
+  __ push_ptr(rax);      // push c
+  __ push_ptr(rcx);      // push d
   // stack: ..., a, b, c, d, c, d
-  __ load_ptr_and_tag(4, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rax, rdx); // store b in d
-  __ store_ptr_and_tag(4, rcx, rbx); // store d in b
+  __ load_ptr( 4, rax);  // load b
+  __ store_ptr(2, rax);  // store b in d
+  __ store_ptr(4, rcx);  // store d in b
   // stack: ..., a, d, c, b, c, d
-  __ load_ptr_and_tag(5, rcx, rbx);  // load a
-  __ load_ptr_and_tag(3, rax, rdx);  // load c
-  __ store_ptr_and_tag(3, rcx, rbx); // store a in c
-  __ store_ptr_and_tag(5, rax, rdx); // store c in a
+  __ load_ptr( 5, rcx);  // load a
+  __ load_ptr( 3, rax);  // load c
+  __ store_ptr(3, rcx);  // store a in c
+  __ store_ptr(5, rax);  // store c in a
   // stack: ..., c, d, a, b, c, d
   // stack: ..., c, d, a, b, c, d
 }
@@ -1170,10 +1083,10 @@
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a in b
-  __ store_ptr_and_tag(1, rax, rdx); // store b in a
+  __ load_ptr( 1, rcx);  // load a
+  __ load_ptr( 0, rax);  // load b
+  __ store_ptr(0, rcx);  // store a in b
+  __ store_ptr(1, rax);  // store b in a
   // stack: ..., b, a
 }
 
@@ -1181,12 +1094,12 @@
 void TemplateTable::iop2(Operation op) {
   transition(itos, itos);
   switch (op) {
-    case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
+    case add  :                   __ pop_i(rdx); __ addl (rax, rdx); break;
     case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
-    case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
-    case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
-    case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
-    case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
+    case mul  :                   __ pop_i(rdx); __ imull(rax, rdx); break;
+    case _and :                   __ pop_i(rdx); __ andl (rax, rdx); break;
+    case _or  :                   __ pop_i(rdx); __ orl  (rax, rdx); break;
+    case _xor :                   __ pop_i(rdx); __ xorl (rax, rdx); break;
     case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
     case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
     case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
@@ -1199,13 +1112,13 @@
   transition(ltos, ltos);
   __ pop_l(rbx, rcx);
   switch (op) {
-    case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
-    case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
-               __ mov(rax, rbx); __ mov(rdx, rcx); break;
-    case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
-    case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
-    case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
-    default : ShouldNotReachHere();
+    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
+    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
+                __ mov (rax, rbx); __ mov (rdx, rcx); break;
+    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
+    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
+    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
+    default   : ShouldNotReachHere();
   }
 }
 
@@ -1299,7 +1212,6 @@
 
 void TemplateTable::fop2(Operation op) {
   transition(ftos, ftos);
-  __ pop_ftos_to_rsp();  // pop ftos into rsp
   switch (op) {
     case add: __ fadd_s (at_rsp());                break;
     case sub: __ fsubr_s(at_rsp());                break;
@@ -1315,7 +1227,6 @@
 
 void TemplateTable::dop2(Operation op) {
   transition(dtos, dtos);
-  __ pop_dtos_to_rsp();  // pop dtos into rsp
 
   switch (op) {
     case add: __ fadd_d (at_rsp());                break;
@@ -1557,10 +1468,8 @@
 
 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
   if (is_float) {
-    __ pop_ftos_to_rsp();
     __ fld_s(at_rsp());
   } else {
-    __ pop_dtos_to_rsp();
     __ fld_d(at_rsp());
     __ pop(rdx);
   }
@@ -2854,7 +2763,6 @@
   transition(vtos, state);
   // get receiver
   __ movptr(rax, aaddress(0));
-  debug_only(__ verify_local_tag(frame::TagReference, 0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
   __ movptr(rbx, Address(rcx,
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu May 13 13:05:47 2010 -0700
@@ -58,7 +58,7 @@
 }
 
 static inline Address iaddress(Register r) {
-  return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
+  return Address(r14, r, Address::times_8);
 }
 
 static inline Address laddress(Register r) {
@@ -418,7 +418,6 @@
 void TemplateTable::locals_index(Register reg, int offset) {
   __ load_unsigned_byte(reg, at_bcp(offset));
   __ negptr(reg);
-  if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
 }
 
 void TemplateTable::iload() {
@@ -460,53 +459,45 @@
   // Get the local value into tos
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload2() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
   __ push(itos);
   locals_index(rbx, 3);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::lload() {
   transition(vtos, ltos);
   locals_index(rbx);
   __ movq(rax, laddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::fload() {
   transition(vtos, ftos);
   locals_index(rbx);
   __ movflt(xmm0, faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::dload() {
   transition(vtos, dtos);
   locals_index(rbx);
   __ movdbl(xmm0, daddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::aload() {
   transition(vtos, atos);
   locals_index(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::locals_index_wide(Register reg) {
@@ -514,42 +505,36 @@
   __ bswapl(reg);
   __ shrl(reg, 16);
   __ negptr(reg);
-  if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
 }
 
 void TemplateTable::wide_iload() {
   transition(vtos, itos);
   locals_index_wide(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::wide_lload() {
   transition(vtos, ltos);
   locals_index_wide(rbx);
   __ movq(rax, laddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::wide_fload() {
   transition(vtos, ftos);
   locals_index_wide(rbx);
   __ movflt(xmm0, faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::wide_dload() {
   transition(vtos, dtos);
   locals_index_wide(rbx);
   __ movdbl(xmm0, daddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::wide_aload() {
   transition(vtos, atos);
   locals_index_wide(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::index_check(Register array, Register index) {
@@ -654,7 +639,6 @@
   // load index out of locals
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 
   // eax: index
   // rdx: array
@@ -681,31 +665,26 @@
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
   __ movl(rax, iaddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 void TemplateTable::lload(int n) {
   transition(vtos, ltos);
   __ movq(rax, laddress(n));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   __ movflt(xmm0, faddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
   __ movdbl(xmm0, daddress(n));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
   __ movptr(rax, aaddress(n));
-  debug_only(__ verify_local_tag(frame::TagReference, n));
 }
 
 void TemplateTable::aload_0() {
@@ -791,36 +770,31 @@
   transition(itos, vtos);
   locals_index(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::lstore() {
   transition(ltos, vtos);
   locals_index(rbx);
   __ movq(laddress(rbx), rax);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::fstore() {
   transition(ftos, vtos);
   locals_index(rbx);
   __ movflt(faddress(rbx), xmm0);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::dstore() {
   transition(dtos, vtos);
   locals_index(rbx);
   __ movdbl(daddress(rbx), xmm0);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);    // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
 }
 
 void TemplateTable::wide_istore() {
@@ -828,7 +802,6 @@
   __ pop_i();
   locals_index_wide(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::wide_lstore() {
@@ -836,7 +809,6 @@
   __ pop_l();
   locals_index_wide(rbx);
   __ movq(laddress(rbx), rax);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::wide_fstore() {
@@ -844,7 +816,6 @@
   __ pop_f();
   locals_index_wide(rbx);
   __ movflt(faddress(rbx), xmm0);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::wide_dstore() {
@@ -852,15 +823,13 @@
   __ pop_d();
   locals_index_wide(rbx);
   __ movdbl(daddress(rbx), xmm0);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);    // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index_wide(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
 }
 
 void TemplateTable::iastore() {
@@ -972,7 +941,7 @@
 
   // Pop stack arguments
   __ bind(done);
-  __ addptr(rsp, 3 * Interpreter::stackElementSize());
+  __ addptr(rsp, 3 * Interpreter::stackElementSize);
 }
 
 void TemplateTable::bastore() {
@@ -1010,130 +979,125 @@
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
   __ movl(iaddress(n), rax);
-  __ tag_local(frame::TagValue, n);
 }
 
 void TemplateTable::lstore(int n) {
   transition(ltos, vtos);
   __ movq(laddress(n), rax);
-  __ tag_local(frame::TagCategory2, n);
 }
 
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   __ movflt(faddress(n), xmm0);
-  __ tag_local(frame::TagValue, n);
 }
 
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
   __ movdbl(daddress(n), xmm0);
-  __ tag_local(frame::TagCategory2, n);
 }
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   __ movptr(aaddress(n), rax);
-  __ tag_local(rdx, n);
 }
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ addptr(rsp, Interpreter::stackElementSize());
+  __ addptr(rsp, Interpreter::stackElementSize);
 }
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ addptr(rsp, 2 * Interpreter::stackElementSize());
+  __ addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void TemplateTable::dup() {
   transition(vtos, vtos);
-  __ load_ptr_and_tag(0, rax, rdx);
-  __ push_ptr(rax, rdx);
+  __ load_ptr(0, rax);
+  __ push_ptr(rax);
   // stack: ..., a, a
 }
 
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ store_ptr_and_tag(1, rax, rdx); // store b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr( 0, rax);  // load b
+  __ load_ptr( 1, rcx);  // load a
+  __ store_ptr(1, rax);  // store b
+  __ store_ptr(0, rcx);  // store a
+  __ push_ptr(rax);      // push b
   // stack: ..., b, a, b
 }
 
 void TemplateTable::dup_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rax, rdx);  // load c
-  __ load_ptr_and_tag(2, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rax, rdx); // store c in a
-  __ push_ptr(rax, rdx);             // push c
+  __ load_ptr( 0, rax);  // load c
+  __ load_ptr( 2, rcx);  // load a
+  __ store_ptr(2, rax);  // store c in a
+  __ push_ptr(rax);      // push c
   // stack: ..., c, b, c, c
-  __ load_ptr_and_tag(2, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in b
+  __ load_ptr( 2, rax);  // load b
+  __ store_ptr(2, rcx);  // store a in b
   // stack: ..., c, a, c, c
-  __ store_ptr_and_tag(1, rax, rdx); // store b in c
+  __ store_ptr(1, rax);  // store b in c
   // stack: ..., c, a, b, c
 }
 
 void TemplateTable::dup2() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rax, rdx);  // load a
-  __ push_ptr(rax, rdx);             // push a
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr(1, rax);  // load a
+  __ push_ptr(rax);     // push a
+  __ load_ptr(1, rax);  // load b
+  __ push_ptr(rax);     // push b
   // stack: ..., a, b, a, b
 }
 
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rcx, rbx);  // load c
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
-  __ push_ptr(rcx, rbx);             // push c
+  __ load_ptr( 0, rcx);  // load c
+  __ load_ptr( 1, rax);  // load b
+  __ push_ptr(rax);      // push b
+  __ push_ptr(rcx);      // push c
   // stack: ..., a, b, c, b, c
-  __ store_ptr_and_tag(3, rcx, rbx); // store c in b
+  __ store_ptr(3, rcx);  // store c in b
   // stack: ..., a, c, c, b, c
-  __ load_ptr_and_tag(4, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
+  __ load_ptr( 4, rcx);  // load a
+  __ store_ptr(2, rcx);  // store a in 2nd c
   // stack: ..., a, c, a, b, c
-  __ store_ptr_and_tag(4, rax, rdx); // store b in a
+  __ store_ptr(4, rax);  // store b in a
   // stack: ..., b, c, a, b, c
 }
 
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(0, rcx, rbx);  // load d
-  __ load_ptr_and_tag(1, rax, rdx);  // load c
-  __ push_ptr(rax, rdx);             // push c
-  __ push_ptr(rcx, rbx);             // push d
+  __ load_ptr( 0, rcx);  // load d
+  __ load_ptr( 1, rax);  // load c
+  __ push_ptr(rax);      // push c
+  __ push_ptr(rcx);      // push d
   // stack: ..., a, b, c, d, c, d
-  __ load_ptr_and_tag(4, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rax, rdx); // store b in d
-  __ store_ptr_and_tag(4, rcx, rbx); // store d in b
+  __ load_ptr( 4, rax);  // load b
+  __ store_ptr(2, rax);  // store b in d
+  __ store_ptr(4, rcx);  // store d in b
   // stack: ..., a, d, c, b, c, d
-  __ load_ptr_and_tag(5, rcx, rbx);  // load a
-  __ load_ptr_and_tag(3, rax, rdx);  // load c
-  __ store_ptr_and_tag(3, rcx, rbx); // store a in c
-  __ store_ptr_and_tag(5, rax, rdx); // store c in a
+  __ load_ptr( 5, rcx);  // load a
+  __ load_ptr( 3, rax);  // load c
+  __ store_ptr(3, rcx);  // store a in c
+  __ store_ptr(5, rax);  // store c in a
   // stack: ..., c, d, a, b, c, d
 }
 
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a in b
-  __ store_ptr_and_tag(1, rax, rdx); // store b in a
+  __ load_ptr( 1, rcx);  // load a
+  __ load_ptr( 0, rax);  // load b
+  __ store_ptr(0, rcx);  // store a in b
+  __ store_ptr(1, rax);  // store b in a
   // stack: ..., b, a
 }
 
@@ -1156,12 +1120,12 @@
 void TemplateTable::lop2(Operation op) {
   transition(ltos, ltos);
   switch (op) {
-  case add  :                    __ pop_l(rdx); __ addptr (rax, rdx); break;
-  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr (rax, rdx); break;
-  case _and :                    __ pop_l(rdx); __ andptr (rax, rdx); break;
-  case _or  :                    __ pop_l(rdx); __ orptr  (rax, rdx); break;
-  case _xor :                    __ pop_l(rdx); __ xorptr (rax, rdx); break;
-  default : ShouldNotReachHere();
+  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
+  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
+  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
+  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
+  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
+  default   : ShouldNotReachHere();
   }
 }
 
@@ -1250,7 +1214,7 @@
   switch (op) {
   case add:
     __ addss(xmm0, at_rsp());
-    __ addptr(rsp, Interpreter::stackElementSize());
+    __ addptr(rsp, Interpreter::stackElementSize);
     break;
   case sub:
     __ movflt(xmm1, xmm0);
@@ -1259,7 +1223,7 @@
     break;
   case mul:
     __ mulss(xmm0, at_rsp());
-    __ addptr(rsp, Interpreter::stackElementSize());
+    __ addptr(rsp, Interpreter::stackElementSize);
     break;
   case div:
     __ movflt(xmm1, xmm0);
@@ -1282,7 +1246,7 @@
   switch (op) {
   case add:
     __ addsd(xmm0, at_rsp());
-    __ addptr(rsp, 2 * Interpreter::stackElementSize());
+    __ addptr(rsp, 2 * Interpreter::stackElementSize);
     break;
   case sub:
     __ movdbl(xmm1, xmm0);
@@ -1291,7 +1255,7 @@
     break;
   case mul:
     __ mulsd(xmm0, at_rsp());
-    __ addptr(rsp, 2 * Interpreter::stackElementSize());
+    __ addptr(rsp, 2 * Interpreter::stackElementSize);
     break;
   case div:
     __ movdbl(xmm1, xmm0);
@@ -2782,7 +2746,6 @@
 
   // get receiver
   __ movptr(rax, aaddress(0));
-  debug_only(__ verify_local_tag(frame::TagReference, 0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
   __ movptr(rbx,
@@ -2858,7 +2821,6 @@
   if (load_receiver) {
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
-    if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
     Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
     __ movptr(recv, recv_addr);
     __ verify_oop(recv);
@@ -3610,13 +3572,11 @@
   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
   // last dim is on top of stack; we want address of first one:
   // first_addr = last_addr + (ndims - 1) * wordSize
-  if (TaggedStackInterpreter) __ shll(rax, 1);  // index*2
   __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
   call_VM(rax,
           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
           c_rarg1);
   __ load_unsigned_byte(rbx, at_bcp(3));
-  if (TaggedStackInterpreter) __ shll(rbx, 1);  // index*2
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
 }
 #endif // !CC_INTERP
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -37,15 +37,18 @@
   thread->reset_last_Java_frame();              \
   fixup_after_potential_safepoint()
 
-void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
 
   // Allocate and initialize our frame.
-  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
   thread->push_zero_frame(frame);
 
   // Execute those bytecodes!
   main_loop(0, THREAD);
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
 void CppInterpreter::main_loop(int recurse, TRAPS) {
@@ -165,7 +168,7 @@
     stack->push(result[-i]);
 }
 
-void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   // Make sure method is native and not abstract
   assert(method->is_native() && !method->is_abstract(), "should be");
 
@@ -173,7 +176,7 @@
   ZeroStack *stack = thread->zero_stack();
 
   // Allocate and initialize our frame
-  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
   thread->push_zero_frame(frame);
   interpreterState istate = frame->interpreter_state();
   intptr_t *locals = istate->locals();
@@ -430,25 +433,26 @@
       ShouldNotReachHere();
     }
   }
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
-void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
   ZeroStack *stack = thread->zero_stack();
   intptr_t *locals = stack->sp();
 
   // Drop into the slow path if we need a safepoint check
   if (SafepointSynchronize::do_call_back()) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Load the object pointer and drop into the slow path
   // if we have a NullPointerException
   oop object = LOCALS_OBJECT(0);
   if (object == NULL) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Read the field index from the bytecode, which looks like this:
@@ -470,15 +474,14 @@
   constantPoolCacheOop cache = method->constants()->cache();
   ConstantPoolCacheEntry* entry = cache->entry_at(index);
   if (!entry->is_resolved(Bytecodes::_getfield)) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Get the result and push it onto the stack
   switch (entry->flag_state()) {
   case ltos:
   case dtos:
-    stack->overflow_check(1, CHECK);
+    stack->overflow_check(1, CHECK_0);
     stack->alloc(wordSize);
     break;
   }
@@ -558,20 +561,25 @@
       ShouldNotReachHere();
     }
   }
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
-void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
   ZeroStack *stack = thread->zero_stack();
 
   // Drop into the slow path if we need a safepoint check
   if (SafepointSynchronize::do_call_back()) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Pop our parameters
   stack->set_sp(stack->sp() + method->size_of_parameters());
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
 InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
@@ -833,7 +841,7 @@
   int callee_extra_locals = callee_locals - callee_param_count;
 
   if (interpreter_frame) {
-    intptr_t *locals        = interpreter_frame->sp() + method->max_locals();
+    intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
     interpreterState istate = interpreter_frame->get_interpreterState();
     intptr_t *monitor_base  = (intptr_t*) istate;
     intptr_t *stack_base    = monitor_base - monitor_words;
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -29,10 +29,10 @@
 
  public:
   // Method entries
-  static void normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void native_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int native_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
 
  public:
   // Main loop of normal_entry
--- a/src/cpu/zero/vm/entry_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/entry_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,20 +41,30 @@
   }
 
  private:
-  typedef void (*NormalEntryFunc)(methodOop method,
-                                  intptr_t  base_pc,
-                                  TRAPS);
-  typedef void (*OSREntryFunc)(methodOop method,
-                               address   osr_buf,
-                               intptr_t  base_pc,
-                               TRAPS);
+  typedef int (*NormalEntryFunc)(methodOop method,
+                                 intptr_t  base_pc,
+                                 TRAPS);
+  typedef int (*OSREntryFunc)(methodOop method,
+                              address   osr_buf,
+                              intptr_t  base_pc,
+                              TRAPS);
 
  public:
   void invoke(methodOop method, TRAPS) const {
-    ((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD);
+    maybe_deoptimize(
+      ((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD),
+      THREAD);
   }
   void invoke_osr(methodOop method, address osr_buf, TRAPS) const {
-    ((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD);
+    maybe_deoptimize(
+      ((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD),
+      THREAD);
+  }
+
+ private:
+  static void maybe_deoptimize(int deoptimized_frames, TRAPS) {
+    if (deoptimized_frames)
+      CppInterpreter::main_loop(deoptimized_frames - 1, THREAD);
   }
 
  public:
--- a/src/cpu/zero/vm/frame_zero.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,10 @@
   return zeroframe()->is_interpreter_frame();
 }
 
+bool frame::is_fake_stub_frame() const {
+  return zeroframe()->is_fake_stub_frame();
+}
+
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_entry_frame(), "wrong type of frame");
   assert(map != NULL, "map must be set");
@@ -44,14 +48,14 @@
          "sender should be next Java frame");
   map->clear();
   assert(map->include_argument_oops(), "should be set by clear");
-  return frame(sender_sp(), sp() + 1);
+  return frame(zeroframe()->next(), sender_sp());
 }
 
 frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_interpreter_frame() ||
          zeroframe()->is_shark_frame() ||
          zeroframe()->is_fake_stub_frame(), "wrong type of frame");
-  return frame(sender_sp(), sp() + 1);
+  return frame(zeroframe()->next(), sender_sp());
 }
 
 frame frame::sender(RegisterMap* map) const {
@@ -172,8 +176,8 @@
   char *valuebuf = buf + buflen;
 
   // Print each word of the frame
-  for (intptr_t *addr = fp(); addr <= sp(); addr++) {
-    int offset = sp() - addr;
+  for (intptr_t *addr = sp(); addr <= fp(); addr++) {
+    int offset = fp() - addr;
 
     // Fill in default values, then try and improve them
     snprintf(fieldbuf, buflen, "word[%d]", offset);
--- a/src/cpu/zero/vm/frame_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,17 +32,18 @@
 
   // Constructor
  public:
-  frame(intptr_t* sp, intptr_t* fp);
+  frame(ZeroFrame* zeroframe, intptr_t* sp);
 
-  // The sp of a Zero frame is the address of the highest word in
-  // that frame.  We keep track of the lowest address too, so the
-  // boundaries of the frame are available for debug printing.
  private:
-  intptr_t* _fp;
+  ZeroFrame* _zeroframe;
 
  public:
+  const ZeroFrame *zeroframe() const {
+    return _zeroframe;
+  }
+
   intptr_t* fp() const {
-    return _fp;
+    return (intptr_t *) zeroframe();
   }
 
 #ifdef CC_INTERP
@@ -50,10 +51,6 @@
 #endif // CC_INTERP
 
  public:
-  const ZeroFrame *zeroframe() const {
-    return (ZeroFrame *) sp();
-  }
-
   const EntryFrame *zero_entryframe() const {
     return zeroframe()->as_entry_frame();
   }
@@ -65,6 +62,9 @@
   }
 
  public:
+  bool is_fake_stub_frame() const;
+
+ public:
   frame sender_for_nonentry_frame(RegisterMap* map) const;
 
  public:
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,16 +26,16 @@
 // Constructors
 
 inline frame::frame() {
+  _zeroframe = NULL;
   _sp = NULL;
-  _fp = NULL;
   _pc = NULL;
   _cb = NULL;
   _deopt_state = unknown;
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp) {
+inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
+  _zeroframe = zf;
   _sp = sp;
-  _fp = fp;
   switch (zeroframe()->type()) {
   case ZeroFrame::ENTRY_FRAME:
     _pc = StubRoutines::call_stub_return_pc();
@@ -66,7 +66,7 @@
 // Accessors
 
 inline intptr_t* frame::sender_sp() const {
-  return (intptr_t *) zeroframe()->next();
+  return fp() + 1;
 }
 
 inline intptr_t* frame::link() const {
@@ -120,7 +120,7 @@
 // we can distinguish identity and younger/older relationship. NULL
 // represents an invalid (incomparable) frame.
 inline intptr_t* frame::id() const {
-  return sp();
+  return fp();
 }
 
 inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
--- a/src/cpu/zero/vm/interpreter_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/interpreter_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * Copyright 2007, 2008 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -36,26 +36,14 @@
 
  public:
   static int expr_index_at(int i) {
-    return stackElementWords() * i;
-  }
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
+    return stackElementWords * i;
   }
 
   static int expr_offset_in_bytes(int i) {
-    return stackElementSize() * i;
-  }
-  static int expr_tag_offset_in_bytes(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
+    return stackElementSize * i;
   }
 
   static int local_index_at(int i) {
     assert(i <= 0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes() / wordSize);
+    return stackElementWords * i;
   }
-  static int local_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
-  }
--- a/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,21 +23,31 @@
  *
  */
 
+ private:
+  ZeroFrame* volatile _last_Java_fp;
+
  public:
   // Each arch must define reset, save, restore
   // These are used by objects that only care about:
   //  1 - initializing a new state (thread creation, javaCalls)
   //  2 - saving a current state (javaCalls)
   //  3 - restoring an old state (javaCalls)
+  // Note that whenever _last_Java_sp != NULL other anchor fields
+  // must be valid.  The profiler apparently depends on this.
 
   void clear() {
     // clearing _last_Java_sp must be first
     _last_Java_sp = NULL;
     // fence?
+    _last_Java_fp = NULL;
     _last_Java_pc = NULL;
   }
 
   void copy(JavaFrameAnchor* src) {
+    set(src->_last_Java_sp, src->_last_Java_pc, src->_last_Java_fp);
+  }
+
+  void set(intptr_t* sp, address pc, ZeroFrame* fp) {
     // In order to make sure the transition state is valid for "this"
     // We must clear _last_Java_sp before copying the rest of the new
     // data
@@ -46,13 +56,14 @@
     // previous version (pd_cache_state) don't NULL _last_Java_sp
     // unless the value is changing
     //
-    if (_last_Java_sp != src->_last_Java_sp)
+    if (_last_Java_sp != sp)
       _last_Java_sp = NULL;
 
-    _last_Java_pc = src->_last_Java_pc;
+    _last_Java_fp = fp;
+    _last_Java_pc = pc;
     // Must be last so profiler will always see valid frame if
     // has_last_frame() is true
-    _last_Java_sp = src->_last_Java_sp;
+    _last_Java_sp = sp;
   }
 
   bool walkable() {
@@ -67,6 +78,10 @@
     return _last_Java_sp;
   }
 
-  void set_last_Java_sp(intptr_t* sp) {
-    _last_Java_sp = sp;
+  ZeroFrame* last_Java_fp() const {
+    return _last_Java_fp;
   }
+
+  static ByteSize last_Java_fp_offset() {
+    return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
+  }
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -26,6 +26,10 @@
 #include "incls/_precompiled.incl"
 #include "incls/_methodHandles_zero.cpp.incl"
 
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  ShouldNotCallThis();
+}
+
 void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
                                                 MethodHandles::EntryKind ek) {
   ShouldNotCallThis();
--- a/src/cpu/zero/vm/stack_zero.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/stack_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -26,12 +26,18 @@
 #include "incls/_precompiled.incl"
 #include "incls/_stack_zero.cpp.incl"
 
+int ZeroStack::suggest_size(Thread *thread) const {
+  assert(needs_setup(), "already set up");
+  return align_size_down(abi_stack_available(thread) / 2, wordSize);
+}
+
 void ZeroStack::handle_overflow(TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
 
   // Set up the frame anchor if it isn't already
   bool has_last_Java_frame = thread->has_last_Java_frame();
   if (!has_last_Java_frame) {
+    intptr_t *sp = thread->zero_stack()->sp();
     ZeroFrame *frame = thread->top_zero_frame();
     while (frame) {
       if (frame->is_shark_frame())
@@ -44,13 +50,14 @@
           break;
       }
 
+      sp = ((intptr_t *) frame) + 1;
       frame = frame->next();
     }
 
     if (frame == NULL)
       fatal("unrecoverable stack overflow");
 
-    thread->set_last_Java_frame(frame);
+    thread->set_last_Java_frame(frame, sp);
   }
 
   // Throw the exception
@@ -71,3 +78,9 @@
   if (!has_last_Java_frame)
     thread->reset_last_Java_frame();
 }
+
+#ifndef PRODUCT
+void ZeroStack::zap(int c) {
+  memset(_base, c, available_words() * wordSize);
+}
+#endif // PRODUCT
--- a/src/cpu/zero/vm/stack_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/stack_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -42,6 +42,8 @@
     return _base == NULL;
   }
 
+  int suggest_size(Thread *thread) const;
+
   void setup(void *mem, size_t size) {
     assert(needs_setup(), "already set up");
     assert(!(size & WordAlignmentMask), "unaligned");
@@ -67,6 +69,9 @@
     _sp = new_sp;
   }
 
+  int total_words() const {
+    return _top - _base;
+  }
   int available_words() const {
     return _sp - _base;
   }
@@ -89,12 +94,16 @@
   int shadow_pages_size() const {
     return _shadow_pages_size;
   }
+  int abi_stack_available(Thread *thread) const;
 
  public:
   void overflow_check(int required_words, TRAPS);
   static void handle_overflow(TRAPS);
 
  public:
+  void zap(int c) PRODUCT_RETURN;
+
+ public:
   static ByteSize base_offset() {
     return byte_offset_of(ZeroStack, _base);
   }
--- a/src/cpu/zero/vm/stack_zero.inline.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/stack_zero.inline.hpp	Thu May 13 13:05:47 2010 -0700
@@ -25,19 +25,24 @@
 
 // This function should match SharkStack::CreateStackOverflowCheck
 inline void ZeroStack::overflow_check(int required_words, TRAPS) {
-  JavaThread *thread = (JavaThread *) THREAD;
-
   // Check the Zero stack
-  if (required_words > available_words()) {
+  if (available_words() < required_words) {
     handle_overflow(THREAD);
     return;
   }
 
   // Check the ABI stack
-  address stack_top = thread->stack_base() - thread->stack_size();
-  int free_stack = ((address) &stack_top) - stack_top;
-  if (free_stack < shadow_pages_size()) {
+  if (abi_stack_available(THREAD) < 0) {
     handle_overflow(THREAD);
     return;
   }
 }
+
+// This method returns the amount of ABI stack available for us
+// to use under normal circumstances.  Note that the returned
+// value can be negative.
+inline int ZeroStack::abi_stack_available(Thread *thread) const {
+  int stack_used = thread->stack_base() - (address) &stack_used;
+  int stack_free = thread->stack_size() - stack_used;
+  return stack_free - shadow_pages_size();
+}
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -51,10 +51,7 @@
     // Set up the stack if necessary
     bool stack_needs_teardown = false;
     if (stack->needs_setup()) {
-      size_t stack_used = thread->stack_base() - (address) &stack_used;
-      size_t stack_free = thread->stack_size() - stack_used;
-      size_t zero_stack_size = align_size_down(stack_free / 2, wordSize);
-
+      size_t zero_stack_size = stack->suggest_size(thread);
       stack->setup(alloca(zero_stack_size), zero_stack_size);
       stack_needs_teardown = true;
     }
--- a/src/os/linux/vm/attachListener_linux.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp	Thu May 13 13:05:47 2010 -0700
@@ -461,7 +461,7 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[128];
+  char fn[PATH_MAX+1];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
--- a/src/os/linux/vm/os_linux.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu May 13 13:05:47 2010 -0700
@@ -2305,7 +2305,7 @@
     return;
   }
 
-  char buf[40];
+  char buf[PATH_MAX+1];
   int num = Atomic::add(1, &cnt);
 
   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
--- a/src/os/solaris/vm/attachListener_solaris.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os/solaris/vm/attachListener_solaris.cpp	Thu May 13 13:05:47 2010 -0700
@@ -592,7 +592,7 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[128];
+  char fn[PATH_MAX+1];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,9 @@
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        1024);
 #else
-// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
-// to run while keeping the number of threads that can be created high.
-// System default ThreadStackSize appears to be 512 which is too big.
+// ThreadStackSize 320 allows a couple of test cases to run while
+// keeping the number of threads that can be created high.  System
+// default ThreadStackSize appears to be 512 which is too big.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
 #endif // AMD64
--- a/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Thu May 13 13:05:47 2010 -0700
@@ -68,19 +68,30 @@
 
  public:
   void set_last_Java_frame() {
-    set_last_Java_frame(top_zero_frame());
+    set_last_Java_frame(top_zero_frame(), zero_stack()->sp());
   }
   void reset_last_Java_frame() {
-    set_last_Java_frame(NULL);
+    frame_anchor()->zap();
+  }
+  void set_last_Java_frame(ZeroFrame* fp, intptr_t* sp) {
+    frame_anchor()->set(sp, NULL, fp);
   }
-  void set_last_Java_frame(ZeroFrame* frame) {
-    frame_anchor()->set_last_Java_sp((intptr_t *) frame);
+
+ public:
+  ZeroFrame* last_Java_fp() {
+    return frame_anchor()->last_Java_fp();
   }
 
  private:
   frame pd_last_frame() {
     assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-    return frame(last_Java_sp(), zero_stack()->sp());
+    return frame(last_Java_fp(), last_Java_sp());
+  }
+
+ public:
+  static ByteSize last_Java_fp_offset() {
+    return byte_offset_of(JavaThread, _anchor) +
+      JavaFrameAnchor::last_Java_fp_offset();
   }
 
  public:
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,8 @@
 define_pd_global(intx, VMThreadStackSize,        1024);
 define_pd_global(uintx,JVMInvokeMethodSlack,     8*K);
 #else
-// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
-// to run while keeping the number of threads that can be created high.
+// ThreadStackSize 320 allows a couple of test cases to run while
+// keeping the number of threads that can be created high.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
 define_pd_global(uintx,JVMInvokeMethodSlack,     10*K);
--- a/src/share/vm/c1/c1_LIR.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1062,7 +1062,7 @@
       is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
       ||
       (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
-       method()->name()           == ciSymbol::invoke_name());
+       methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
   }
 
   intptr_t vtable_offset() const {
--- a/src/share/vm/ci/ciEnv.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Thu May 13 13:05:47 2010 -0700
@@ -731,26 +731,29 @@
 // ciEnv::get_fake_invokedynamic_method_impl
 ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
                                                     int index, Bytecodes::Code bc) {
+  // Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
   assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
 
-  // Get the CallSite from the constant pool cache.
-  ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
-  assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
-  Handle call_site = cpc_entry->f1();
+  bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
+  if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
+    // FIXME: code generation could allow for null (unlinked) call site
+    is_resolved = false;
 
-  // Call site might not be linked yet.
-  if (call_site.is_null()) {
+  // Call site might not be resolved yet.  We could create a real invoker method from the
+  // compiler, but it is simpler to stop the code path here with an unlinked method.
+  if (!is_resolved) {
     ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
-    ciSymbol*       sig_sym   = get_object(cpool->signature_ref_at(index))->as_symbol();
-    return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
+    ciSymbol*        sig_sym  = get_object(cpool->signature_ref_at(index))->as_symbol();
+    return get_unloaded_method(mh_klass, ciSymbol::invokeExact_name(), sig_sym);
   }
 
-  // Get the methodOop from the CallSite.
-  methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
-  assert(method_oop != NULL, "sanity");
-  assert(method_oop->is_method_handle_invoke(), "consistent");
+  // Get the invoker methodOop from the constant pool.
+  intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2();
+  methodOop signature_invoker = methodOop(f2_value);
+  assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
+         "correct result from LinkResolver::resolve_invokedynamic");
 
-  return get_object(method_oop)->as_method();
+  return get_object(signature_invoker)->as_method();
 }
 
 
--- a/src/share/vm/ci/ciObjectFactory.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Thu May 13 13:05:47 2010 -0700
@@ -103,7 +103,7 @@
     for (i = vmSymbols::FIRST_SID; i < vmSymbols::SID_LIMIT; i++) {
       symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at((vmSymbols::SID) i);
       assert(vmSymbols::find_sid(sym_handle()) == i, "1-1 mapping");
-      ciSymbol* sym = new (_arena) ciSymbol(sym_handle);
+      ciSymbol* sym = new (_arena) ciSymbol(sym_handle, (vmSymbols::SID) i);
       init_ident_of(sym);
       _shared_ci_symbols[i] = sym;
     }
@@ -273,7 +273,8 @@
 
   if (o->is_symbol()) {
     symbolHandle h_o(THREAD, (symbolOop)o);
-    return new (arena()) ciSymbol(h_o);
+    assert(vmSymbols::find_sid(h_o()) == vmSymbols::NO_SID, "");
+    return new (arena()) ciSymbol(h_o, vmSymbols::NO_SID);
   } else if (o->is_klass()) {
     KlassHandle h_k(THREAD, (klassOop)o);
     Klass* k = ((klassOop)o)->klass_part();
--- a/src/share/vm/ci/ciSymbol.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/ci/ciSymbol.cpp	Thu May 13 13:05:47 2010 -0700
@@ -29,7 +29,17 @@
 // ciSymbol::ciSymbol
 //
// Preallocated handle variant.  Used with handles from vmSymbolHandles.
-ciSymbol::ciSymbol(symbolHandle h_s) : ciObject(h_s) {
+ciSymbol::ciSymbol(symbolHandle h_s, vmSymbols::SID sid)
+  : ciObject(h_s), _sid(sid)
+{
+  assert(sid_ok(), "must be in vmSymbols");
+}
+
+// Normal case for non-famous symbols.
+ciSymbol::ciSymbol(symbolOop s)
+  : ciObject(s), _sid(vmSymbols::NO_SID)
+{
+  assert(sid_ok(), "must not be in vmSymbols");
 }
 
 // ciSymbol
--- a/src/share/vm/ci/ciSymbol.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/ci/ciSymbol.hpp	Thu May 13 13:05:47 2010 -0700
@@ -36,8 +36,11 @@
   friend class ciObjArrayKlass;
 
 private:
-  ciSymbol(symbolOop s) : ciObject(s) {}
-  ciSymbol(symbolHandle s);   // for use with vmSymbolHandles
+  const vmSymbols::SID _sid;
+  DEBUG_ONLY( bool sid_ok() { return vmSymbols::find_sid(get_symbolOop()) == _sid; } )
+
+  ciSymbol(symbolOop s);  // normal case, for symbols not mentioned in vmSymbols
+  ciSymbol(symbolHandle s, vmSymbols::SID sid);   // for use with vmSymbolHandles
 
   symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
 
@@ -52,6 +55,9 @@
   static ciSymbol* make_impl(const char* s);
 
 public:
+  // The enumeration ID from vmSymbols, or vmSymbols::NO_SID if none.
+  vmSymbols::SID sid() const { return _sid; }
+
   // The text of the symbol as a null-terminated utf8 string.
   const char* as_utf8();
   int         utf8_length();
--- a/src/share/vm/classfile/classFileParser.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1838,7 +1838,8 @@
     _has_vanilla_constructor = true;
   }
 
-  if (EnableMethodHandles && m->is_method_handle_invoke()) {
+  if (EnableMethodHandles && (m->is_method_handle_invoke() ||
+                              m->is_method_handle_adapter())) {
     THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
                "Method handle invokers must be defined internally to the VM", nullHandle);
   }
--- a/src/share/vm/classfile/dictionary.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/dictionary.cpp	Thu May 13 13:05:47 2010 -0700
@@ -561,10 +561,11 @@
 
 
 SymbolPropertyEntry* SymbolPropertyTable::find_entry(int index, unsigned int hash,
-                                                     symbolHandle sym) {
-  assert(index == index_for(sym), "incorrect index?");
+                                                     symbolHandle sym,
+                                                     intptr_t sym_mode) {
+  assert(index == index_for(sym, sym_mode), "incorrect index?");
   for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) {
-    if (p->hash() == hash && p->symbol() == sym()) {
+    if (p->hash() == hash && p->symbol() == sym() && p->symbol_mode() == sym_mode) {
       return p;
     }
   }
@@ -573,12 +574,12 @@
 
 
 SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash,
-                                                    symbolHandle sym) {
+                                                    symbolHandle sym, intptr_t sym_mode) {
   assert_locked_or_safepoint(SystemDictionary_lock);
-  assert(index == index_for(sym), "incorrect index?");
-  assert(find_entry(index, hash, sym) == NULL, "no double entry");
+  assert(index == index_for(sym, sym_mode), "incorrect index?");
+  assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry");
 
-  SymbolPropertyEntry* p = new_entry(hash, sym());
+  SymbolPropertyEntry* p = new_entry(hash, sym(), sym_mode);
   Hashtable::add_entry(index, p);
   return p;
 }
--- a/src/share/vm/classfile/dictionary.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/dictionary.hpp	Thu May 13 13:05:47 2010 -0700
@@ -223,12 +223,16 @@
 class SymbolPropertyEntry : public HashtableEntry {
   friend class VMStructs;
  private:
+  intptr_t _symbol_mode;  // secondary key
   oop     _property_oop;
   address _property_data;
 
  public:
   symbolOop symbol() const          { return (symbolOop) literal(); }
 
+  intptr_t symbol_mode() const      { return _symbol_mode; }
+  void set_symbol_mode(intptr_t m)  { _symbol_mode = m; }
+
   oop      property_oop() const     { return _property_oop; }
   void set_property_oop(oop p)      { _property_oop = p; }
 
@@ -248,6 +252,7 @@
 
   void print_on(outputStream* st) const {
     symbol()->print_value_on(st);
+    st->print("/mode="INTX_FORMAT, symbol_mode());
     st->print(" -> ");
     bool printed = false;
     if (property_oop() != NULL) {
@@ -285,8 +290,9 @@
     ShouldNotReachHere();
   }
 
-  SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol) {
+  SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol, intptr_t symbol_mode) {
     SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable::new_entry(hash, symbol);
+    entry->set_symbol_mode(symbol_mode);
     entry->set_property_oop(NULL);
     entry->set_property_data(NULL);
     return entry;
@@ -300,16 +306,20 @@
     Hashtable::free_entry(entry);
   }
 
-  unsigned int compute_hash(symbolHandle sym) {
+  unsigned int compute_hash(symbolHandle sym, intptr_t symbol_mode) {
     // Use the regular identity_hash.
-    return Hashtable::compute_hash(sym);
+    return Hashtable::compute_hash(sym) ^ symbol_mode;
+  }
+
+  int index_for(symbolHandle name, intptr_t symbol_mode) {
+    return hash_to_index(compute_hash(name, symbol_mode));
   }
 
   // need not be locked; no state change
-  SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name);
+  SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
 
   // must be done under SystemDictionary_lock
-  SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name);
+  SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
 
   // GC support
   void oops_do(OopClosure* f);
--- a/src/share/vm/classfile/javaClasses.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu May 13 13:05:47 2010 -0700
@@ -2446,24 +2446,20 @@
 
 // Support for java_dyn_CallSite
 
-int java_dyn_CallSite::_type_offset;
 int java_dyn_CallSite::_target_offset;
-int java_dyn_CallSite::_vmmethod_offset;
+int java_dyn_CallSite::_caller_method_offset;
+int java_dyn_CallSite::_caller_bci_offset;
 
 void java_dyn_CallSite::compute_offsets() {
   if (!EnableInvokeDynamic)  return;
   klassOop k = SystemDictionary::CallSite_klass();
   if (k != NULL) {
-    compute_offset(_type_offset,   k, vmSymbols::type_name(),   vmSymbols::java_dyn_MethodType_signature(), true);
-    compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
-    compute_offset(_vmmethod_offset, k, vmSymbols::vmmethod_name(), vmSymbols::object_signature(), true);
+    compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature());
+    compute_offset(_caller_method_offset, k, vmSymbols::vmmethod_name(), vmSymbols::sun_dyn_MemberName_signature());
+    compute_offset(_caller_bci_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature());
   }
 }
 
-oop java_dyn_CallSite::type(oop site) {
-  return site->obj_field(_type_offset);
-}
-
 oop java_dyn_CallSite::target(oop site) {
   return site->obj_field(_target_offset);
 }
@@ -2472,12 +2468,20 @@
   site->obj_field_put(_target_offset, target);
 }
 
-oop java_dyn_CallSite::vmmethod(oop site) {
-  return site->obj_field(_vmmethod_offset);
+oop java_dyn_CallSite::caller_method(oop site) {
+  return site->obj_field(_caller_method_offset);
+}
+
+void java_dyn_CallSite::set_caller_method(oop site, oop ref) {
+  site->obj_field_put(_caller_method_offset, ref);
 }
 
-void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
-  site->obj_field_put(_vmmethod_offset, ref);
+jint java_dyn_CallSite::caller_bci(oop site) {
+  return site->int_field(_caller_bci_offset);
+}
+
+void java_dyn_CallSite::set_caller_bci(oop site, jint bci) {
+  site->int_field_put(_caller_bci_offset, bci);
 }
 
 
--- a/src/share/vm/classfile/javaClasses.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1068,21 +1068,22 @@
   friend class JavaClasses;
 
 private:
-  static int _type_offset;
   static int _target_offset;
-  static int _vmmethod_offset;
+  static int _caller_method_offset;
+  static int _caller_bci_offset;
 
   static void compute_offsets();
 
 public:
   // Accessors
-  static oop            type(oop site);
-
   static oop            target(oop site);
   static void       set_target(oop site, oop target);
 
-  static oop            vmmethod(oop site);
-  static void       set_vmmethod(oop site, oop ref);
+  static oop            caller_method(oop site);
+  static void       set_caller_method(oop site, oop ref);
+
+  static jint           caller_bci(oop site);
+  static void       set_caller_bci(oop site, jint bci);
 
   // Testers
   static bool is_subclass(klassOop klass) {
@@ -1094,8 +1095,8 @@
 
   // Accessors for code generation:
   static int target_offset_in_bytes()           { return _target_offset; }
-  static int type_offset_in_bytes()             { return _type_offset; }
-  static int vmmethod_offset_in_bytes()         { return _vmmethod_offset; }
+  static int caller_method_offset_in_bytes()    { return _caller_method_offset; }
+  static int caller_bci_offset_in_bytes()       { return _caller_bci_offset; }
 };
 
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu May 13 13:05:47 2010 -0700
@@ -2341,118 +2341,150 @@
 }
 
 
-methodOop SystemDictionary::find_method_handle_invoke(symbolHandle signature,
-                                                      Handle class_loader,
-                                                      Handle protection_domain,
+methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
+                                                      symbolHandle signature,
+                                                      KlassHandle accessing_klass,
                                                       TRAPS) {
   if (!EnableMethodHandles)  return NULL;
-  assert(class_loader.is_null() && protection_domain.is_null(),
-         "cannot load specialized versions of MethodHandle.invoke");
   if (invoke_method_table() == NULL) {
     // create this side table lazily
     _invoke_method_table = new SymbolPropertyTable(_invoke_method_size);
   }
-  unsigned int hash  = invoke_method_table()->compute_hash(signature);
+  vmSymbols::SID name_id = vmSymbols::find_sid(name());
+  assert(name_id != vmSymbols::NO_SID, "must be a known name");
+  unsigned int hash  = invoke_method_table()->compute_hash(signature, name_id);
   int          index = invoke_method_table()->hash_to_index(hash);
-  SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
+  SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
+  methodHandle non_cached_result;
   if (spe == NULL || spe->property_oop() == NULL) {
+    spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
     if (THREAD->is_Compiler_thread())
       return NULL;              // do not attempt from within compiler
-    Handle mt = compute_method_handle_type(signature(),
-                                           class_loader, protection_domain,
-                                           CHECK_NULL);
+    bool found_on_bcp = false;
+    Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL);
     KlassHandle  mh_klass = SystemDictionaryHandles::MethodHandle_klass();
-    methodHandle m = methodOopDesc::make_invoke_method(mh_klass, signature,
+    methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
                                                        mt, CHECK_NULL);
     // Now grab the lock.  We might have to throw away the new method,
     // if a racing thread has managed to install one at the same time.
-    {
+    if (found_on_bcp) {
       MutexLocker ml(SystemDictionary_lock, Thread::current());
-      spe = invoke_method_table()->find_entry(index, hash, signature);
+      spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
       if (spe == NULL)
-        spe = invoke_method_table()->add_entry(index, hash, signature);
+        spe = invoke_method_table()->add_entry(index, hash, signature, name_id);
       if (spe->property_oop() == NULL)
         spe->set_property_oop(m());
+    } else {
+      non_cached_result = m;
     }
   }
-  methodOop m = (methodOop) spe->property_oop();
-  assert(m->is_method(), "");
-  return m;
+  if (spe != NULL && spe->property_oop() != NULL) {
+    assert(spe->property_oop()->is_method(), "");
+    return (methodOop) spe->property_oop();
+  } else {
+    return non_cached_result();
+  }
 }
 
 // Ask Java code to find or construct a java.dyn.MethodType for the given
 // signature, as interpreted relative to the given class loader.
 // Because of class loader constraints, all method handle usage must be
 // consistent with this loader.
-Handle SystemDictionary::compute_method_handle_type(symbolHandle signature,
-                                                    Handle class_loader,
-                                                    Handle protection_domain,
-                                                    TRAPS) {
+Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
+                                                 KlassHandle accessing_klass,
+                                                 bool& return_bcp_flag,
+                                                 TRAPS) {
+  Handle class_loader, protection_domain;
+  bool is_on_bcp = true;  // keep this true as long as we can materialize from the boot classloader
   Handle empty;
   int npts = ArgumentCount(signature()).size();
   objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
   int arg = 0;
   Handle rt;                            // the return type from the signature
   for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
-    oop mirror;
-    if (!ss.is_object()) {
-      mirror = Universe::java_mirror(ss.type());
-    } else {
-      symbolOop    name_oop = ss.as_symbol(CHECK_(empty));
-      symbolHandle name(THREAD, name_oop);
-      klassOop klass = resolve_or_fail(name,
-                                       class_loader, protection_domain,
-                                       true, CHECK_(empty));
-      mirror = Klass::cast(klass)->java_mirror();
+    oop mirror = NULL;
+    if (is_on_bcp) {
+      mirror = ss.as_java_mirror(class_loader, protection_domain,
+                                 SignatureStream::ReturnNull, CHECK_(empty));
+      if (mirror == NULL) {
+        // fall back from BCP to accessing_klass
+        if (accessing_klass.not_null()) {
+          class_loader      = Handle(THREAD, instanceKlass::cast(accessing_klass())->class_loader());
+          protection_domain = Handle(THREAD, instanceKlass::cast(accessing_klass())->protection_domain());
+        }
+        is_on_bcp = false;
+      }
+    }
+    if (!is_on_bcp) {
+      // Resolve, throwing a real error if it doesn't work.
+      mirror = ss.as_java_mirror(class_loader, protection_domain,
+                                 SignatureStream::NCDFError, CHECK_(empty));
     }
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
       pts->obj_at_put(arg++, mirror);
+    // Check accessibility.
+    if (ss.is_object() && accessing_klass.not_null()) {
+      klassOop sel_klass = java_lang_Class::as_klassOop(mirror);
+      // Emulate constantPoolOopDesc::verify_constant_pool_resolve.
+      if (Klass::cast(sel_klass)->oop_is_objArray())
+        sel_klass = objArrayKlass::cast(sel_klass)->bottom_klass();
+      if (Klass::cast(sel_klass)->oop_is_instance()) {
+        KlassHandle sel_kh(THREAD, sel_klass);
+        LinkResolver::check_klass_accessability(accessing_klass, sel_kh, CHECK_(empty));
+      }
+    }
   }
   assert(arg == npts, "");
 
-  // call MethodType java.dyn.MethodType::makeImpl(Class rt, Class[] pts, false, true)
-  bool varargs = false, trusted = true;
+  // call sun.dyn.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType
   JavaCallArguments args(Handle(THREAD, rt()));
   args.push_oop(pts());
-  args.push_int(false);
-  args.push_int(trusted);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::MethodType_klass(),
-                         vmSymbols::makeImpl_name(), vmSymbols::makeImpl_signature(),
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::findMethodHandleType_name(),
+                         vmSymbols::findMethodHandleType_signature(),
                          &args, CHECK_(empty));
+
+  // report back to the caller with the MethodType and the "on_bcp" flag
+  return_bcp_flag = is_on_bcp;
   return Handle(THREAD, (oop) result.get_jobject());
 }
 
 
 // Ask Java code to find or construct a java.dyn.CallSite for the given
 // name and signature, as interpreted relative to the given class loader.
-Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
-                                                int caller_method_idnum,
+Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
+                                                symbolHandle name,
+                                                methodHandle signature_invoker,
+                                                Handle info,
+                                                methodHandle caller_method,
                                                 int caller_bci,
-                                                symbolHandle name,
-                                                methodHandle mh_invdyn,
                                                 TRAPS) {
   Handle empty;
-  // call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
+  Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
+  MethodHandles::init_MemberName(caller_mname(), caller_method());
+
+  // call sun.dyn.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos)
   oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
-  JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
+  JavaCallArguments args(Handle(THREAD, bootstrap_method()));
   args.push_oop(name_str_oop);
-  args.push_oop(mh_invdyn->method_handle_type());
-  args.push_int(caller_method_idnum);
+  args.push_oop(signature_invoker->method_handle_type());
+  args.push_oop(info());
+  args.push_oop(caller_mname());
   args.push_int(caller_bci);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::CallSite_klass(),
-                         vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::makeDynamicCallSite_name(),
+                         vmSymbols::makeDynamicCallSite_signature(),
                          &args, CHECK_(empty));
   oop call_site_oop = (oop) result.get_jobject();
   assert(call_site_oop->is_oop()
          /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
-  java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
   if (TraceMethodHandles) {
 #ifndef PRODUCT
     tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
@@ -2463,9 +2495,7 @@
   return call_site_oop;
 }
 
-Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
-                                               KlassHandle search_bootstrap_klass,
-                                               TRAPS) {
+Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) {
   Handle empty;
   if (!caller->oop_is_instance())  return empty;
 
@@ -2476,57 +2506,12 @@
     if (TraceMethodHandles) {
       tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
     }
-    NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
     assert(boot_method_oop->is_oop()
            && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
     return Handle(THREAD, boot_method_oop);
   }
-  boot_method_oop = NULL;  // GC safety
 
-  // call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
-  JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
-  if (search_bootstrap_klass.is_null())
-    args.push_oop(Handle());
-  else
-    args.push_oop(search_bootstrap_klass->java_mirror());
-  JavaValue result(T_OBJECT);
-  JavaCalls::call_static(&result,
-                         SystemDictionary::Linkage_klass(),
-                         vmSymbols::findBootstrapMethod_name(),
-                         vmSymbols::findBootstrapMethod_signature(),
-                         &args, CHECK_(empty));
-  boot_method_oop = (oop) result.get_jobject();
-
-  if (boot_method_oop != NULL) {
-    if (TraceMethodHandles) {
-#ifndef PRODUCT
-      tty->print_cr("--------");
-      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
-      ik()->print();
-      boot_method_oop->print();
-      tty->print_cr("========");
-#endif //PRODUCT
-    }
-    assert(boot_method_oop->is_oop()
-           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
-    // probably no race conditions, but let's be careful:
-    if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
-      ik->set_bootstrap_method(boot_method_oop);
-    else
-      boot_method_oop = ik->bootstrap_method();
-  } else {
-    if (TraceMethodHandles) {
-#ifndef PRODUCT
-      tty->print_cr("--------");
-      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
-      ik()->print();
-      tty->print_cr("========");
-#endif //PRODUCT
-    }
-    boot_method_oop = ik->bootstrap_method();
-  }
-
-  return Handle(THREAD, boot_method_oop);
+  return empty;
 }
 
 // Since the identity hash code for symbols changes when the symbols are
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Thu May 13 13:05:47 2010 -0700
@@ -136,6 +136,7 @@
   template(MethodHandle_klass,           java_dyn_MethodHandle,          Opt) \
   template(MemberName_klass,             sun_dyn_MemberName,             Opt) \
   template(MethodHandleImpl_klass,       sun_dyn_MethodHandleImpl,       Opt) \
+  template(MethodHandleNatives_klass,    sun_dyn_MethodHandleNatives,    Opt) \
   template(AdapterMethodHandle_klass,    sun_dyn_AdapterMethodHandle,    Opt) \
   template(BoundMethodHandle_klass,      sun_dyn_BoundMethodHandle,      Opt) \
   template(DirectMethodHandle_klass,     sun_dyn_DirectMethodHandle,     Opt) \
@@ -463,29 +464,28 @@
 
   // JSR 292
   // find the java.dyn.MethodHandles::invoke method for a given signature
-  static methodOop find_method_handle_invoke(symbolHandle signature,
-                                             Handle class_loader,
-                                             Handle protection_domain,
+  static methodOop find_method_handle_invoke(symbolHandle name,
+                                             symbolHandle signature,
+                                             KlassHandle accessing_klass,
                                              TRAPS);
-  // ask Java to compute the java.dyn.MethodType object for a given signature
-  static Handle    compute_method_handle_type(symbolHandle signature,
-                                              Handle class_loader,
-                                              Handle protection_domain,
-                                              TRAPS);
+  // ask Java to compute a java.dyn.MethodType object for a given signature
+  static Handle    find_method_handle_type(symbolHandle signature,
+                                           KlassHandle accessing_klass,
+                                           bool& return_bcp_flag,
+                                           TRAPS);
   // ask Java to create a dynamic call site, while linking an invokedynamic op
-  static Handle    make_dynamic_call_site(KlassHandle caller,
-                                          int caller_method_idnum,
+  static Handle    make_dynamic_call_site(Handle bootstrap_method,
+                                          // Callee information:
+                                          symbolHandle name,
+                                          methodHandle signature_invoker,
+                                          Handle info,
+                                          // Caller information:
+                                          methodHandle caller_method,
                                           int caller_bci,
-                                          symbolHandle name,
-                                          methodHandle mh_invoke,
                                           TRAPS);
 
   // coordinate with Java about bootstrap methods
-  static Handle    find_bootstrap_method(KlassHandle caller,
-                                         // This argument is non-null only when a
-                                         // classfile attribute has been found:
-                                         KlassHandle search_bootstrap_klass,
-                                         TRAPS);
+  static Handle    find_bootstrap_method(KlassHandle caller, TRAPS);
 
   // Utility for printing loader "name" as part of tracing constraints
   static const char* loader_name(oop loader) {
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu May 13 13:05:47 2010 -0700
@@ -137,6 +137,7 @@
   template(java_lang_CloneNotSupportedException,      "java/lang/CloneNotSupportedException")     \
   template(java_lang_IllegalAccessException,          "java/lang/IllegalAccessException")         \
   template(java_lang_IllegalArgumentException,        "java/lang/IllegalArgumentException")       \
+  template(java_lang_IllegalStateException,           "java/lang/IllegalStateException")          \
   template(java_lang_IllegalMonitorStateException,    "java/lang/IllegalMonitorStateException")   \
   template(java_lang_IllegalThreadStateException,     "java/lang/IllegalThreadStateException")    \
   template(java_lang_IndexOutOfBoundsException,       "java/lang/IndexOutOfBoundsException")      \
@@ -201,6 +202,11 @@
   template(newField_signature,                        "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \
   template(newMethod_name,                            "newMethod")                                \
   template(newMethod_signature,                       "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \
+  /* the following two names must be in order: */                                                 \
+  template(invokeExact_name,                          "invokeExact")                              \
+  template(invokeGeneric_name,                        "invokeGeneric")                            \
+  template(invokeVarargs_name,                        "invokeVarargs")                            \
+  template(star_name,                                 "*") /*not really a name*/                  \
   template(invoke_name,                               "invoke")                                   \
   template(override_name,                             "override")                                 \
   template(parameterTypes_name,                       "parameterTypes")                           \
@@ -231,16 +237,17 @@
   template(java_dyn_MethodTypeForm,                   "java/dyn/MethodTypeForm")                  \
   template(java_dyn_MethodTypeForm_signature,         "Ljava/dyn/MethodTypeForm;")                \
   template(sun_dyn_MemberName,                        "sun/dyn/MemberName")                       \
+  template(sun_dyn_MemberName_signature,              "Lsun/dyn/MemberName;")                     \
   template(sun_dyn_MethodHandleImpl,                  "sun/dyn/MethodHandleImpl")                 \
+  template(sun_dyn_MethodHandleNatives,               "sun/dyn/MethodHandleNatives")              \
   template(sun_dyn_AdapterMethodHandle,               "sun/dyn/AdapterMethodHandle")              \
   template(sun_dyn_BoundMethodHandle,                 "sun/dyn/BoundMethodHandle")                \
   template(sun_dyn_DirectMethodHandle,                "sun/dyn/DirectMethodHandle")               \
-  template(makeImpl_name,                             "makeImpl") /*MethodType::makeImpl*/        \
-  template(makeImpl_signature,    "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
-  template(makeSite_name,                             "makeSite") /*CallSite::makeSite*/          \
-  template(makeSite_signature,    "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
-  template(findBootstrapMethod_name,                  "findBootstrapMethod")                      \
-  template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
+  /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
+  template(findMethodHandleType_name,                 "findMethodHandleType")                     \
+  template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
+  template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
+  template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
   NOT_LP64(  do_alias(machine_word_signature,         int_signature)  )                           \
   LP64_ONLY( do_alias(machine_word_signature,         long_signature) )                           \
                                                                                                   \
@@ -408,8 +415,9 @@
   template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                                \
   template(void_object_signature,                     "()Ljava/lang/Object;")                                     \
   template(void_class_signature,                      "()Ljava/lang/Class;")                                      \
-  template(void_string_signature,                     "()Ljava/lang/String;")                                      \
-  template(object_array_object_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
+  template(void_string_signature,                     "()Ljava/lang/String;")                                     \
+  template(object_array_object_signature,             "([Ljava/lang/Object;)Ljava/lang/Object;")                  \
+  template(object_object_array_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
   template(exception_void_signature,                  "(Ljava/lang/Exception;)V")                                 \
   template(protectiondomain_signature,                "[Ljava/security/ProtectionDomain;")                        \
   template(accesscontrolcontext_signature,            "Ljava/security/AccessControlContext;")                     \
@@ -863,11 +871,15 @@
   do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,        F_R)   \
   /*    (symbol object_initializer_name defined above) */                                                                 \
                                                                                                                           \
-  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \
   /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
   do_intrinsic(_checkSpreadArgument,      sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
    do_name(    checkSpreadArgument_name,       "checkSpreadArgument")                                                   \
    do_name(    checkSpreadArgument_signature,  "(Ljava/lang/Object;I)V")                                                \
+  do_intrinsic(_invokeExact,              java_dyn_MethodHandle, invokeExact_name,   object_array_object_signature, F_RN) \
+  do_intrinsic(_invokeGeneric,            java_dyn_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \
+  do_intrinsic(_invokeVarargs,            java_dyn_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R)  \
+  do_intrinsic(_invokeDynamic,            java_dyn_InvokeDynamic, star_name,         object_array_object_signature, F_SN) \
                                                                                                                         \
   /* unboxing methods: */                                                                                               \
   do_intrinsic(_booleanValue,             java_lang_Boolean,      booleanValue_name, void_boolean_signature, F_R)       \
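
The vmSymbols.hpp and vmIntrinsics hunks above extend two X-macro tables: template(...) entries declare interned names, and do_intrinsic(...) entries bind them to intrinsic IDs; the "must be in order" comment exists because adjacent enum values allow cheap range checks. A small self-contained sketch of that X-macro idiom with hypothetical macros (MY_SYMBOLS_DO, MAKE_ENUM, MAKE_STRING), not the real vmSymbols machinery:

    #include <cstdio>

    // One master list; each entry is (enum name, string literal).
    #define MY_SYMBOLS_DO(template_)                     \
      template_(invokeExact_name,   "invokeExact")       \
      template_(invokeGeneric_name, "invokeGeneric")     \
      template_(invokeVarargs_name, "invokeVarargs")     \
      template_(star_name,          "*")

    // Expansion 1: an enum of symbol ids.
    enum SymbolId {
    #define MAKE_ENUM(id, str) id,
      MY_SYMBOLS_DO(MAKE_ENUM)
    #undef MAKE_ENUM
      SYMBOL_LIMIT
    };

    // Expansion 2: the parallel string table, kept in sync automatically.
    static const char* const symbol_names[SYMBOL_LIMIT] = {
    #define MAKE_STRING(id, str) str,
      MY_SYMBOLS_DO(MAKE_STRING)
    #undef MAKE_STRING
    };

    int main() {
      for (int i = 0; i < SYMBOL_LIMIT; i++)
        std::printf("%d -> %s\n", i, symbol_names[i]);
      // Adjacent enum values are what make "the following two names must be
      // in order" matter: a range test can then recognize both invoke names.
      return 0;
    }
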
--- a/src/share/vm/includeDB_core	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/includeDB_core	Thu May 13 13:05:47 2010 -0700
@@ -2867,6 +2867,7 @@
 methodHandles.hpp                       globals.hpp
 methodHandles.hpp                       interfaceSupport.hpp
 methodHandles.hpp                       javaClasses.hpp
+methodHandles.hpp                       no_precompiled_headers
 methodHandles.hpp                       vmSymbols.hpp
 
 methodHandles.cpp                       allocation.inline.hpp
@@ -2930,6 +2931,7 @@
 methodOop.cpp                           jvmtiExport.hpp
 methodOop.cpp                           klassOop.hpp
 methodOop.cpp                           methodDataOop.hpp
+methodOop.cpp                           methodHandleWalk.hpp
 methodOop.cpp                           methodOop.hpp
 methodOop.cpp                           nativeLookup.hpp
 methodOop.cpp                           oop.inline.hpp
@@ -4075,6 +4077,7 @@
 systemDictionary.cpp                    klass.inline.hpp
 systemDictionary.cpp                    loaderConstraints.hpp
 systemDictionary.cpp                    methodDataOop.hpp
+systemDictionary.cpp                    methodHandles.hpp
 systemDictionary.cpp                    mutexLocker.hpp
 systemDictionary.cpp                    objArrayKlass.hpp
 systemDictionary.cpp                    oop.inline.hpp
--- a/src/share/vm/includeDB_zero	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/includeDB_zero	Thu May 13 13:05:47 2010 -0700
@@ -61,6 +61,7 @@
 
 stack_<arch>.cpp                        interpreterRuntime.hpp
 stack_<arch>.cpp                        stack_<arch>.hpp
+stack_<arch>.cpp                        stack_<arch>.inline.hpp
 
 stubGenerator_<arch>.cpp                stack_<arch>.inline.hpp
 
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Thu May 13 13:05:47 2010 -0700
@@ -167,60 +167,15 @@
   // Debugging/printing
   static void       print();                                    // prints the interpreter code
 
-  // Support for Tagged Stacks
-  //
-  // Tags are stored on the Java Expression stack above the value:
-  //
-  //  tag
-  //  value
-  //
-  // For double values:
-  //
-  //  tag2
-  //  high word
-  //  tag1
-  //  low word
-
  public:
-  static int stackElementWords()   { return TaggedStackInterpreter ? 2 : 1; }
-  static int stackElementSize()    { return stackElementWords()*wordSize; }
-  static int logStackElementSize() { return
-                 TaggedStackInterpreter? LogBytesPerWord+1 : LogBytesPerWord; }
-
-  // Tag is at pointer, value is one below for a stack growing down
-  // (or above for stack growing up)
-  static int  value_offset_in_bytes()  {
-    return TaggedStackInterpreter ?
-      frame::interpreter_frame_expression_stack_direction() * wordSize : 0;
-  }
-  static int  tag_offset_in_bytes()    {
-    assert(TaggedStackInterpreter, "should not call this");
-    return 0;
-  }
-
-  // Tagged Locals
-  // Locals are stored relative to Llocals:
-  //
-  // tag    <- Llocals[n]
-  // value
-  //
-  // Category 2 types are indexed as:
-  //
-  // tag    <- Llocals[-n]
-  // high word
-  // tag    <- Llocals[-n+1]
-  // low word
-  //
+  // Interpreter helpers
+  const static int stackElementWords   = 1;
+  const static int stackElementSize    = stackElementWords * wordSize;
+  const static int logStackElementSize = LogBytesPerWord;
 
   // Local values relative to locals[n]
   static int  local_offset_in_bytes(int n) {
-    return ((frame::interpreter_frame_expression_stack_direction() * n) *
-            stackElementSize()) + value_offset_in_bytes();
-  }
-  static int  local_tag_offset_in_bytes(int n) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return ((frame::interpreter_frame_expression_stack_direction() * n) *
-            stackElementSize()) + tag_offset_in_bytes();
+    return ((frame::interpreter_frame_expression_stack_direction() * n) * stackElementSize);
   }
 
   // access to stacked values according to type:
@@ -237,29 +192,15 @@
   static jlong long_in_slot(intptr_t* slot_addr) {
     if (sizeof(intptr_t) >= sizeof(jlong)) {
       return *(jlong*) slot_addr;
-    } else if (!TaggedStackInterpreter) {
+    } else {
       return Bytes::get_native_u8((address)slot_addr);
-    } else {
-      assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
-      // assemble the long in memory order (not arithmetic order)
-      union { jlong j; jint i[2]; } u;
-      u.i[0] = (jint) slot_addr[0*stackElementSize()];
-      u.i[1] = (jint) slot_addr[1*stackElementSize()];
-      return u.j;
     }
   }
   static void set_long_in_slot(intptr_t* slot_addr, jlong value) {
     if (sizeof(intptr_t) >= sizeof(jlong)) {
       *(jlong*) slot_addr = value;
-    } else if (!TaggedStackInterpreter) {
+    } else {
       Bytes::put_native_u8((address)slot_addr, value);
-    } else {
-      assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
-      // assemble the long in memory order (not arithmetic order)
-      union { jlong j; jint i[2]; } u;
-      u.j = value;
-      slot_addr[0*stackElementSize()] = (intptr_t) u.i[0];
-      slot_addr[1*stackElementSize()] = (intptr_t) u.i[1];
     }
   }
   static void get_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
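
With the tagged-stack variants removed, long_in_slot/set_long_in_slot above keep only the word-width test plus a byte-copy fallback for 32-bit VMs. A standalone sketch of that logic, with std::memcpy standing in for Bytes::get_native_u8/put_native_u8 and hypothetical names slot_t, long_in_slot:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    typedef intptr_t slot_t;   // one expression-stack or local slot = one machine word

    // Read a 64-bit value that occupies one slot (LP64) or two adjacent slots (ILP32).
    static int64_t long_in_slot(const slot_t* slot_addr) {
      if (sizeof(slot_t) >= sizeof(int64_t)) {
        return *(const int64_t*)slot_addr;             // fits in a single word
      } else {
        int64_t v;                                     // spans two words: copy raw bytes
        std::memcpy(&v, slot_addr, sizeof(v));         // stands in for Bytes::get_native_u8
        return v;
      }
    }

    static void set_long_in_slot(slot_t* slot_addr, int64_t value) {
      if (sizeof(slot_t) >= sizeof(int64_t)) {
        *(int64_t*)slot_addr = value;
      } else {
        std::memcpy(slot_addr, &value, sizeof(value)); // stands in for Bytes::put_native_u8
      }
    }

    int main() {
      slot_t slots[2] = {0, 0};
      set_long_in_slot(slots, 0x1122334455667788LL);
      std::printf("%llx\n", (unsigned long long) long_in_slot(slots));
      return 0;
    }
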
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,7 +189,7 @@
 
 // JavaStack Implementation
 #define MORE_STACK(count)  \
-    (topOfStack -= ((count) * Interpreter::stackElementWords()))
+    (topOfStack -= ((count) * Interpreter::stackElementWords))
 
 
 #define UPDATE_PC(opsize) {pc += opsize; }
@@ -1950,8 +1950,8 @@
         jint size = STACK_INT(-1);
         // stack grows down, dimensions are up!
         jint *dimarray =
-                   (jint*)&topOfStack[dims * Interpreter::stackElementWords()+
-                                      Interpreter::stackElementWords()-1];
+                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
+                                      Interpreter::stackElementWords-1];
         //adjust pointer to start of stack element
         CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                 handle_exception);
@@ -2375,7 +2375,7 @@
     assert(except_oop(), "No exception to process");
     intptr_t continuation_bci;
     // expression stack is emptied
-    topOfStack = istate->stack_base() - Interpreter::stackElementWords();
+    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
     CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
             handle_exception);
 
@@ -2692,219 +2692,141 @@
 // The implementations are platform dependent. We have to worry about alignment
 // issues on some machines which can change on the same platform depending on
 // whether it is an LP64 machine also.
-#ifdef ASSERT
-void BytecodeInterpreter::verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag)tos[Interpreter::expr_tag_index_at(-offset)];
-    assert(t == tag, "stack tag mismatch");
-  }
-}
-#endif // ASSERT
-
 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return (address) tos[Interpreter::expr_index_at(-offset)];
 }
 
 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
 }
 
 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
 }
 
 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagReference, offset));
   return (oop)tos [Interpreter::expr_index_at(-offset)];
 }
 
 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
   return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
 }
 
 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
   return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
 }
 
-void BytecodeInterpreter::tag_stack(intptr_t *tos, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter)
-    tos[Interpreter::expr_tag_index_at(-offset)] = (intptr_t)tag;
-}
-
 // only used for value types
 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
                                                         int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
                                                        int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
                                                          int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
                                                           int offset) {
-  tag_stack(tos, frame::TagReference, offset);
   *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 // needs to be platform dep for the 32 bit platforms.
 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
                                                           int offset) {
-  tag_stack(tos, frame::TagValue, offset);
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
 }
 
 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
                                               address addr, int offset) {
-  tag_stack(tos, frame::TagValue, offset);
-  tag_stack(tos, frame::TagValue, offset-1);
   (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
                         ((VMJavaVal64*)addr)->d);
 }
 
 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
                                                         int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
 }
 
 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
                                             address addr, int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
                         ((VMJavaVal64*)addr)->l;
 }
 
 // Locals
 
-#ifdef ASSERT
-void BytecodeInterpreter::verify_locals_tag(intptr_t *locals, frame::Tag tag,
-                                     int offset) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag)locals[Interpreter::local_tag_index_at(-offset)];
-    assert(t == tag, "locals tag mismatch");
-  }
-}
-#endif // ASSERT
 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (address)locals[Interpreter::local_index_at(-offset)];
 }
 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (jint)locals[Interpreter::local_index_at(-offset)];
 }
 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (jfloat)locals[Interpreter::local_index_at(-offset)];
 }
 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagReference, offset));
   return (oop)locals[Interpreter::local_index_at(-offset)];
 }
 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
 }
 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
 }
 
 // Returns the address of locals value.
 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
 }
 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
 }
 
-void BytecodeInterpreter::tag_locals(intptr_t *locals, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter)
-    locals[Interpreter::local_tag_index_at(-offset)] = (intptr_t)tag;
-}
-
 // Used for local value or returnAddress
 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                    address value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                    jint value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                    jfloat value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                    oop value, int offset) {
-  tag_locals(locals, frame::TagReference, offset);
   *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                    jdouble value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
 }
 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                    jlong value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
 }
 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                    address addr, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
 }
 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                    address addr, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
 }
 
 void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                           intptr_t* locals, int locals_offset) {
-  // Copy tag from stack to locals.  astore's operand can be returnAddress
-  // and may not be TagReference
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag) tos[Interpreter::expr_tag_index_at(-stack_offset)];
-    locals[Interpreter::local_tag_index_at(-locals_offset)] = (intptr_t)t;
-  }
   intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
   locals[Interpreter::local_index_at(-locals_offset)] = value;
 }
@@ -2912,10 +2834,6 @@
 
 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                    int to_offset) {
-  if (TaggedStackInterpreter) {
-    tos[Interpreter::expr_tag_index_at(-to_offset)] =
-                      (intptr_t)tos[Interpreter::expr_tag_index_at(-from_offset)];
-  }
   tos[Interpreter::expr_index_at(-to_offset)] =
                       (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
 }
@@ -2964,16 +2882,9 @@
 void BytecodeInterpreter::swap(intptr_t *tos) {
   // swap top two elements
   intptr_t val = tos[Interpreter::expr_index_at(1)];
-  frame::Tag t;
-  if (TaggedStackInterpreter) {
-    t = (frame::Tag) tos[Interpreter::expr_tag_index_at(1)];
-  }
   // Copy -2 entry to -1
   copy_stack_slot(tos, -2, -1);
   // Store saved -1 entry into -2
-  if (TaggedStackInterpreter) {
-    tos[Interpreter::expr_tag_index_at(2)] = (intptr_t)t;
-  }
   tos[Interpreter::expr_index_at(2)] = val;
 }
 // --------------------------------------------------------------------------------
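
After the removal of TaggedStackInterpreter support above, the C++ interpreter's stack and locals accessors reduce to plain word indexing from the top-of-stack pointer, with a VMJavaVal64-style union for category-2 values. A toy, self-contained model of such an untagged, downward-growing expression stack (ExprStack and its methods are invented for illustration and do not mirror HotSpot's exact index convention):

    #include <cstdint>
    #include <cstdio>

    typedef intptr_t slot_t;

    // 64-bit values are stored through a union, mirroring the VMJavaVal64 idiom.
    union Val64 { double d; int64_t l; };

    // A toy downward-growing expression stack: tos points at the most recently
    // pushed slot, so offset 1 is the top element.
    struct ExprStack {
      slot_t  slots[16];
      slot_t* tos;

      ExprStack() { tos = &slots[16]; }              // empty: tos at the stack base

      void    push_int(int32_t v)  { *--tos = (slot_t)v; }
      int32_t int_at(int offset)   { return (int32_t)tos[offset - 1]; }

      void push_double(double v) {                   // category-2 value: two slots, no tags
        tos -= 2;
        ((Val64*)tos)->d = v;
      }
      double double_at(int offset) { return ((Val64*)&tos[offset - 1])->d; }
    };

    int main() {
      ExprStack s;
      s.push_int(42);
      std::printf("top int    = %d\n", s.int_at(1));
      s.push_double(3.5);
      std::printf("top double = %f\n", s.double_at(1));
      return 0;
    }
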
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -510,8 +510,6 @@
 static jdouble stack_double(intptr_t *tos, int offset);
 static jlong stack_long(intptr_t *tos, int offset);
 
-static void tag_stack(intptr_t *tos, frame::Tag tag, int offset);
-
 // only used for value types
 static void set_stack_slot(intptr_t *tos, address value, int offset);
 static void set_stack_int(intptr_t *tos, int value, int offset);
@@ -537,8 +535,6 @@
 static address locals_long_at(intptr_t* locals, int offset);
 static address locals_double_at(intptr_t* locals, int offset);
 
-static void tag_locals(intptr_t *locals, frame::Tag tag, int offset);
-
 static void set_locals_slot(intptr_t *locals, address value, int offset);
 static void set_locals_int(intptr_t *locals, jint value, int offset);
 static void set_locals_float(intptr_t *locals, jfloat value, int offset);
@@ -557,8 +553,6 @@
 static void copy_stack_slot(intptr_t *tos, int from_offset, int to_offset);
 
 #ifndef PRODUCT
-static void verify_locals_tag(intptr_t *locals, frame::Tag tag, int offset);
-static void verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset);
 static const char* C_msg(BytecodeInterpreter::messages msg);
 void print();
 #endif // PRODUCT
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu May 13 13:05:47 2010 -0700
@@ -691,24 +691,21 @@
 
   methodHandle caller_method(thread, method(thread));
 
-  // first determine if there is a bootstrap method
-  {
-    KlassHandle caller_klass(thread, caller_method->method_holder());
-    Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, KlassHandle(), CHECK);
-    if (bootm.is_null()) {
-      // If there is no bootstrap method, throw IncompatibleClassChangeError.
-      // This is a valid generic error type for resolution (JLS 12.3.3).
-      char buf[200];
-      jio_snprintf(buf, sizeof(buf), "Class %s has not declared a bootstrap method for invokedynamic",
-                   (Klass::cast(caller_klass()))->external_name());
-      THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-    }
-  }
+  // first find the bootstrap method
+  KlassHandle caller_klass(thread, caller_method->method_holder());
+  Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, CHECK);
 
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
-  int site_index = four_byte_index(thread);
+  int caller_bci = 0;
+  int site_index = 0;
+  { address caller_bcp = bcp(thread);
+    caller_bci = caller_method->bci_from(caller_bcp);
+    site_index = Bytes::get_native_u4(caller_bcp+1);
+  }
+  assert(site_index == four_byte_index(thread), "");
+  assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
  // there is a second CPC entry that is of interest; it caches signature info:
   int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
 
@@ -732,23 +729,32 @@
   // The method (f2 entry) of the main entry is the MH.invoke for the
   // invokedynamic target call signature.
   intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
-  methodHandle mh_invdyn(THREAD, (methodOop) f2_value);
-  assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
+  methodHandle signature_invoker(THREAD, (methodOop) f2_value);
+  assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
   symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
+
+  Handle info;  // NYI: Other metadata from a new kind of CP entry.  (Annotations?)
+
+  // this is the index which gets stored on the CallSite object (as "callerPosition"):
+  int call_site_position = constantPoolCacheOopDesc::decode_secondary_index(site_index);
+
   Handle call_site
-    = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
-                                               caller_method->method_idnum(),
-                                               caller_method->bci_from(bcp(thread)),
+    = SystemDictionary::make_dynamic_call_site(bootm,
+                                               // Callee information:
                                                call_site_name,
-                                               mh_invdyn,
+                                               signature_invoker,
+                                               info,
+                                               // Caller information:
+                                               caller_method,
+                                               caller_bci,
                                                CHECK);
 
   // In the secondary entry, the f1 field is the call site, and the f2 (index)
-  // field is some data about the invoke site.
-  int extra_data = 0;
-  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
+  // field is some data about the invoke site.  Currently, it is just the BCI.
+  // Later, it might be changed to help manage inlining dependencies.
+  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site, signature_invoker);
 }
 IRT_END
 
@@ -1067,7 +1073,7 @@
   jlong_accessor u;
   jint* newval = (jint*)value;
   u.words[0] = newval[0];
-  u.words[1] = newval[Interpreter::stackElementWords()]; // skip if tag
+  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
   fvalue.j = u.long_value;
 #endif // _LP64
 
@@ -1252,6 +1258,6 @@
   ArgumentSizeComputer asc(invoke->signature());
   int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
   Copy::conjoint_bytes(src_address, dest_address,
-                       size_of_arguments * Interpreter::stackElementSize());
+                       size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
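
In the invokedynamic hunk above, the caller BCI and the call-site index are recovered straight from the bytecode pointer (caller_method->bci_from(caller_bcp) and Bytes::get_native_u4(caller_bcp + 1)). A standalone sketch of that decoding step; decode_indy_operand and code_base are assumed names, and the secondary constant-pool-cache index encoding itself is not modeled:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Given the bytecode pointer of an invokedynamic instruction, recover
    // (a) the BCI relative to the method's code start and
    // (b) the 4-byte operand stored after the opcode in native byte order.
    static void decode_indy_operand(const uint8_t* code_base, const uint8_t* bcp,
                                    int* bci, uint32_t* operand) {
      *bci = (int)(bcp - code_base);                    // caller_method->bci_from(caller_bcp)
      std::memcpy(operand, bcp + 1, sizeof(*operand));  // Bytes::get_native_u4(caller_bcp + 1)
    }

    int main() {
      // A fake 8-byte method body: opcode 0xba (invokedynamic) at BCI 2, then 4 operand bytes.
      uint8_t code[8] = {0x00, 0x00, 0xba, 0x10, 0x20, 0x30, 0x40, 0x00};
      int bci; uint32_t operand;
      decode_indy_operand(code, code + 2, &bci, &operand);
      std::printf("bci=%d operand=0x%08x\n", bci, (unsigned) operand);
      return 0;
    }
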
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu May 13 13:05:47 2010 -0700
@@ -138,6 +138,15 @@
 
 void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
   methodOop result_oop = klass->uncached_lookup_method(name(), signature());
+  if (EnableMethodHandles && result_oop != NULL) {
+    switch (result_oop->intrinsic_id()) {
+    case vmIntrinsics::_invokeExact:
+    case vmIntrinsics::_invokeGeneric:
+    case vmIntrinsics::_invokeDynamic:
+      // Do not link directly to these.  The VM must produce a synthetic one using lookup_implicit_method.
+      return;
+    }
+  }
   result = methodHandle(THREAD, result_oop);
 }
 
@@ -163,12 +172,16 @@
   result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name(), signature()));
 }
 
-void LinkResolver::lookup_implicit_method(methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
+void LinkResolver::lookup_implicit_method(methodHandle& result,
+                                          KlassHandle klass, symbolHandle name, symbolHandle signature,
+                                          KlassHandle current_klass,
+                                          TRAPS) {
   if (EnableMethodHandles && MethodHandles::enabled() &&
-      name == vmSymbolHandles::invoke_name() && klass() == SystemDictionary::MethodHandle_klass()) {
-    methodOop result_oop = SystemDictionary::find_method_handle_invoke(signature,
-                                                                       Handle(),
-                                                                       Handle(),
+      klass() == SystemDictionary::MethodHandle_klass() &&
+      methodOopDesc::is_method_handle_invoke_name(name())) {
+    methodOop result_oop = SystemDictionary::find_method_handle_invoke(name,
+                                                                       signature,
+                                                                       current_klass,
                                                                        CHECK);
     if (result_oop != NULL) {
       assert(result_oop->is_method_handle_invoke() && result_oop->signature() == signature(), "consistent");
@@ -239,7 +252,7 @@
   // The class is java.dyn.MethodHandle
   resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
-  symbolHandle method_name = vmSymbolHandles::invoke_name();
+  symbolHandle method_name = vmSymbolHandles::invokeExact_name();
 
   symbolHandle method_signature(THREAD, pool->signature_ref_at(index));
   KlassHandle  current_klass   (THREAD, pool->pool_holder());
@@ -279,7 +292,7 @@
 
     if (resolved_method.is_null()) {
       // JSR 292:  see if this is an implicitly generated method MethodHandle.invoke(*...)
-      lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+      lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
     }
 
     if (resolved_method.is_null()) {
@@ -1041,13 +1054,14 @@
 
   // At this point, we only need the signature, and can ignore the name.
   symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index));  // raw_index works directly
-  symbolHandle method_name = vmSymbolHandles::invoke_name();
+  symbolHandle method_name = vmSymbolHandles::invokeExact_name();
   KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
-  // JSR 292:  this must be an implicitly generated method MethodHandle.invoke(*...)
+  // JSR 292:  this must be an implicitly generated method MethodHandle.invokeExact(*...)
   // The extra MH receiver will be inserted into the stack on every call.
   methodHandle resolved_method;
-  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+  KlassHandle current_klass(THREAD, pool->pool_holder());
+  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
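
The linkResolver.cpp changes above make the ordinary lookup refuse to link directly to the signature-polymorphic placeholders (_invokeExact, _invokeGeneric, _invokeDynamic); resolution then falls through to lookup_implicit_method, which asks the VM to synthesize an invoker for the concrete call-site signature. A compact two-stage sketch of that flow over toy data structures (all names hypothetical):

    #include <map>
    #include <set>
    #include <string>
    #include <cstdio>

    struct Method { std::string name, signature; bool synthetic; };

    // Declared methods of a toy MethodHandle class; the placeholder
    // declarations carry a meaningless "*" signature.
    static std::map<std::string, Method> declared = {
      {"invokeExact",   {"invokeExact",   "*", false}},
      {"invokeGeneric", {"invokeGeneric", "*", false}},
    };
    static const std::set<std::string> polymorphic = {"invokeExact", "invokeGeneric"};

    // Stage 1: ordinary lookup, but never link directly to a polymorphic placeholder.
    static const Method* lookup_method(const std::string& name) {
      auto it = declared.find(name);
      if (it == declared.end()) return nullptr;
      if (polymorphic.count(name)) return nullptr;   // force the implicit path
      return &it->second;
    }

    // Stage 2: synthesize an invoker specialized to the call site's signature.
    static Method lookup_implicit_method(const std::string& name, const std::string& sig) {
      return Method{name, sig, /*synthetic=*/true};
    }

    int main() {
      const std::string name = "invokeExact", sig = "(Ljava/lang/String;)I";
      const Method* m = lookup_method(name);
      Method resolved = m ? *m : lookup_implicit_method(name, sig);
      std::printf("%s %s synthetic=%d\n", resolved.name.c_str(),
                  resolved.signature.c_str(), (int)resolved.synthetic);
      return 0;
    }
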
--- a/src/share/vm/interpreter/linkResolver.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/interpreter/linkResolver.hpp	Thu May 13 13:05:47 2010 -0700
@@ -103,7 +103,8 @@
   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
   static void lookup_method_in_interfaces       (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
-  static void lookup_implicit_method            (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+  static void lookup_implicit_method            (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature,
+                                                 KlassHandle current_klass, TRAPS);
 
   static int vtable_index_of_miranda_method(KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
 
--- a/src/share/vm/memory/universe.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/memory/universe.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1045,7 +1045,7 @@
   k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false);
   k_h = instanceKlassHandle(THREAD, k);
   k_h->link_class(CHECK_false);
-  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_array_object_object_signature());
+  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
   if (m == NULL || m->is_static()) {
     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
       "java.lang.reflect.Method.invoke", false);
--- a/src/share/vm/oops/cpCacheOop.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/oops/cpCacheOop.cpp	Thu May 13 13:05:47 2010 -0700
@@ -218,18 +218,19 @@
 }
 
 
-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
-  methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
-  assert(method->is_method(), "must be initialized properly");
-  int param_size = method->size_of_parameters();
+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
+                                              methodHandle signature_invoker) {
+  int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
   if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
     // racing threads might be trying to install their own favorites
     set_f1(call_site());
   }
-  set_f2(extra_data);
-  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size);
+  //set_f2(0);
+  bool is_final = true;
+  assert(signature_invoker->is_final_method(), "is_final");
+  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
   // do not do set_bytecode on a secondary CP cache entry
   //set_bytecode_1(Bytecodes::_invokedynamic);
 }
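
set_dynamic_call above publishes the resolved CallSite into f1 with Atomic::cmpxchg_ptr so that, when several threads race to resolve the same invokedynamic site, only the first installed value is kept. A standalone sketch of that first-writer-wins pattern using std::atomic (CacheEntry and install_call_site are illustrative names):

    #include <atomic>
    #include <cstdio>

    struct CallSiteStub { int id; };

    struct CacheEntry {
      std::atomic<CallSiteStub*> f1{nullptr};   // the resolved call site goes here
    };

    // Try to install 'site'; if another thread got there first, keep its value.
    static CallSiteStub* install_call_site(CacheEntry* e, CallSiteStub* site) {
      CallSiteStub* expected = nullptr;
      // Analogue of Atomic::cmpxchg_ptr(call_site(), &_f1, NULL): only the first
      // non-null value is ever published; later racers just observe it.
      e->f1.compare_exchange_strong(expected, site);
      return e->f1.load();
    }

    int main() {
      CacheEntry entry;
      CallSiteStub a{1}, b{2};
      std::printf("winner=%d\n", install_call_site(&entry, &a)->id); // installs a
      std::printf("winner=%d\n", install_call_site(&entry, &b)->id); // still a
      return 0;
    }
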
--- a/src/share/vm/oops/cpCacheOop.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/oops/cpCacheOop.hpp	Thu May 13 13:05:47 2010 -0700
@@ -181,7 +181,7 @@
 
   void set_dynamic_call(
     Handle call_site,                            // Resolved java.dyn.CallSite (f1)
-    int extra_data                               // (f2)
+    methodHandle signature_invoker               // determines signature information
   );
 
   void set_parameter_size(int value) {
--- a/src/share/vm/oops/methodKlass.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/oops/methodKlass.cpp	Thu May 13 13:05:47 2010 -0700
@@ -236,8 +236,10 @@
   assert(obj->is_method(), "must be method");
   Klass::oop_print_on(obj, st);
   methodOop m = methodOop(obj);
+  // get the effect of PrintOopAddress, always, for methods:
+  st->print   (" - this oop:          "INTPTR_FORMAT, (intptr_t)m);
   st->print   (" - method holder:     ");    m->method_holder()->print_value_on(st); st->cr();
-  st->print   (" - constants:         " INTPTR_FORMAT, " ", (address)m->constants());
+  st->print   (" - constants:         "INTPTR_FORMAT" ", (address)m->constants());
   m->constants()->print_value_on(st); st->cr();
   st->print   (" - access:            0x%x  ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr();
   st->print   (" - name:              ");    m->name()->print_value_on(st); st->cr();
@@ -246,6 +248,10 @@
   st->print_cr(" - max locals:        %d",   m->max_locals());
   st->print_cr(" - size of params:    %d",   m->size_of_parameters());
   st->print_cr(" - method size:       %d",   m->method_size());
+  if (m->intrinsic_id() != vmIntrinsics::_none)
+    st->print_cr(" - intrinsic id:      %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
+  if (m->highest_tier_compile() != CompLevel_none)
+    st->print_cr(" - highest tier:      %d", m->highest_tier_compile());
   st->print_cr(" - vtable index:      %d",   m->_vtable_index);
   st->print_cr(" - i2i entry:         " INTPTR_FORMAT, m->interpreter_entry());
   st->print_cr(" - adapter:           " INTPTR_FORMAT, m->adapter());
--- a/src/share/vm/oops/methodOop.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Thu May 13 13:05:47 2010 -0700
@@ -306,7 +306,7 @@
 
 int methodOopDesc::extra_stack_words() {
   // not an inline function, to avoid a header dependency on Interpreter
-  return extra_stack_entries() * Interpreter::stackElementSize();
+  return extra_stack_entries() * Interpreter::stackElementSize;
 }
 
 
@@ -807,9 +807,19 @@
   return false;
 }
 
+bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
+  switch (name_sid) {
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):  // FIXME: remove this transitional form
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
+    return true;
+  }
+  return false;
+}
+
 // Constant pool structure for invoke methods:
 enum {
-  _imcp_invoke_name = 1,        // utf8: 'invoke'
+  _imcp_invoke_name = 1,        // utf8: 'invokeExact' or 'invokeGeneric'
   _imcp_invoke_signature,       // utf8: (variable symbolOop)
   _imcp_method_type_value,      // string: (variable java/dyn/MethodType, sic)
   _imcp_limit
@@ -839,14 +849,15 @@
 //
 // Tests if this method is an internal adapter frame from the
 // MethodHandleCompiler.
+// Must be consistent with MethodHandleCompiler::get_method_oop().
 bool methodOopDesc::is_method_handle_adapter() const {
-  return ((name() == vmSymbols::invoke_name() &&
-           method_holder() == SystemDictionary::MethodHandle_klass())
-          ||
-          method_holder() == SystemDictionary::InvokeDynamic_klass());
+  return (is_method_handle_invoke_name(name()) &&
+          is_synthetic() &&
+          MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder()));
 }
 
 methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
+                                               symbolHandle name,
                                                symbolHandle signature,
                                                Handle method_type, TRAPS) {
   methodHandle empty;
@@ -865,7 +876,7 @@
     constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty));
     cp = constantPoolHandle(THREAD, cp_oop);
   }
-  cp->symbol_at_put(_imcp_invoke_name,       vmSymbols::invoke_name());
+  cp->symbol_at_put(_imcp_invoke_name,       name());
   cp->symbol_at_put(_imcp_invoke_signature,  signature());
   cp->string_at_put(_imcp_method_type_value, vmSymbols::void_signature());
   cp->set_pool_holder(holder());
@@ -882,7 +893,7 @@
   m->set_constants(cp());
   m->set_name_index(_imcp_invoke_name);
   m->set_signature_index(_imcp_invoke_signature);
-  assert(m->name() == vmSymbols::invoke_name(), "");
+  assert(is_method_handle_invoke_name(m->name()), "");
   assert(m->signature() == signature(), "");
 #ifdef CC_INTERP
   ResultTypeFinder rtf(signature());
@@ -1033,6 +1044,24 @@
       id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
       break;
     }
+    break;
+
+  // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle):
+    if (is_static() || !is_native())  break;
+    switch (name_id) {
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
+      id = vmIntrinsics::_invokeGeneric; break;
+    default:
+      if (is_method_handle_invoke_name(name()))
+        id = vmIntrinsics::_invokeExact;
+      break;
+    }
+    break;
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_InvokeDynamic):
+    if (!is_static() || !is_native())  break;
+    id = vmIntrinsics::_invokeDynamic;
+    break;
   }
 
   if (id != vmIntrinsics::_none) {
@@ -1114,6 +1143,20 @@
     return ( a < b ? -1 : (a == b ? 0 : 1));
   }
 
+  // We implement special compare versions for narrow oops to avoid
+  // testing for UseCompressedOops on every comparison.
+  static int method_compare_narrow(narrowOop* a, narrowOop* b) {
+    methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
+    methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
+    return m->name()->fast_compare(n->name());
+  }
+
+  static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
+    int i = method_compare_narrow(a, b);
+    if (i != 0) return i;
+    return ( a < b ? -1 : (a == b ? 0 : 1));
+  }
+
   typedef int (*compareFn)(const void*, const void*);
 }
 
@@ -1166,7 +1209,7 @@
 
     // Use a simple bubble sort for small number of methods since
     // qsort requires a functional pointer call for each comparison.
-    if (UseCompressedOops || length < 8) {
+    if (length < 8) {
       bool sorted = true;
       for (int i=length-1; i>0; i--) {
         for (int j=0; j<i; j++) {
@@ -1182,10 +1225,10 @@
           sorted = true;
       }
     } else {
-      // XXX This doesn't work for UseCompressedOops because the compare fn
-      // will have to decode the methodOop anyway making it not much faster
-      // than above.
-      compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
+      compareFn compare =
+        (UseCompressedOops ?
+         (compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
+         (compareFn) (idempotent ? method_compare_idempotent : method_compare));
       qsort(methods->base(), length, heapOopSize, compare);
     }
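
The sorting change above adds narrow-oop comparators so qsort can be used even with UseCompressedOops, keeping the bubble sort only for arrays of fewer than eight methods. A self-contained sketch of the same shape of dispatch, selecting a comparator at runtime and switching between bubble sort and qsort (plain ints stand in for method oops):

    #include <cstdlib>
    #include <cstdio>

    typedef int (*compareFn)(const void*, const void*);

    static int compare_ascending(const void* a, const void* b) {
      int x = *(const int*)a, y = *(const int*)b;
      return (x < y) ? -1 : (x == y ? 0 : 1);
    }
    static int compare_descending(const void* a, const void* b) {
      return -compare_ascending(a, b);
    }

    static void sort_values(int* vals, int length, bool ascending) {
      if (length < 8) {
        // Small arrays: bubble sort avoids the indirect call per comparison.
        for (int i = length - 1; i > 0; i--)
          for (int j = 0; j < i; j++)
            if ((ascending && vals[j] > vals[j+1]) || (!ascending && vals[j] < vals[j+1])) {
              int t = vals[j]; vals[j] = vals[j+1]; vals[j+1] = t;
            }
      } else {
        // Larger arrays: pick the comparator once, then hand it to qsort,
        // the same shape as choosing method_compare vs. method_compare_narrow.
        compareFn compare = ascending ? compare_ascending : compare_descending;
        qsort(vals, length, sizeof(int), compare);
      }
    }

    int main() {
      int v[10] = {9, 3, 7, 1, 8, 2, 6, 0, 5, 4};
      sort_values(v, 10, true);
      for (int i = 0; i < 10; i++) std::printf("%d ", v[i]);
      std::printf("\n");
      return 0;
    }
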
 
--- a/src/share/vm/oops/methodOop.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Thu May 13 13:05:47 2010 -0700
@@ -525,11 +525,16 @@
 
   // JSR 292 support
   bool is_method_handle_invoke() const              { return access_flags().is_method_handle_invoke(); }
+  static bool is_method_handle_invoke_name(vmSymbols::SID name_sid);
+  static bool is_method_handle_invoke_name(symbolOop name) {
+    return is_method_handle_invoke_name(vmSymbols::find_sid(name));
+  }
   // Tests if this method is an internal adapter frame from the
   // MethodHandleCompiler.
   bool is_method_handle_adapter() const;
   static methodHandle make_invoke_method(KlassHandle holder,
-                                         symbolHandle signature,
+                                         symbolHandle name, //invokeExact or invokeGeneric
+                                         symbolHandle signature, //anything at all
                                          Handle method_type,
                                          TRAPS);
   // these operate only on invoke methods:
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu May 13 13:05:47 2010 -0700
@@ -477,12 +477,7 @@
   }
   int new_depth_adjust = 0;
   if (caller_jvms->method() != NULL) {
-    if ((caller_jvms->method()->name() == ciSymbol::invoke_name() &&
-         caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_MethodHandle())
-        || caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_InvokeDynamic())
-      /* @@@ FIXME:
     if (caller_jvms->method()->is_method_handle_adapter())
-      */
       new_depth_adjust -= 1;  // don't count actions in MH or indy adapter frames
     else if (callee_method->is_method_handle_invoke()) {
       new_depth_adjust -= 1;  // don't count method handle calls from java.dyn implem
--- a/src/share/vm/opto/library_call.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/opto/library_call.cpp	Thu May 13 13:05:47 2010 -0700
@@ -809,8 +809,7 @@
   Node* no_ctrl = NULL;
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type =
-        TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
   const TypeAryPtr* value_type =
         TypeAryPtr::make(TypePtr::NotNull,
@@ -883,8 +882,7 @@
   }
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type =
-    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
   Node* no_ctrl = NULL;
 
   // Get counts for string and argument
@@ -958,14 +956,16 @@
     }
   }
 
-  const TypeInstPtr* string_type =
-    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
   Node* receiver_cnt;
   Node* argument_cnt;
 
   if (!stopped()) {
+    // Properly cast the argument to String
+    argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type));
+
     // Get counts for string and argument
     Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
     receiver_cnt  = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
@@ -1090,7 +1090,7 @@
   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
   const TypeAryPtr*  source_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0);
 
   Node* sourceOffseta = basic_plus_adr(string_object, string_object, offset_offset);
@@ -1175,7 +1175,9 @@
   Node *receiver = pop();
 
   Node* result;
-  if (Matcher::has_match_rule(Op_StrIndexOf) &&
+  // Disable the use of pcmpestri until it can be guaranteed that
+  // the load doesn't cross into the uncommitted space.
+  if (false && Matcher::has_match_rule(Op_StrIndexOf) &&
       UseSSE42Intrinsics) {
     // Generate SSE4.2 version of indexOf
     // We currently only have match rules that use SSE4.2
@@ -1199,8 +1201,7 @@
     Node* no_ctrl  = NULL;
 
     ciInstanceKlass* klass = env()->String_klass();
-    const TypeInstPtr* string_type =
-      TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
     // Get counts for string and substr
     Node* source_cnta = basic_plus_adr(receiver, receiver, count_offset);
--- a/src/share/vm/prims/methodHandleWalk.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1173,9 +1173,9 @@
   // has no receiver, normal MH calls do.
   int flags_bits;
   if (for_invokedynamic())
-    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_STATIC);
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC | JVM_ACC_STATIC);
   else
-    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL);
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC);
 
   bool is_conc_safe = true;
   methodOop m_oop = oopFactory::new_method(bytecode_length(),
@@ -1217,6 +1217,7 @@
   }
 #endif //PRODUCT
 
+  assert(m->is_method_handle_adapter(), "must be recognized as an adapter");
   return m;
 }
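
The generated adapters above now carry JVM_ACC_SYNTHETIC, which the tightened is_method_handle_adapter() test (see the methodOop.cpp hunk earlier in this change) requires. A tiny sketch of composing and testing such access-flag bits; the ACC_* values follow the class-file format, while adapter_flags and looks_like_adapter are invented names:

    #include <cstdio>

    // Access-flag bits as in the class-file format.
    enum {
      ACC_PUBLIC    = 0x0001,
      ACC_STATIC    = 0x0008,
      ACC_FINAL     = 0x0010,
      ACC_SYNTHETIC = 0x1000
    };

    // Flags for a generated adapter: static only for the invokedynamic flavor.
    static int adapter_flags(bool for_invokedynamic) {
      int flags = ACC_PUBLIC | ACC_FINAL | ACC_SYNTHETIC;
      if (for_invokedynamic) flags |= ACC_STATIC;
      return flags;
    }

    // The recognition test now requires the synthetic bit to be present.
    static bool looks_like_adapter(int flags) { return (flags & ACC_SYNTHETIC) != 0; }

    int main() {
      int flags = adapter_flags(true);
      std::printf("indy adapter flags = 0x%x, synthetic=%d\n",
                  flags, (int)looks_like_adapter(flags));
      return 0;
    }
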
 
--- a/src/share/vm/prims/methodHandles.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Thu May 13 13:05:47 2010 -0700
@@ -366,6 +366,13 @@
   VM_INDEX_UNINITIALIZED = sun_dyn_MemberName::VM_INDEX_UNINITIALIZED
 };
 
+Handle MethodHandles::new_MemberName(TRAPS) {
+  Handle empty;
+  instanceKlassHandle k(THREAD, SystemDictionary::MemberName_klass());
+  if (!k->is_initialized())  k->initialize(CHECK_(empty));
+  return Handle(THREAD, k->allocate_instance(THREAD));
+}
+
 void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) {
   if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) {
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
@@ -394,16 +401,18 @@
   sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget);
   sun_dyn_MemberName::set_vmindex(mname_oop,  vmindex);
   sun_dyn_MemberName::set_flags(mname_oop,    flags);
+  sun_dyn_MemberName::set_clazz(mname_oop,    Klass::cast(m->method_holder())->java_mirror());
 }
 
 void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) {
   int flags = (IS_FIELD | (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ));
   oop vmtarget = field_holder;
-  int vmindex  = offset;  // implies no info yet
+  int vmindex  = offset;  // determines the field uniquely when combined with static bit
   assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex");
   sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget);
   sun_dyn_MemberName::set_vmindex(mname_oop,  vmindex);
   sun_dyn_MemberName::set_flags(mname_oop,    flags);
+  sun_dyn_MemberName::set_clazz(mname_oop,    Klass::cast(field_holder)->java_mirror());
 }
 
 
@@ -466,16 +475,25 @@
   if (name.is_null())  return;  // no such name
   name_str = NULL;  // safety
 
+  Handle polymorphic_method_type;
+  bool polymorphic_signature = false;
+  if ((flags & ALL_KINDS) == IS_METHOD &&
+      (defc() == SystemDictionary::InvokeDynamic_klass() ||
+       (defc() == SystemDictionary::MethodHandle_klass() &&
+        methodOopDesc::is_method_handle_invoke_name(name()))))
+    polymorphic_signature = true;
+
   // convert the external string or reflective type to an internal signature
-  bool force_signature = (name() == vmSymbols::invoke_name());
   symbolHandle type; {
     symbolOop type_sym = NULL;
     if (java_dyn_MethodType::is_instance(type_str)) {
-      type_sym = java_dyn_MethodType::as_signature(type_str, force_signature, CHECK);
+      type_sym = java_dyn_MethodType::as_signature(type_str, polymorphic_signature, CHECK);
+      if (polymorphic_signature)
+        polymorphic_method_type = Handle(THREAD, type_str);  //preserve exactly
     } else if (java_lang_Class::is_instance(type_str)) {
-      type_sym = java_lang_Class::as_signature(type_str, force_signature, CHECK);
+      type_sym = java_lang_Class::as_signature(type_str, false, CHECK);
     } else if (java_lang_String::is_instance(type_str)) {
-      if (force_signature) {
+      if (polymorphic_signature) {
         type     = java_lang_String::as_symbol(type_str, CHECK);
       } else {
         type_sym = java_lang_String::as_symbol_or_null(type_str);
@@ -508,7 +526,7 @@
         }
         if (HAS_PENDING_EXCEPTION) {
           CLEAR_PENDING_EXCEPTION;
-          return;
+          break;  // go to second chance
         }
       }
       methodHandle m = result.resolved_method();
@@ -582,8 +600,42 @@
       sun_dyn_MemberName::set_modifiers(mname(), mods);
       return;
     }
+  default:
+    THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format");
   }
-  THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format");
+
+  // Second chance.
+  if (polymorphic_method_type.not_null()) {
+    // Look on a non-null class loader.
+    Handle cur_class_loader;
+    const int nptypes = java_dyn_MethodType::ptype_count(polymorphic_method_type());
+    for (int i = 0; i <= nptypes; i++) {
+      oop type_mirror;
+      if (i < nptypes)  type_mirror = java_dyn_MethodType::ptype(polymorphic_method_type(), i);
+      else              type_mirror = java_dyn_MethodType::rtype(polymorphic_method_type());
+      klassOop example_type = java_lang_Class::as_klassOop(type_mirror);
+      if (example_type == NULL)  continue;
+      oop class_loader = Klass::cast(example_type)->class_loader();
+      if (class_loader == NULL || class_loader == cur_class_loader())  continue;
+      cur_class_loader = Handle(THREAD, class_loader);
+      methodOop m = SystemDictionary::find_method_handle_invoke(name,
+                                                                type,
+                                                                KlassHandle(THREAD, example_type),
+                                                                THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        CLEAR_PENDING_EXCEPTION;
+        m = NULL;
+        // try again with a different class loader...
+      }
+      if (m != NULL) {
+        int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS);
+        sun_dyn_MemberName::set_vmtarget(mname(),  m);
+        sun_dyn_MemberName::set_vmindex(mname(),   m->vtable_index());
+        sun_dyn_MemberName::set_modifiers(mname(), mods);
+        return;
+      }
+    }
+  }
 }
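
The second-chance loop above retries SystemDictionary::find_method_handle_invoke against the class loaders of the types named in the MethodType, because the signature-polymorphic MethodHandle.invoke* methods are synthesized per MethodType and those types may live on an application class loader. A minimal, hypothetical Java sketch of the situation it handles (the java.dyn package of this era is assumed; Widget stands in for any application-loaded class):

    import java.dyn.MethodHandle;

    class PolymorphicSketch {
        static class Widget {}   // imagine Widget is defined by an application class loader

        static void demo(MethodHandle mh1, MethodHandle mh2, String s, Widget w) throws Throwable {
            mh1.invokeExact(s);       // links "invokeExact" with MethodType (String)void
            mh2.invokeExact(w, 42);   // links "invokeExact" with MethodType (Widget,int)void;
                                      // resolving this MemberName may only succeed on
                                      // Widget's (non-null) class loader
        }
    }
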
 
 // Conversely, a member name which is only initialized from JVM internals
@@ -775,6 +827,20 @@
 }
 
 
+// Decode this java.lang.Class object into an instanceKlass, if possible.
+// Throw IllegalArgumentException if this is not possible.
+instanceKlassHandle MethodHandles::resolve_instance_klass(oop java_mirror_oop, TRAPS) {
+  instanceKlassHandle empty;
+  klassOop caller = NULL;
+  if (java_lang_Class::is_instance(java_mirror_oop)) {
+    caller = java_lang_Class::as_klassOop(java_mirror_oop);
+  }
+  if (caller == NULL || !Klass::cast(caller)->oop_is_instance()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "not a class", empty);
+  }
+  return instanceKlassHandle(THREAD, caller);
+}
+
 
 
 // Decode the vmtarget field of a method handle.
@@ -970,6 +1036,13 @@
       pnum += 1;
       mnum += 1;
     }
+    klassOop  pklass = NULL;
+    BasicType ptype  = T_OBJECT;
+    if (ptype_oop != NULL)
+      ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass);
+    else
+      // null does not match any non-reference; use Object to report the error
+      pklass = SystemDictionary::Object_klass();
     klassOop  mklass = NULL;
     BasicType mtype  = ss.type();
     if (mtype == T_ARRAY)  mtype = T_OBJECT; // fold all refs to T_OBJECT
@@ -978,21 +1051,22 @@
         // null matches any reference
         continue;
       }
+      KlassHandle pklass_handle(THREAD, pklass); pklass = NULL;
       // If we fail to resolve types at this point, we will throw an error.
       symbolOop    name_oop = ss.as_symbol(CHECK);
       symbolHandle name(THREAD, name_oop);
       instanceKlass* mk = instanceKlass::cast(m->method_holder());
       Handle loader(THREAD, mk->class_loader());
       Handle domain(THREAD, mk->protection_domain());
-      mklass = SystemDictionary::resolve_or_fail(name, loader, domain,
-                                                 true, CHECK);
+      mklass = SystemDictionary::resolve_or_null(name, loader, domain, CHECK);
+      pklass = pklass_handle();
+      if (mklass == NULL && pklass != NULL &&
+          Klass::cast(pklass)->name() == name() &&
+          m->is_method_handle_invoke()) {
+        // Assume a match.  We can't really decode the signature of MH.invoke*.
+        continue;
+      }
     }
-    if (ptype_oop == NULL) {
-      // null does not match any non-reference; use Object to report the error
-      ptype_oop = object_java_mirror();
-    }
-    klassOop  pklass = NULL;
-    BasicType ptype  = java_lang_Class::as_BasicType(ptype_oop, &pklass);
     if (!ss.at_return_type()) {
       err = check_argument_type_change(ptype, pklass, mtype, mklass, mnum);
     } else {
@@ -2115,31 +2189,26 @@
     KlassHandle caller(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)));
     // If this were a bytecode, the first access check would be against
     // the "reference class" mentioned in the CONSTANT_Methodref.
-    // For that class, we use the defining class of m,
-    // or a more specific receiver limit if available.
-    klassOop reference_klass = m->method_holder();  // OK approximation
-    if (receiver_limit != NULL && receiver_limit != reference_klass) {
-      if (!Klass::cast(receiver_limit)->is_subtype_of(reference_klass))
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "receiver limit out of bounds");  // Java code bug
-      reference_klass = receiver_limit;
-    }
-    // Emulate LinkResolver::check_klass_accessability.
-    if (!Reflection::verify_class_access(caller->as_klassOop(),
-                                         reference_klass,
-                                         true)) {
-      THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(m->method_holder())->external_name());
-    }
+    // We don't know at this point which class that was, and if we
+    // check against m.method_holder we might get the wrong answer.
+    // So we simply defer this check to resolution time, when we
+    // call resolve_MemberName.
+    //
+    // (A public class can inherit public members from private supers,
+    // and it would be wrong to check access against the private super
+    // if the original symbolic reference was against the public class.)
+    //
     // If there were a bytecode, the next step would be to look up the method
     // in the reference class, then check the method's access bits.
     // Emulate LinkResolver::check_method_accessability.
     klassOop resolved_klass = m->method_holder();
     if (!Reflection::verify_field_access(caller->as_klassOop(),
-                                         resolved_klass, reference_klass,
+                                         resolved_klass, resolved_klass,
                                          m->access_flags(),
                                          true)) {
       // %%% following cutout belongs in Reflection::verify_field_access?
       bool same_pm = Reflection::is_same_package_member(caller->as_klassOop(),
-                                                        reference_klass, THREAD);
+                                                        resolved_klass, THREAD);
       if (!same_pm) {
         THROW_MSG(vmSymbols::java_lang_InternalError(), m->name_and_sig_as_C_string());
       }
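
The parenthetical note above, about public members inherited from less-accessible supers, can be made concrete with a small, purely illustrative Java example (hypothetical classes, not part of this change):

    // file p1/PackagePrivateSuper.java
    package p1;
    class PackagePrivateSuper {                 // not accessible outside package p1
        public void greet() { System.out.println("hi"); }
    }

    // file p1/PublicSub.java
    package p1;
    public class PublicSub extends PackagePrivateSuper { }

    // file p2/Caller.java
    package p2;
    public class Caller {
        public static void main(String[] args) {
            new p1.PublicSub().greet();  // symbolic reference: p1/PublicSub.greet()V
            // Checking access against greet()'s defining class, p1.PackagePrivateSuper,
            // would wrongly fail here; the reference class recorded at resolution time
            // (p1.PublicSub) is the one that must be checked.
        }
    }
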
@@ -2244,6 +2313,8 @@
   case MethodHandles::GC_JVM_STACK_MOVE_UNIT:
     // return number of words per slot, signed according to stack direction
     return MethodHandles::stack_move_unit();
+  case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK:
+    return MethodHandles::adapter_conversion_ops_supported_mask();
   }
   return 0;
 }
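
GC_CONV_OP_IMPLEMENTED_MASK extends this small query protocol so the Java runtime can learn which adapter conversion ops the platform implements. A sketch of a possible Java-side reader follows; the class and its native getConstant hook are assumptions modelled on the existing GC_* queries, and only the constant value 2 comes from this change:

    // Hypothetical mirror of the query constants (values from methodHandles.hpp):
    class MethodHandleConstantsSketch {
        static final int GC_JVM_PUSH_LIMIT           = 0;
        static final int GC_JVM_STACK_MOVE_UNIT      = 1;
        static final int GC_CONV_OP_IMPLEMENTED_MASK = 2;

        // Assumed native hook into the VM's getConstant entry point; not part of this patch.
        static native int getConstant(int which);

        static boolean isConversionOpSupported(int convOp) {
            // The VM reports a bit mask of supported conversion_op values,
            // which may vary by platform (adapter_conversion_ops_supported_mask()).
            return (getConstant(GC_CONV_OP_IMPLEMENTED_MASK) & (1 << convOp)) != 0;
        }
    }
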
@@ -2342,7 +2413,22 @@
 JVM_ENTRY(void, MHI_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
   if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
-  // %%% take caller into account!
+
+  // The trusted Java code that calls this method should already have performed
+  // access checks on behalf of the given caller.  But we can verify this.
+  if (VerifyMethodHandles && caller_jh != NULL) {
+    klassOop reference_klass = java_lang_Class::as_klassOop(sun_dyn_MemberName::clazz(mname()));
+    if (reference_klass != NULL) {
+      // Emulate LinkResolver::check_klass_accessability.
+      klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh));
+      if (!Reflection::verify_class_access(caller,
+                                           reference_klass,
+                                           true)) {
+        THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name());
+      }
+    }
+  }
+
   MethodHandles::resolve_MemberName(mname, CHECK);
 }
 JVM_END
@@ -2387,12 +2473,48 @@
 }
 JVM_END
 
+JVM_ENTRY(void, MHI_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) {
+  instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  ik->link_class(CHECK);
+  if (!java_dyn_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle");
+  }
+  const char* err = NULL;
+  if (ik->is_initialized() || ik->is_in_error_state()) {
+    err = "too late: class is already initialized";
+  } else {
+    ObjectLocker ol(ik, THREAD);  // note:  this should be a recursive lock
+    if (ik->is_not_initialized() ||
+        (ik->is_being_initialized() && ik->is_reentrant_initialization(THREAD))) {
+      if (ik->bootstrap_method() != NULL) {
+        err = "class is already equipped with a bootstrap method";
+      } else {
+        ik->set_bootstrap_method(JNIHandles::resolve_non_null(bsm_jh));
+        err = NULL;
+      }
+    } else {
+      err = "class is already initialized";
+      if (ik->is_being_initialized())
+        err = "class is already being initialized in a different thread";
+    }
+  }
+  if (err != NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalStateException(), err);
+  }
+}
+JVM_END
 
-JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
+JVM_ENTRY(jobject, MHI_getBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh)) {
+  instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  return JNIHandles::make_local(THREAD, ik->bootstrap_method());
+}
+JVM_END
+
+JVM_ENTRY(void, MHI_setCallSiteTarget(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
   // No special action required, yet.
   oop site_oop = JNIHandles::resolve(site_jh);
-  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSite_klass())
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site");
+  if (!java_dyn_CallSite::is_instance(site_oop))
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "not a CallSite");
   java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh));
 }
 JVM_END
@@ -2442,7 +2564,9 @@
 
 // More entry points specifically for EnableInvokeDynamic.
 static JNINativeMethod methods2[] = {
-  {CC"linkCallSite",            CC"("CST MH")V",                FN_PTR(MH_linkCallSite)}
+  {CC"registerBootstrap",       CC"("CLS MH")V",                FN_PTR(MHI_registerBootstrap)},
+  {CC"getBootstrap",            CC"("CLS")"MH,                  FN_PTR(MHI_getBootstrap)},
+  {CC"setCallSiteTarget",       CC"("CST MH")V",                FN_PTR(MHI_setCallSiteTarget)}
 };
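
For reference, the three signature strings above correspond to Java-side native declarations roughly like the sketch below. The owning class (assumed here to be sun.dyn.MethodHandleNatives) and the comments are assumptions; the parameter and return types follow directly from the (CLS MH)V, (CLS)MH and (CST MH)V descriptors:

    package sun.dyn;

    import java.dyn.CallSite;
    import java.dyn.MethodHandle;

    class MethodHandleNatives {
        // Rejected once the class is initialized, or while another thread is
        // initializing it (see MHI_registerBootstrap above).
        static native void registerBootstrap(Class<?> caller, MethodHandle bootstrapMethod);

        // Returns the previously registered bootstrap method, or null.
        static native MethodHandle getBootstrap(Class<?> caller);

        // Stores the new target; the VM takes no special action yet.
        static native void setCallSiteTarget(CallSite site, MethodHandle target);
    }
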
 
 
--- a/src/share/vm/prims/methodHandles.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Thu May 13 13:05:47 2010 -0700
@@ -163,7 +163,7 @@
     default: ShouldNotReachHere();
     }
     // Return the size of the stack slots to move in bytes.
-    swap_bytes = swap_slots * Interpreter::stackElementSize();
+    swap_bytes = swap_slots * Interpreter::stackElementSize;
   }
 
   static int get_ek_adapter_opt_spread_info(EntryKind ek) {
@@ -216,10 +216,13 @@
     return (conv >> CONV_VMINFO_SHIFT) & CONV_VMINFO_MASK;
   }
 
+  // Bit mask of conversion_op values.  May vary by platform.
+  static int adapter_conversion_ops_supported_mask();
+
   // Offset in words that the interpreter stack pointer moves when an argument is pushed.
   // The stack_move value must always be a multiple of this.
   static int stack_move_unit() {
-    return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords();
+    return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords;
   }
 
   enum { CONV_VMINFO_SIGN_FLAG = 0x80 };
@@ -262,8 +265,9 @@
   // working with member names
   static void resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
+  static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static void init_MemberName(oop mname_oop, oop target); // compute vmtarget/vmindex from target
-  static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch);
+  static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch = true);
   static void init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset);
   static int find_MemberNames(klassOop k, symbolOop name, symbolOop sig,
                               int mflags, klassOop caller,
@@ -300,6 +304,7 @@
     // format of query to getConstant:
     GC_JVM_PUSH_LIMIT = 0,
     GC_JVM_STACK_MOVE_UNIT = 1,
+    GC_CONV_OP_IMPLEMENTED_MASK = 2,
 
     // format of result from getTarget / encode_target:
     ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method)
@@ -311,6 +316,11 @@
   static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code)
   static bool class_cast_needed(klassOop src, klassOop dst);
 
+  static instanceKlassHandle resolve_instance_klass(oop    java_mirror_oop, TRAPS);
+  static instanceKlassHandle resolve_instance_klass(jclass java_mirror_jh,  TRAPS) {
+    return resolve_instance_klass(JNIHandles::resolve(java_mirror_jh), THREAD);
+  }
+
  private:
   // These checkers operate on a pair of whole MethodTypes:
   static const char* check_method_type_change(oop src_mtype, int src_beg, int src_end,
@@ -430,12 +440,12 @@
                                RegisterOrConstant arg_slots,
                                int arg_mask,
                                Register argslot_reg,
-                               Register temp_reg, Register temp2_reg);
+                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
 
   static void remove_arg_slots(MacroAssembler* _masm,
                                RegisterOrConstant arg_slots,
                                Register argslot_reg,
-                               Register temp_reg, Register temp2_reg);
+                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
 };
 
 
--- a/src/share/vm/runtime/arguments.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Thu May 13 13:05:47 2010 -0700
@@ -2867,12 +2867,6 @@
   }
 #endif // _LP64
 
-  // MethodHandles code does not support TaggedStackInterpreter.
-  if (EnableMethodHandles && TaggedStackInterpreter) {
-    warning("TaggedStackInterpreter is not supported by MethodHandles code.  Disabling TaggedStackInterpreter.");
-    TaggedStackInterpreter = false;
-  }
-
   // Check the GC selections again.
   if (!check_gc_consistency()) {
     return JNI_EINVAL;
@@ -2915,11 +2909,6 @@
   LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
 #endif // CC_INTERP
 
-#ifdef ZERO
-  // Clear flags not supported by Zero
-  FLAG_SET_DEFAULT(TaggedStackInterpreter, false);
-#endif // ZERO
-
 #ifdef COMPILER2
   if (!UseBiasedLocking || EmitSync != 0) {
     UseOptoBiasInlining = false;
--- a/src/share/vm/runtime/frame.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/frame.cpp	Thu May 13 13:05:47 2010 -0700
@@ -468,42 +468,16 @@
   return &((*interpreter_frame_locals_addr())[n]);
 }
 
-frame::Tag frame::interpreter_frame_local_tag(int index) const {
-  const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
-  return (Tag)(*interpreter_frame_locals_addr()) [n];
-}
-
-void frame::interpreter_frame_set_local_tag(int index, Tag tag) const {
-  const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
-  (*interpreter_frame_locals_addr())[n] = (intptr_t)tag;
-}
-
 intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
   const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::value_offset_in_bytes())/wordSize;
+  const int n = i * Interpreter::stackElementWords;
   return &(interpreter_frame_expression_stack()[n]);
 }
 
-frame::Tag frame::interpreter_frame_expression_stack_tag(jint offset) const {
-  const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::tag_offset_in_bytes())/wordSize;
-  return (Tag)(interpreter_frame_expression_stack()[n]);
-}
-
-void frame::interpreter_frame_set_expression_stack_tag(jint offset,
-                                                       Tag tag) const {
-  const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::tag_offset_in_bytes())/wordSize;
-  interpreter_frame_expression_stack()[n] = (intptr_t)tag;
-}
-
 jint frame::interpreter_frame_expression_stack_size() const {
   // Number of elements on the interpreter expression stack
   // Callers should span by stackElementWords
-  int element_size = Interpreter::stackElementWords();
+  int element_size = Interpreter::stackElementWords;
   if (frame::interpreter_frame_expression_stack_direction() < 0) {
     return (interpreter_frame_expression_stack() -
             interpreter_frame_tos_address() + 1)/element_size;
@@ -585,20 +559,12 @@
   for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
     intptr_t x = *interpreter_frame_local_at(i);
     st->print(" - local  [" INTPTR_FORMAT "]", x);
-    if (TaggedStackInterpreter) {
-      Tag x = interpreter_frame_local_tag(i);
-      st->print(" - local tag [" INTPTR_FORMAT "]", x);
-    }
     st->fill_to(23);
     st->print_cr("; #%d", i);
   }
   for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
     intptr_t x = *interpreter_frame_expression_stack_at(i);
     st->print(" - stack  [" INTPTR_FORMAT "]", x);
-    if (TaggedStackInterpreter) {
-      Tag x = interpreter_frame_expression_stack_tag(i);
-      st->print(" - stack tag [" INTPTR_FORMAT "]", x);
-    }
     st->fill_to(23);
     st->print_cr("; #%d", i);
   }
@@ -950,103 +916,19 @@
     }
   }
 
-  if (TaggedStackInterpreter) {
-    // process locals & expression stack
-    InterpreterOopMap *mask = NULL;
-#ifdef ASSERT
-    InterpreterOopMap oopmap_mask;
-    OopMapCache::compute_one_oop_map(m, bci, &oopmap_mask);
-    mask = &oopmap_mask;
-#endif // ASSERT
-    oops_interpreted_locals_do(f, max_locals, mask);
-    oops_interpreted_expressions_do(f, signature, has_receiver,
-                                    m->max_stack(),
-                                    max_locals, mask);
+  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
+
+  // process locals & expression stack
+  InterpreterOopMap mask;
+  if (query_oop_map_cache) {
+    m->mask_for(bci, &mask);
   } else {
-    InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
-
-    // process locals & expression stack
-    InterpreterOopMap mask;
-    if (query_oop_map_cache) {
-      m->mask_for(bci, &mask);
-    } else {
-      OopMapCache::compute_one_oop_map(m, bci, &mask);
-    }
-    mask.iterate_oop(&blk);
+    OopMapCache::compute_one_oop_map(m, bci, &mask);
   }
+  mask.iterate_oop(&blk);
 }
 
 
-void frame::oops_interpreted_locals_do(OopClosure *f,
-                                      int max_locals,
-                                      InterpreterOopMap *mask) {
-  // Process locals then interpreter expression stack
-  for (int i = 0; i < max_locals; i++ ) {
-    Tag tag = interpreter_frame_local_tag(i);
-    if (tag == TagReference) {
-      oop* addr = (oop*) interpreter_frame_local_at(i);
-      assert((intptr_t*)addr >= sp(), "must be inside the frame");
-      f->do_oop(addr);
-#ifdef ASSERT
-    } else {
-      assert(tag == TagValue, "bad tag value for locals");
-      oop* p = (oop*) interpreter_frame_local_at(i);
-      // Not always true - too bad.  May have dead oops without tags in locals.
-      // assert(*p == NULL || !(*p)->is_oop(), "oop not tagged on interpreter locals");
-      assert(*p == NULL || !mask->is_oop(i), "local oop map mismatch");
-#endif // ASSERT
-    }
-  }
-}
-
-void frame::oops_interpreted_expressions_do(OopClosure *f,
-                                      symbolHandle signature,
-                                      bool has_receiver,
-                                      int max_stack,
-                                      int max_locals,
-                                      InterpreterOopMap *mask) {
-  // There is no stack no matter what the esp is pointing to (native methods
-  // might look like expression stack is nonempty).
-  if (max_stack == 0) return;
-
-  // Point the top of the expression stack above arguments to a call so
-  // arguments aren't gc'ed as both stack values for callee and callee
-  // arguments in callee's locals.
-  int args_size = 0;
-  if (!signature.is_null()) {
-    args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
-  }
-
-  intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
-  assert(args_size != 0 || tos_addr == interpreter_frame_tos_address(), "these are same");
-  intptr_t *frst_expr = interpreter_frame_expression_stack_at(0);
-  // In case of exceptions, the expression stack is invalid and the esp
-  // will be reset to express this condition. Therefore, we call f only
-  // if addr is 'inside' the stack (i.e., addr >= esp for Intel).
-  bool in_stack;
-  if (interpreter_frame_expression_stack_direction() > 0) {
-    in_stack = (intptr_t*)frst_expr <= tos_addr;
-  } else {
-    in_stack = (intptr_t*)frst_expr >= tos_addr;
-  }
-  if (!in_stack) return;
-
-  jint stack_size = interpreter_frame_expression_stack_size() - args_size;
-  for (int j = 0; j < stack_size; j++) {
-    Tag tag = interpreter_frame_expression_stack_tag(j);
-    if (tag == TagReference) {
-      oop *addr = (oop*) interpreter_frame_expression_stack_at(j);
-      f->do_oop(addr);
-#ifdef ASSERT
-    } else {
-      assert(tag == TagValue, "bad tag value for stack element");
-      oop *p = (oop*) interpreter_frame_expression_stack_at((j));
-      assert(*p == NULL || !mask->is_oop(j+max_locals), "stack oop map mismatch");
-#endif // ASSERT
-    }
-  }
-}
-
 void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) {
   InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
   finder.oops_do();
@@ -1306,29 +1188,18 @@
 
   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
 
-  if (TaggedStackInterpreter) {
-    InterpreterOopMap *mask = NULL;
-#ifdef ASSERT
-    InterpreterOopMap oopmap_mask;
-    methodHandle method(thread, m);
-    OopMapCache::compute_one_oop_map(method, bci, &oopmap_mask);
-    mask = &oopmap_mask;
-#endif // ASSERT
-    oops_interpreted_locals_do(&_check_oop, max_locals, mask);
-  } else {
-    // process dynamic part
-    InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
-                                      &_check_value);
-    InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
-                                      &_check_oop  );
-    InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
-                                      &_zap_dead   );
+  // process dynamic part
+  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
+                                    &_check_value);
+  InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
+                                    &_check_oop  );
+  InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
+                                    &_zap_dead   );
 
-    // get frame map
-    InterpreterOopMap mask;
-    m->mask_for(bci, &mask);
-    mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
-  }
+  // get frame map
+  InterpreterOopMap mask;
+  m->mask_for(bci, &mask);
+  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
 }
 
 
--- a/src/share/vm/runtime/frame.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/frame.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,26 +191,10 @@
   intptr_t*  interpreter_frame_mdx_addr() const;
 
  public:
-  // Tags for TaggedStackInterpreter
-  enum Tag {
-      TagValue = 0,          // Important: must be zero to use G0 on sparc.
-      TagReference = 0x555,  // Reference type - is an oop that needs gc.
-      TagCategory2 = 0x666   // Only used internally by interpreter
-                             // and not written to the java stack.
-      // The values above are chosen so that misuse causes a crash
-      // with a recognizable value.
-  };
-
-  static Tag tag_for_basic_type(BasicType typ) {
-    return (typ == T_OBJECT ? TagReference : TagValue);
-  }
-
   // Locals
 
   // The _at version returns a pointer because the address is used for GC.
   intptr_t* interpreter_frame_local_at(int index) const;
-  Tag       interpreter_frame_local_tag(int index) const;
-  void      interpreter_frame_set_local_tag(int index, Tag tag) const;
 
   void interpreter_frame_set_locals(intptr_t* locs);
 
@@ -260,8 +244,6 @@
 
   // The _at version returns a pointer because the address is used for GC.
   intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
-  Tag       interpreter_frame_expression_stack_tag(jint offset) const;
-  void      interpreter_frame_set_expression_stack_tag(jint offset, Tag tag) const;
 
   // top of expression stack
   intptr_t* interpreter_frame_tos_at(jint offset) const;
@@ -375,12 +357,6 @@
   void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
-  void oops_interpreted_locals_do(OopClosure *f,
-                                 int max_locals,
-                                 InterpreterOopMap *mask);
-  void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
-                                 bool has_receiver, int max_stack, int max_locals,
-                                 InterpreterOopMap *mask);
   void oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
--- a/src/share/vm/runtime/globals.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3510,9 +3510,6 @@
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
-  product(bool, TaggedStackInterpreter, false,                              \
-          "Insert tags in interpreter execution stack for oopmap generaion")\
-                                                                            \
   diagnostic(bool, PauseAtStartup,      false,                              \
           "Causes the VM to pause at startup time and wait for the pause "  \
           "file to be removed (default: ./vm.paused.<pid>)")                \
--- a/src/share/vm/runtime/javaCalls.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/javaCalls.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,17 +417,9 @@
       // Handle conversion
       _value[i] = (intptr_t)Handle::raw_resolve((oop *)_value[i]);
     }
-    // The parameters are moved to the parameters array to include the tags.
-    if (TaggedStackInterpreter) {
-      // Tags are interspersed with arguments.  Tags are first.
-      int tagged_index = i*2;
-      _parameters[tagged_index]   = _is_oop[i] ? frame::TagReference :
-                                                 frame::TagValue;
-      _parameters[tagged_index+1] = _value[i];
-    }
   }
   // Return argument vector
-  return TaggedStackInterpreter ? _parameters : _value;
+  return _value;
 }
 
 
--- a/src/share/vm/runtime/javaCalls.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/javaCalls.hpp	Thu May 13 13:05:47 2010 -0700
@@ -66,11 +66,9 @@
   };
 
   intptr_t    _value_buffer [_default_size + 1];
-  intptr_t    _parameter_buffer [_default_size*2 + 1];
   bool        _is_oop_buffer[_default_size + 1];
 
   intptr_t*   _value;
-  intptr_t*   _parameters;
   bool*       _is_oop;
   int         _size;
   int         _max_size;
@@ -81,7 +79,6 @@
     _value    = &_value_buffer[1];
     _is_oop   = &_is_oop_buffer[1];
 
-    _parameters = &_parameter_buffer[0];
     _max_size = _default_size;
     _size = 0;
     _start_at_zero = false;
@@ -99,11 +96,10 @@
     if (max_size > _default_size) {
       _value  = NEW_RESOURCE_ARRAY(intptr_t, max_size + 1);
       _is_oop = NEW_RESOURCE_ARRAY(bool, max_size + 1);
-      if (TaggedStackInterpreter) {
-        _parameters  = NEW_RESOURCE_ARRAY(intptr_t, max_size*2 + 1);
-      }
+
       // Reserve room for potential receiver in value and is_oop
       _value++; _is_oop++;
+
       _max_size = max_size;
       _size = 0;
       _start_at_zero = false;
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1557,7 +1557,7 @@
     methodOop actual_method = MethodHandles::decode_method(actual,
                                                           kignore, fignore);
     if (actual_method != NULL) {
-      if (actual_method->name() == vmSymbols::invoke_name())
+      if (methodOopDesc::is_method_handle_invoke_name(actual_method->name()))
         mhName = "$";
       else
         mhName = actual_method->signature()->as_C_string();
@@ -1842,14 +1842,11 @@
 
       case T_OBJECT:
       case T_ARRAY:
-        if (!TaggedStackInterpreter) {
 #ifdef _LP64
-          return T_LONG;
+        return T_LONG;
 #else
-          return T_INT;
+        return T_INT;
 #endif
-        }
-        return T_OBJECT;
 
       case T_INT:
       case T_LONG:
@@ -2595,17 +2592,9 @@
   // Copy the locals.  Order is preserved so that loading of longs works.
   // Since there's no GC I can copy the oops blindly.
   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
-  if (TaggedStackInterpreter) {
-    for (int i = 0; i < max_locals; i++) {
-      // copy only each local separately to the buffer avoiding the tag
-      buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
-    }
-  } else {
-    Copy::disjoint_words(
-                       (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
+  Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
                        (HeapWord*)&buf[0],
                        max_locals);
-  }
 
   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
   int i = max_locals;
--- a/src/share/vm/runtime/signature.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/signature.cpp	Thu May 13 13:05:47 2010 -0700
@@ -327,6 +327,26 @@
   return result;
 }
 
+klassOop SignatureStream::as_klass(Handle class_loader, Handle protection_domain,
+                                   FailureMode failure_mode, TRAPS) {
+  if (!is_object())  return NULL;
+  symbolOop name = as_symbol(CHECK_NULL);
+  if (failure_mode == ReturnNull) {
+    return SystemDictionary::resolve_or_null(name, class_loader, protection_domain, THREAD);
+  } else {
+    bool throw_error = (failure_mode == NCDFError);
+    return SystemDictionary::resolve_or_fail(name, class_loader, protection_domain, throw_error, THREAD);
+  }
+}
+
+oop SignatureStream::as_java_mirror(Handle class_loader, Handle protection_domain,
+                                    FailureMode failure_mode, TRAPS) {
+  if (!is_object())
+    return Universe::java_mirror(type());
+  klassOop klass = as_klass(class_loader, protection_domain, failure_mode, CHECK_NULL);
+  if (klass == NULL)  return NULL;
+  return Klass::cast(klass)->java_mirror();
+}
 
 symbolOop SignatureStream::as_symbol_or_null() {
   // Create a symbol from for string _begin _end
--- a/src/share/vm/runtime/signature.hpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/signature.hpp	Thu May 13 13:05:47 2010 -0700
@@ -402,6 +402,9 @@
   bool is_array() const;                         // True if this argument is an array
   BasicType type() const                         { return _type; }
   symbolOop as_symbol(TRAPS);
+  enum FailureMode { ReturnNull, CNFException, NCDFError };
+  klassOop as_klass(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS);
+  oop as_java_mirror(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS);
 
   // Same as as_symbol, except that allocation of new symbols is avoided.
   symbolOop as_symbol_or_null();
--- a/src/share/vm/runtime/vframe.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/vframe.cpp	Thu May 13 13:05:47 2010 -0700
@@ -244,51 +244,30 @@
   StackValueCollection* result = new StackValueCollection(length);
 
   // Get oopmap describing oops and int for current bci
-  if (TaggedStackInterpreter) {
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = locals_addr_at(i);
-
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      frame::Tag tag = fr().interpreter_frame_local_tag(i);
-      if (tag == frame::TagReference) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
-    }
+  InterpreterOopMap oop_mask;
+  if (TraceDeoptimization && Verbose) {
+    methodHandle m_h(thread(), method());
+    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
-    InterpreterOopMap oop_mask;
-    if (TraceDeoptimization && Verbose) {
-      methodHandle m_h(thread(), method());
-      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-    } else {
-      method()->mask_for(bci(), &oop_mask);
-    }
-    // handle locals
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = locals_addr_at(i);
+    method()->mask_for(bci(), &oop_mask);
+  }
+  // handle locals
+  for(int i=0; i < length; i++) {
+    // Find stack location
+    intptr_t *addr = locals_addr_at(i);
 
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (oop_mask.is_oop(i)) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
+    // Depending on oop/int put it in the right package
+    StackValue *sv;
+    if (oop_mask.is_oop(i)) {
+      // oop value
+      Handle h(*(oop *)addr);
+      sv = new StackValue(h);
+    } else {
+      // integer
+      sv = new StackValue(*addr);
     }
+    assert(sv != NULL, "sanity check");
+    result->add(sv);
   }
   return result;
 }
@@ -331,53 +310,31 @@
   int nof_locals = method()->max_locals();
   StackValueCollection* result = new StackValueCollection(length);
 
-  if (TaggedStackInterpreter) {
-    // handle expressions
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
-      frame::Tag tag = fr().interpreter_frame_expression_stack_tag(i);
-
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (tag == frame::TagReference) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // otherwise
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
-    }
+  InterpreterOopMap oop_mask;
+  // Get oopmap describing oops and int for current bci
+  if (TraceDeoptimization && Verbose) {
+    methodHandle m_h(method());
+    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
-    InterpreterOopMap oop_mask;
-    // Get oopmap describing oops and int for current bci
-    if (TraceDeoptimization && Verbose) {
-      methodHandle m_h(method());
-      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-    } else {
-      method()->mask_for(bci(), &oop_mask);
-    }
-    // handle expressions
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
+    method()->mask_for(bci(), &oop_mask);
+  }
+  // handle expressions
+  for(int i=0; i < length; i++) {
+    // Find stack location
+    intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
 
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (oop_mask.is_oop(i + nof_locals)) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
+    // Depending on oop/int put it in the right package
+    StackValue *sv;
+    if (oop_mask.is_oop(i + nof_locals)) {
+      // oop value
+      Handle h(*(oop *)addr);
+      sv = new StackValue(h);
+    } else {
+      // integer
+      sv = new StackValue(*addr);
     }
+    assert(sv != NULL, "sanity check");
+    result->add(sv);
   }
   return result;
 }
--- a/src/share/vm/runtime/vframeArray.cpp	Wed May 12 10:28:13 2010 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Thu May 13 13:05:47 2010 -0700
@@ -309,11 +309,6 @@
       default:
         ShouldNotReachHere();
     }
-    if (TaggedStackInterpreter) {
-      // Write tag to the stack
-      iframe()->interpreter_frame_set_expression_stack_tag(i,
-                                  frame::tag_for_basic_type(value->type()));
-    }
   }
 
 
@@ -335,11 +330,6 @@
       default:
         ShouldNotReachHere();
     }
-    if (TaggedStackInterpreter) {
-      // Write tag to stack
-      iframe()->interpreter_frame_set_local_tag(i,
-                                  frame::tag_for_basic_type(value->type()));
-    }
   }
 
   if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
@@ -354,9 +344,8 @@
       void* saved_args = thread->popframe_preserved_args();
       assert(saved_args != NULL, "must have been saved by interpreter");
 #ifdef ASSERT
-      int stack_words = Interpreter::stackElementWords();
       assert(popframe_preserved_args_size_in_words <=
-             iframe()->interpreter_frame_expression_stack_size()*stack_words,
+             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
              "expression stack size should have been extended");
 #endif // ASSERT
       int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6925573/SortMethodsTest.java	Thu May 13 13:05:47 2010 -0700
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import java.lang.reflect.Method;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Vector;
+
+import javax.tools.Diagnostic;
+import javax.tools.DiagnosticCollector;
+import javax.tools.FileObject;
+import javax.tools.ForwardingJavaFileManager;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaCompiler.CompilationTask;
+import javax.tools.JavaFileManager;
+import javax.tools.JavaFileObject;
+import javax.tools.JavaFileObject.Kind;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+/*
+ * @test SortMethodsTest
+ * @bug 6925573
+ * @summary verify that class loading does not need quadratic time with regard to the number of class methods.
+ * @run main SortMethodsTest
+ * @author volker.simonis@gmail.com
+*/
+
+public class SortMethodsTest {
+
+  static String createClass(String name, int nrOfMethods) {
+    StringWriter sw = new StringWriter();
+    PrintWriter pw = new PrintWriter(sw);
+    pw.println("public class " + name + "{");
+    for (int i = 0; i < nrOfMethods; i++) {
+      pw.println("  public void m" + i + "() {}");
+    }
+    pw.println("  public static String sayHello() {");
+    pw.println("    return \"Hello from class \" + " + name +
+               ".class.getName() + \" with \" + " + name +
+               ".class.getDeclaredMethods().length + \" methods\";");
+    pw.println("  }");
+    pw.println("}");
+    pw.close();
+    return sw.toString();
+  }
+
+  public static void main(String args[]) {
+
+    JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+    DiagnosticCollector<JavaFileObject> diags = new DiagnosticCollector<JavaFileObject>();
+    final String cName = "ManyMethodsClass";
+    Vector<Long> results = new Vector<Long>();
+
+    for (int i = 6; i < 600000; i*=10) {
+      String klass =  createClass(cName, i);
+      JavaMemoryFileObject file = new JavaMemoryFileObject(cName, klass);
+      MemoryFileManager mfm = new MemoryFileManager(comp.getStandardFileManager(diags, null, null), file);
+      CompilationTask task = comp.getTask(null, mfm, diags, null, null, Arrays.asList(file));
+
+      if (task.call()) {
+        try {
+          MemoryClassLoader mcl = new MemoryClassLoader(file);
+          long start = System.nanoTime();
+          Class<? extends Object> c = Class.forName(cName, true, mcl);
+          long end = System.nanoTime();
+          results.add(end - start);
+          Method m = c.getDeclaredMethod("sayHello", new Class[0]);
+          String ret = (String)m.invoke(null, new Object[0]);
+          System.out.println(ret + " (loaded and resolved in " + (end - start) + "ns)");
+        } catch (Exception e) {
+          System.err.println(e);
+        }
+      }
+      else {
+        System.out.println(klass);
+        System.out.println();
+        for (Diagnostic diag : diags.getDiagnostics()) {
+          System.out.println(diag.getCode() + "\n" + diag.getKind() + "\n" + diag.getPosition());
+          System.out.println(diag.getSource() + "\n" + diag.getMessage(null));
+        }
+      }
+    }
+
+    long lastRatio = 0;
+    for (int i = 2; i < results.size(); i++) {
+      long normalized1 = Math.max(results.get(i-1) - results.get(0), 1);
+      long normalized2 = Math.max(results.get(i) - results.get(0), 1);
+      long ratio = normalized2/normalized1;
+      lastRatio = ratio;
+      System.out.println("10 x more methods requires " + ratio + " x more time");
+    }
+    // The following is just a rough estimate, but it seems to work on current x86_64 and sparcv9 machines.
+    if (lastRatio > 80) {
+      throw new RuntimeException("ATTENTION: it seems that class loading needs quadratic time with regard to the number of class methods!!!");
+    }
+  }
+}
+
+class JavaMemoryFileObject extends SimpleJavaFileObject {
+
+  private final String code;
+  private ByteArrayOutputStream byteCode;
+
+  JavaMemoryFileObject(String name, String code) {
+    super(URI.create("string:///" + name.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE);
+    this.code = code;
+  }
+
+  @Override
+  public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+    return code;
+  }
+
+  @Override
+  public OutputStream openOutputStream() {
+    byteCode = new ByteArrayOutputStream();
+    return byteCode;
+  }
+
+  byte[] getByteCode() {
+    return byteCode.toByteArray();
+  }
+}
+
+class MemoryClassLoader extends ClassLoader {
+
+  private final JavaMemoryFileObject jfo;
+
+  public MemoryClassLoader(JavaMemoryFileObject jfo) {
+    this.jfo = jfo;
+  }
+
+  public Class findClass(String name) {
+    byte[] b = jfo.getByteCode();
+    return defineClass(name, b, 0, b.length);
+  }
+}
+
+class MemoryFileManager extends ForwardingJavaFileManager<JavaFileManager> {
+
+  private final JavaFileObject jfo;
+
+  public MemoryFileManager(StandardJavaFileManager jfm, JavaFileObject jfo) {
+    super(jfm);
+    this.jfo = jfo;
+  }
+
+  @Override
+  public FileObject getFileForInput(Location location, String packageName,
+                                    String relativeName) throws IOException {
+    return jfo;
+  }
+
+  @Override
+  public JavaFileObject getJavaFileForOutput(Location location, String qualifiedName,
+                                             Kind kind, FileObject outputFile) throws IOException {
+    return jfo;
+  }
+
+}