changeset 13031:9ebaac78a8a0 jdk8-b115

Merge
author amurillo
date Tue, 05 Nov 2013 14:06:23 -0800
parents 205834867346 (current diff) afd012c940e4 (diff)
children 842b6ce4dfb4 2b8e28fdf503
files
diffstat 57 files changed, 887 insertions(+), 338 deletions(-)
--- a/.hgtags	Thu Oct 31 16:31:31 2013 -0700
+++ b/.hgtags	Tue Nov 05 14:06:23 2013 -0800
@@ -390,3 +390,4 @@
 4589b398ab03aba6a5da8c06ff53603488d1b8f4 jdk8-b113
 82a9cdbf683e374a76f2009352de53e16bed5a91 hs25-b56
 7fd913010dbbf75260688fd2fa8964763fa49a09 jdk8-b114
+3b32d287da89a47a45d16f6d9ba5bd3cd9bf4b3e hs25-b57
--- a/agent/src/os/bsd/ps_proc.c	Thu Oct 31 16:31:31 2013 -0700
+++ b/agent/src/os/bsd/ps_proc.c	Tue Nov 05 14:06:23 2013 -0800
@@ -131,7 +131,7 @@
 
 static bool ptrace_continue(pid_t pid, int signal) {
   // pass the signal to the process so we don't swallow it
-  if (ptrace(PTRACE_CONT, pid, NULL, signal) < 0) {
+  if (ptrace(PT_CONTINUE, pid, NULL, signal) < 0) {
     print_debug("ptrace(PTRACE_CONT, ..) failed for %d\n", pid);
     return false;
   }
@@ -434,7 +434,6 @@
 // attach to the process. This is the one and only function exposed from this file.
 struct ps_prochandle* Pgrab(pid_t pid) {
   struct ps_prochandle* ph = NULL;
-  thread_info* thr = NULL;
 
   if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) {
      print_debug("can't allocate memory for ps_prochandle\n");
--- a/make/hotspot_version	Thu Oct 31 16:31:31 2013 -0700
+++ b/make/hotspot_version	Tue Nov 05 14:06:23 2013 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=56
+HS_BUILD_NUMBER=57
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -365,7 +365,7 @@
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
 }
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -4099,15 +4099,19 @@
 
 void MacroAssembler::encode_klass_not_null(Register r) {
   assert (UseCompressedClassPointers, "must be compressed");
-  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-  assert(r != G6_heapbase, "bad register choice");
-  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-  sub(r, G6_heapbase, r);
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    srlx(r, LogKlassAlignmentInBytes, r);
+  if (Universe::narrow_klass_base() != NULL) {
+    assert(r != G6_heapbase, "bad register choice");
+    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+    sub(r, G6_heapbase, r);
+    if (Universe::narrow_klass_shift() != 0) {
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      srlx(r, LogKlassAlignmentInBytes, r);
+    }
+    reinit_heapbase();
+  } else {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+    srlx(r, Universe::narrow_klass_shift(), r);
   }
-  reinit_heapbase();
 }
 
 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
@@ -4115,11 +4119,16 @@
     encode_klass_not_null(src);
   } else {
     assert (UseCompressedClassPointers, "must be compressed");
-    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-    set((intptr_t)Universe::narrow_klass_base(), dst);
-    sub(src, dst, dst);
-    if (Universe::narrow_klass_shift() != 0) {
-      srlx(dst, LogKlassAlignmentInBytes, dst);
+    if (Universe::narrow_klass_base() != NULL) {
+      set((intptr_t)Universe::narrow_klass_base(), dst);
+      sub(src, dst, dst);
+      if (Universe::narrow_klass_shift() != 0) {
+        srlx(dst, LogKlassAlignmentInBytes, dst);
+      }
+    } else {
+      // shift src into dst
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+      srlx(src, Universe::narrow_klass_shift(), dst);
     }
   }
 }
@@ -4129,14 +4138,16 @@
 // the instructions they generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
   assert (UseCompressedClassPointers, "only for compressed klass ptrs");
-  // set + add + set
-  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
-    insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
-  if (Universe::narrow_klass_shift() == 0) {
-    return num_instrs * BytesPerInstWord;
-  } else { // sllx
-    return (num_instrs + 1) * BytesPerInstWord;
+  int num_instrs = 1;  // shift src,dst or add
+  if (Universe::narrow_klass_base() != NULL) {
+    // set + add + set
+    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
+                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
+    if (Universe::narrow_klass_shift() != 0) {
+      num_instrs += 1;  // sllx
+    }
   }
+  return num_instrs * BytesPerInstWord;
 }
 
 // !!! If the instructions that get generated here change then function
@@ -4145,13 +4156,17 @@
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
   assert (UseCompressedClassPointers, "must be compressed");
-  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-  assert(r != G6_heapbase, "bad register choice");
-  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-  if (Universe::narrow_klass_shift() != 0)
-    sllx(r, LogKlassAlignmentInBytes, r);
-  add(r, G6_heapbase, r);
-  reinit_heapbase();
+  if (Universe::narrow_klass_base() != NULL) {
+    assert(r != G6_heapbase, "bad register choice");
+    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+    if (Universe::narrow_klass_shift() != 0)
+      sllx(r, LogKlassAlignmentInBytes, r);
+    add(r, G6_heapbase, r);
+    reinit_heapbase();
+  } else {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+    sllx(r, Universe::narrow_klass_shift(), r);
+  }
 }
 
 void  MacroAssembler::decode_klass_not_null(Register src, Register dst) {
@@ -4161,16 +4176,21 @@
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
     assert (UseCompressedClassPointers, "must be compressed");
-    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-    if (Universe::narrow_klass_shift() != 0) {
-      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
-      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-      sllx(src, LogKlassAlignmentInBytes, dst);
-      add(dst, G6_heapbase, dst);
-      reinit_heapbase();
+    if (Universe::narrow_klass_base() != NULL) {
+      if (Universe::narrow_klass_shift() != 0) {
+        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
+        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+        sllx(src, LogKlassAlignmentInBytes, dst);
+        add(dst, G6_heapbase, dst);
+        reinit_heapbase();
+      } else {
+        set((intptr_t)Universe::narrow_klass_base(), dst);
+        add(src, dst, dst);
+      }
     } else {
-      set((intptr_t)Universe::narrow_klass_base(), dst);
-      add(src, dst, dst);
+      // shift/mov src into dst.
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+      sllx(src, Universe::narrow_klass_shift(), dst);
     }
   }
 }
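
(All of the hunks above implement one piece of arithmetic: a compressed class pointer is (klass - narrow_klass_base) >> narrow_klass_shift, and when the base is NULL the subtraction, and with it the G6_heapbase scratch register, can be dropped. A minimal C++ model of both directions, with hypothetical base/shift globals standing in for Universe::narrow_klass_base() and Universe::narrow_klass_shift():)

    #include <cstdint>

    // hypothetical stand-ins for Universe::narrow_klass_base()/narrow_klass_shift()
    static uintptr_t narrow_klass_base  = 0;  // 0 means zero-based encoding: no sub/add needed
    static int       narrow_klass_shift = 3;  // LogKlassAlignmentInBytes

    static uint32_t encode_klass(uintptr_t klass) {
      uintptr_t v = klass - narrow_klass_base;     // no-op when the base is 0
      return (uint32_t)(v >> narrow_klass_shift);  // result must fit in 32 bits
    }

    static uintptr_t decode_klass(uint32_t nklass) {
      return narrow_klass_base + ((uintptr_t)nklass << narrow_klass_shift);
    }
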
--- a/src/cpu/sparc/vm/sparc.ad	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Tue Nov 05 14:06:23 2013 -0800
@@ -1660,12 +1660,16 @@
   if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
-    st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
-    if (Universe::narrow_klass_shift() != 0) {
-      st->print_cr("\tSLL    R_G5,3,R_G5");
+    if (Universe::narrow_klass_base() != 0) {
+      st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
+      if (Universe::narrow_klass_shift() != 0) {
+        st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
+      }
+      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+      st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
+    } else {
+      st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
     }
-    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
-    st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
   } else {
     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
   }
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -153,13 +153,9 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
 
-  Label cont;
-  address compiled_entry = __ pc();
-
-  address entry = __ pc();
 #if !defined(_LP64) && defined(COMPILER2)
   // All return values are where we want them, except for Longs.  C2 returns
   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
@@ -170,14 +166,12 @@
   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 
-  if (incoming_state == ltos) {
+  if (state == ltos) {
     __ srl (G1,  0, O1);
     __ srlx(G1, 32, O0);
   }
 #endif // !_LP64 && COMPILER2
 
-  __ bind(cont);
-
   // The callee returns with the stack possibly adjusted by adapter transition
   // We remove that possible adjustment here.
   // All interpreter local registers are untouched. Any result is passed back
@@ -186,29 +180,18 @@
 
   __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
 
-  Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
-  const Register size  = G1_scratch;
-  if (EnableInvokeDynamic) {
-    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
-  __ bind(L_got_cache);
-  __ ld_ptr(cache, ConstantPoolCache::base_offset() +
-                   ConstantPoolCacheEntry::flags_offset(), size);
-  __ and3(size, 0xFF, size);                   // argument size in words
-  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
-  __ add(Lesp, size, Lesp);                    // pop arguments
+  const Register index  = G1_scratch;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
+  const Register parameter_size = flags;
+  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
+  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
+  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
-    __ ba_short(L_got_cache);
-  }
-
   return entry;
 }
 
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2932,9 +2932,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     AddressLiteral table(table_addr);
     __ set(table, temp);
     __ sll(ra, LogBytesPerWord, ra);
@@ -2984,7 +2982,7 @@
   __ verify_oop(O0_recv);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
   // Make sure we don't need to mask Rret after the above shift
@@ -3026,7 +3024,7 @@
   __ profile_final_call(O4);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
   // Make sure we don't need to mask Rret after the above shift
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1468,19 +1468,18 @@
     addr = new LIR_Address(src.result(), offset, type);
   }
 
-  if (data != dst) {
-    __ move(data, dst);
-    data = dst;
-  }
+  // Move data into dst first: xchg and xadd are 2-operand forms, so dst is both input and result
+  __ move(data, dst);
+
   if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
   } else {
     if (is_obj) {
       // Do the pre-write barrier, if any.
       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                   true /* do_load */, false /* patch */, NULL);
     }
-    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
     if (is_obj) {
       // Seems to be a precise address
       post_barrier(LIR_OprFact::address(addr), data);
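
(The LIRGenerator change funnels the value through dst so that xchg/xadd see one register as both source and destination, matching the two-operand x86 encoding in which the register that supplies the new value (or the addend) receives the old memory contents. The same semantics in portable C++, as a sketch:)

    #include <atomic>
    #include <cstdint>

    // xchg: dst goes in as the new value and comes back holding the old one
    static int32_t atomic_xchg(std::atomic<int32_t>& addr, int32_t dst) {
      return addr.exchange(dst);   // x86 xchg with a memory operand is implicitly locked
    }

    // xadd: dst goes in as the addend and comes back holding the previous value
    static int32_t atomic_xadd(std::atomic<int32_t>& addr, int32_t dst) {
      return addr.fetch_add(dst);  // compiles to lock xadd on x86
    }
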
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -367,7 +367,7 @@
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
 }
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -5049,25 +5049,32 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-  assert(r != r12_heapbase, "Encoding a klass in r12");
-  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
-  subq(r, r12_heapbase);
+  if (Universe::narrow_klass_base() != NULL) {
+    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+    assert(r != r12_heapbase, "Encoding a klass in r12");
+    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+    subq(r, r12_heapbase);
+  }
   if (Universe::narrow_klass_shift() != 0) {
     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
-  reinit_heapbase();
+  if (Universe::narrow_klass_base() != NULL) {
+    reinit_heapbase();
+  }
 }
 
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   if (dst == src) {
     encode_klass_not_null(src);
   } else {
-    mov64(dst, (int64_t)Universe::narrow_klass_base());
-    negq(dst);
-    addq(dst, src);
+    if (Universe::narrow_klass_base() != NULL) {
+      mov64(dst, (int64_t)Universe::narrow_klass_base());
+      negq(dst);
+      addq(dst, src);
+    } else {
+      movptr(dst, src);
+    }
     if (Universe::narrow_klass_shift() != 0) {
       assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
       shrq(dst, LogKlassAlignmentInBytes);
@@ -5081,15 +5088,19 @@
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
   assert (UseCompressedClassPointers, "only for compressed klass ptrs");
-  // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
-  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+  if (Universe::narrow_klass_base() != NULL) {
+    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
+    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+  } else {
+    // longest variant of the decode klass sequence: mov64 + leaq
+    return 16;
+  }
 }
 
 // !!! If the instructions that get generated here change then function
 // instr_size_for_decode_klass_not_null() needs to get updated.
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Note: it will change flags
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
@@ -5100,14 +5111,15 @@
     shlq(r, LogKlassAlignmentInBytes);
   }
   // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
-  addq(r, r12_heapbase);
-  reinit_heapbase();
+  if (Universe::narrow_klass_base() != NULL) {
+    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+    addq(r, r12_heapbase);
+    reinit_heapbase();
+  }
 }
 
 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   // Note: it will change flags
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
   if (dst == src) {
     decode_klass_not_null(dst);
@@ -5115,7 +5127,6 @@
     // Cannot assert, unverified entry point counts instructions (see .ad file)
     // vtableStubs also counts instructions in pd_code_size_limit.
     // Also do not verify_oop as this is called by verify_oop.
-
     mov64(dst, (int64_t)Universe::narrow_klass_base());
     if (Universe::narrow_klass_shift() != 0) {
       assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -150,13 +150,12 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
 #ifdef COMPILER2
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     for (int i = 1; i < 8; i++) {
         __ ffree(i);
     }
@@ -164,7 +163,7 @@
     __ empty_FPU_stack();
   }
 #endif
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
   } else {
     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
@@ -172,12 +171,12 @@
 
   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
-  if (incoming_state == ftos && UseSSE >= 1) {
+  if (state == ftos && UseSSE >= 1) {
     __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
     __ addptr(rsp, wordSize);
-  } else if (incoming_state == dtos && UseSSE >= 2) {
+  } else if (state == dtos && UseSSE >= 2) {
     __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
@@ -194,33 +193,22 @@
   __ restore_bcp();
   __ restore_locals();
 
-  if (incoming_state == atos) {
+  if (state == atos) {
     Register mdp = rbx;
     Register tmp = rcx;
     __ profile_return_type(mdp, rax, tmp);
   }
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() +
-                    ConstantPoolCacheEntry::flags_offset()));
-  __ andptr(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
-
   return entry;
 }
 
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -166,7 +166,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
   // Restore stack bottom in case i2c adjusted stack
@@ -183,28 +183,16 @@
     __ profile_return_type(mdp, rax, tmp);
   }
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                       Address::times_ptr,
-                       in_bytes(ConstantPoolCache::base_offset()) +
-                       3 * wordSize));
-  __ andl(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Address::times_8));
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
-
   return entry;
 }
 
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2925,9 +2925,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
   }
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2980,9 +2980,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ lea(rscratch1, table);
     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1006,7 +1006,7 @@
   istate->set_stack_limit(stack_base - method->max_stack() - 1);
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   ShouldNotCallThis();
   return NULL;
 }
--- a/src/cpu/zero/vm/globals_zero.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/cpu/zero/vm/globals_zero.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -57,6 +57,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
 
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -945,17 +945,15 @@
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
 
-  return (double)(os::elapsed_counter()) * 0.000001;
+  return ((double)os::elapsed_counter()) / os::elapsed_frequency();
 }
 
 jlong os::elapsed_counter() {
-  timeval time;
-  int status = gettimeofday(&time, NULL);
-  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+  return javaTimeNanos() - initial_time_count;
 }
 
 jlong os::elapsed_frequency() {
-  return (1000 * 1000);
+  return NANOSECS_PER_SEC; // nanosecond resolution
 }
 
 bool os::supports_vtime() { return true; }
@@ -3582,7 +3580,7 @@
   Bsd::_main_thread = pthread_self();
 
   Bsd::clock_init();
-  initial_time_count = os::elapsed_counter();
+  initial_time_count = javaTimeNanos();
 
 #ifdef __APPLE__
   // XXXDARWIN
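
(Both os_bsd.cpp and, below, os_linux.cpp now express elapsed time as counter / frequency instead of hard-coding microseconds, with the counter anchored at VM startup. A sketch of the invariant using std::chrono as a stand-in for os::javaTimeNanos():)

    #include <chrono>
    #include <cstdint>

    static const int64_t NANOSECS_PER_SEC = 1000000000LL;

    static int64_t now_nanos() {  // stand-in for os::javaTimeNanos()
      using namespace std::chrono;
      return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
    }

    static int64_t initial_time_count = now_nanos();  // set once, in os::init()

    static int64_t elapsed_counter()   { return now_nanos() - initial_time_count; }
    static int64_t elapsed_frequency() { return NANOSECS_PER_SEC; }
    static double  elapsedTime()       { return (double)elapsed_counter() / (double)elapsed_frequency(); }
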
--- a/src/os/linux/vm/os_linux.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1333,17 +1333,15 @@
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
 
-  return (double)(os::elapsed_counter()) * 0.000001;
+  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
 }
 
 jlong os::elapsed_counter() {
-  timeval time;
-  int status = gettimeofday(&time, NULL);
-  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+  return javaTimeNanos() - initial_time_count;
 }
 
 jlong os::elapsed_frequency() {
-  return (1000 * 1000);
+  return NANOSECS_PER_SEC; // nanosecond resolution
 }
 
 bool os::supports_vtime() { return true; }
@@ -4750,7 +4748,7 @@
   Linux::_main_thread = pthread_self();
 
   Linux::clock_init();
-  initial_time_count = os::elapsed_counter();
+  initial_time_count = javaTimeNanos();
 
   // pthread_condattr initialization for monotonic clock
   int status;
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -79,6 +79,15 @@
 # include <pthread_np.h>
 #endif
 
+// needed by current_stack_region() workaround for Mavericks
+#if defined(__APPLE__)
+# include <errno.h>
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048
+# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13
+#endif
+
 #ifdef AMD64
 #define SPELL_REG_SP "rsp"
 #define SPELL_REG_FP "rbp"
@@ -828,6 +837,21 @@
   pthread_t self = pthread_self();
   void *stacktop = pthread_get_stackaddr_np(self);
   *size = pthread_get_stacksize_np(self);
+  // workaround for OS X 10.9.0 (Mavericks)
+  // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
+  if (pthread_main_np() == 1) {
+    if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
+      char kern_osrelease[256];
+      size_t kern_osrelease_size = sizeof(kern_osrelease);
+      int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0);
+      if (ret == 0) {
+        // get the major number; atoi() will ignore the minor and micro portions of the version string
+        if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
+          *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
+        }
+      }
+    }
+  }
   *bottom = (address) stacktop - *size;
 #elif defined(__OpenBSD__)
   stack_t ss;
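
(The Mavericks workaround keys off the Darwin kernel version: kern.osrelease reports strings such as "13.0.0" for OS X 10.9, and atoi() stops at the first '.', leaving just the major number. A standalone sketch of the probe for an __APPLE__ build:)

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdlib>

    // true if the Darwin kernel major version is >= 13 (OS X 10.9, Mavericks)
    static bool is_mavericks_or_later() {
      char release[256];
      size_t len = sizeof(release);
      if (sysctlbyname("kern.osrelease", release, &len, NULL, 0) != 0) {
        return false;  // probe failed; assume an unaffected kernel
      }
      return atoi(release) >= 13;  // atoi() ignores the ".minor.micro" tail
    }
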
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1873,7 +1873,7 @@
         // number of implementors for decl_interface is 0 or 1. If
         // it's 0 then no class implements decl_interface and there's
         // no point in inlining.
-        if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
+        if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) {
           singleton = NULL;
         }
       }
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -57,6 +57,7 @@
   _init_state = ik->init_state();
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
+  _has_default_methods = ik->has_default_methods();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
 
   _implementor = NULL; // we will fill these lazily
--- a/src/share/vm/ci/ciInstanceKlass.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/ci/ciInstanceKlass.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -52,6 +52,7 @@
   bool                   _has_finalizer;
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
+  bool                   _has_default_methods;
 
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
@@ -171,6 +172,11 @@
     }
   }
 
+  bool has_default_methods()  {
+    assert(is_loaded(), "must be loaded");
+    return _has_default_methods;
+  }
+
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -131,6 +131,17 @@
   }
 }
 
+void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
+  // Lock to avoid classes being modified/added/removed during iteration
+  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    // Do not filter ArrayKlass oops here...
+    if (k->oop_is_array() || (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded())) {
+      klass_closure->do_klass(k);
+    }
+  }
+}
+
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k->oop_is_instance()) {
@@ -600,6 +611,12 @@
   }
 }
 
+void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->loaded_classes_do(klass_closure);
+  }
+}
+
 void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
--- a/src/share/vm/classfile/classLoaderData.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/classfile/classLoaderData.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -78,6 +78,7 @@
   static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
+  static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading(BoolObjectClosure* is_alive);
 
@@ -186,6 +187,7 @@
   bool keep_alive() const       { return _keep_alive; }
   bool is_alive(BoolObjectClosure* is_alive_closure) const;
   void classes_do(void f(Klass*));
+  void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
 
   // Deallocate free list during class unloading.
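
(loaded_classes_do gives callers a way to walk only classes that have finished loading, i.e. array klasses plus is_loaded() instance klasses, under the metaspace lock. A hedged sketch of a consumer, assuming HotSpot's KlassClosure interface with its single do_klass(Klass*) virtual; CountingClosure is a made-up example class:)

    // count loaded classes via the new iterator (sketch)
    class CountingClosure : public KlassClosure {
     public:
      int count;
      CountingClosure() : count(0) {}
      virtual void do_klass(Klass* k) { count++; }
    };

    static int count_loaded_classes() {
      CountingClosure cc;
      ClassLoaderDataGraph::loaded_classes_do(&cc);  // visits every CLD's klass list
      return cc.count;
    }
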
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -173,8 +173,6 @@
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
   do_klass(nio_Buffer_klass,                            java_nio_Buffer,                           Opt                 ) \
                                                                                                                          \
-  do_klass(PostVMInitHook_klass,                        sun_misc_PostVMInitHook,                   Opt                 ) \
-                                                                                                                         \
   /* Preload boxing klasses */                                                                                           \
   do_klass(Boolean_klass,                               java_lang_Boolean,                         Pre                 ) \
   do_klass(Character_klass,                             java_lang_Character,                       Pre                 ) \
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -780,6 +780,10 @@
 void CompileBroker::compilation_init() {
   _last_method_compiled[0] = '\0';
 
+  // No need to initialize compilation system if we do not use it.
+  if (!UseCompiler) {
+    return;
+  }
 #ifndef SHARK
   // Set the interface to the current compiler(s).
   int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -158,8 +158,8 @@
   // Runtime support
 
   // length = invoke bytecode length (to advance to next bytecode)
-  static address    deopt_entry   (TosState state, int length) { ShouldNotReachHere(); return NULL; }
-  static address    return_entry  (TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address deopt_entry(TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotReachHere(); return NULL; }
 
   static address    rethrow_exception_entry()                   { return _rethrow_exception_entry; }
 
--- a/src/share/vm/interpreter/cppInterpreter.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/cppInterpreter.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -78,7 +78,7 @@
   static address    stack_result_to_stack(int index)            { return _stack_to_stack[index]; }
   static address    stack_result_to_native(int index)           { return _stack_to_native_abi[index]; }
 
-  static address    return_entry  (TosState state, int length);
+  static address    return_entry  (TosState state, int length, Bytecodes::Code code);
   static address    deopt_entry   (TosState state, int length);
 
 #ifdef TARGET_ARCH_x86
--- a/src/share/vm/interpreter/interpreter.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/interpreter.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -329,15 +329,21 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support
 
-// If deoptimization happens, this function returns the point of next bytecode to continue execution
+/**
+ * If a deoptimization happens, this function returns the entry point used to continue execution at the next bytecode.
+ */
 address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
+
+  // Get the original and rewritten bytecode.
+  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
   assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
-  int             bci    = method->bci_from(bcp);
-  int             length = -1; // initial value for debugging
+
+  const int bci = method->bci_from(bcp);
+
   // compute continuation length
-  length = Bytecodes::length_at(method, bcp);
+  const int length = Bytecodes::length_at(method, bcp);
+
   // compute result type
   BasicType type = T_ILLEGAL;
 
@@ -393,7 +399,7 @@
   return
     is_top_frame
     ? Interpreter::deopt_entry (as_TosState(type), length)
-    : Interpreter::return_entry(as_TosState(type), length);
+    : Interpreter::return_entry(as_TosState(type), length, code);
 }
 
 // If deoptimization happens, this function returns the point where the interpreter reexecutes
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -184,8 +184,9 @@
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;
 
-address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs];
 
 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;
@@ -237,22 +238,37 @@
 #endif // !PRODUCT
 
   { CodeletMark cm(_masm, "return entry points");
+    const int index_size = sizeof(u2);
     for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
       Interpreter::_return_entry[i] =
         EntryPoint(
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(atos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(ltos, i),
-          generate_return_entry_for(ftos, i),
-          generate_return_entry_for(dtos, i),
-          generate_return_entry_for(vtos, i)
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(atos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(ltos, i, index_size),
+          generate_return_entry_for(ftos, i, index_size),
+          generate_return_entry_for(dtos, i, index_size),
+          generate_return_entry_for(vtos, i, index_size)
         );
     }
   }
 
+  { CodeletMark cm(_masm, "invoke return entry points");
+    const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos};
+    const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
+    const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
+    const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+
+    for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
+      TosState state = states[i];
+      Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
+      Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
+      Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
+    }
+  }
+
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(
@@ -298,13 +314,6 @@
     }
   }
 
-  for (int j = 0; j < number_of_states; j++) {
-    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
-    int index = Interpreter::TosState_as_index(states[j]);
-    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
-    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-  }
-
   { CodeletMark cm(_masm, "continuation entry points");
     Interpreter::_continuation_entry =
       EntryPoint(
@@ -534,9 +543,46 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Entry points
 
-address TemplateInterpreter::return_entry(TosState state, int length) {
+/**
+ * Returns the return entry table for the given invoke bytecode.
+ */
+address* TemplateInterpreter::invoke_return_entry_table_for(Bytecodes::Code code) {
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return Interpreter::invoke_return_entry_table();
+  case Bytecodes::_invokeinterface:
+    return Interpreter::invokeinterface_return_entry_table();
+  case Bytecodes::_invokedynamic:
+    return Interpreter::invokedynamic_return_entry_table();
+  default:
+    fatal(err_msg("invalid bytecode: %s", Bytecodes::name(code)));
+    return NULL;
+  }
+}
+
+/**
+ * Returns the return entry address for the given top-of-stack state and bytecode.
+ */
+address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
-  return _return_entry[length].entry(state);
+  const int index = TosState_as_index(state);
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return _invoke_return_entry[index];
+  case Bytecodes::_invokeinterface:
+    return _invokeinterface_return_entry[index];
+  case Bytecodes::_invokedynamic:
+    return _invokedynamic_return_entry[index];
+  default:
+    assert(!Bytecodes::is_invoke(code), err_msg("invoke instructions should be handled separately: %s", Bytecodes::name(code)));
+    return _return_entry[length].entry(state);
+  }
 }
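
(Call sites now select a per-invoke-bytecode table first and index it by top-of-stack state, replacing the old 3-byte/5-byte table pair. A sketch of the lookup the template table performs, written with the types this patch introduces:)

    // resolve the interpreter return address for an invoke site (sketch)
    static address return_address_for(Bytecodes::Code code, TosState state) {
      address* table = Interpreter::invoke_return_entry_table_for(code);  // invoke/interface/dynamic
      return table[Interpreter::TosState_as_index(state)];
    }
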
 
 
--- a/src/share/vm/interpreter/templateInterpreter.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -120,8 +120,9 @@
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;
 
-  static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
-  static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
+static address _invoke_return_entry[number_of_return_addrs];           // for invokestatic, invokespecial, invokevirtual, invokehandle return entries
+  static address _invokeinterface_return_entry[number_of_return_addrs];  // for invokeinterface return entries
+  static address _invokedynamic_return_entry[number_of_return_addrs];    // for invokedynamic return entries
 
   static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
@@ -161,12 +162,15 @@
   static address*   normal_table()                              { return _normal_table.table_for(); }
 
   // Support for invokes
-  static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
-  static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
-  static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
+  static address*   invoke_return_entry_table()                 { return _invoke_return_entry; }
+  static address*   invokeinterface_return_entry_table()        { return _invokeinterface_return_entry; }
+  static address*   invokedynamic_return_entry_table()          { return _invokedynamic_return_entry; }
+  static int        TosState_as_index(TosState state);
 
-  static address    return_entry  (TosState state, int length);
-  static address    deopt_entry   (TosState state, int length);
+  static address* invoke_return_entry_table_for(Bytecodes::Code code);
+
+  static address deopt_entry(TosState state, int length);
+  static address return_entry(TosState state, int length, Bytecodes::Code code);
 
   // Safepoint support
   static void       notice_safepoints();                        // stops the thread when reaching a safepoint
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -53,7 +53,7 @@
   address generate_ClassCastException_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step);
+  address generate_return_entry_for(TosState state, int step, size_t index_size);
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);
--- a/src/share/vm/memory/metaspace.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -56,7 +56,7 @@
 
 MetaWord* last_allocated = 0;
 
-size_t Metaspace::_class_metaspace_size;
+size_t Metaspace::_compressed_class_space_size;
 
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
@@ -2843,6 +2843,8 @@
 #define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
+static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
+
 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
   // narrow_klass_base is the lower of the metaspace base and the cds base
@@ -2852,14 +2854,22 @@
   address higher_address;
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                          (address)(metaspace_base + class_metaspace_size()));
+                          (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
   } else {
-    higher_address = metaspace_base + class_metaspace_size();
+    higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
+
+    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
+    // If compressed class space fits in lower 32G, we don't need a base.
+    if (higher_address <= (address)klass_encoding_max) {
+      lower_base = 0; // effectively lower base is zero.
+    }
   }
+
   Universe::set_narrow_klass_base(lower_base);
-  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+
+  if ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax) {
     Universe::set_narrow_klass_shift(0);
   } else {
     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
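
(The new zero-base test is pure arithmetic on the encoding range: a 32-bit narrow klass scaled by LogKlassAlignmentInBytes = 3 reaches (2^32) << 3 = 32 GB, so a compressed class space that ends at or below that address needs no base register. A sketch of the decision with the constants spelled out:)

    #include <cstdint>

    static const uint64_t UnscaledClassSpaceMax    = (uint64_t)UINT32_MAX + 1;  // 2^32 klass indices
    static const int      LogKlassAlignmentInBytes = 3;

    // true if [0, higher_address) is reachable with narrow_klass_base == 0
    static bool can_use_zero_base(uint64_t higher_address) {
      const uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;  // 32 GB
      return higher_address <= klass_encoding_max;
    }
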
@@ -2874,24 +2884,24 @@
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                                (address)(metaspace_base + class_metaspace_size()));
-  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+                                (address)(metaspace_base + compressed_class_space_size()));
+  return ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax);
 }
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
-  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
-  assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
-  assert_is_ptr_aligned(cds_base,                _reserve_alignment);
-  assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
+  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
+  assert_is_ptr_aligned(cds_base, _reserve_alignment);
+  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
 
   // Don't use large pages for the class space.
   bool large_pages = false;
 
-  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
+  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                              _reserve_alignment,
                                              large_pages,
                                              requested_addr, 0);
@@ -2906,7 +2916,7 @@
       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
         addr = addr + increment;
-        metaspace_rs = ReservedSpace(class_metaspace_size(),
+        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
@@ -2917,11 +2927,11 @@
     // initialization has happened that depends on UseCompressedClassPointers.
     // So, UseCompressedClassPointers cannot be turned off at this point.
     if (!metaspace_rs.is_reserved()) {
-      metaspace_rs = ReservedSpace(class_metaspace_size(),
+      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                    _reserve_alignment, large_pages);
       if (!metaspace_rs.is_reserved()) {
         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
-                                              class_metaspace_size()));
+                                              compressed_class_space_size()));
       }
     }
   }
@@ -2943,8 +2953,8 @@
   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
-    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
-                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
   }
 }
 
@@ -3010,7 +3020,7 @@
   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
 
   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
-  set_class_metaspace_size(CompressedClassSpaceSize);
+  set_compressed_class_space_size(CompressedClassSpaceSize);
 }
 
 void Metaspace::global_initialize() {
@@ -3039,12 +3049,12 @@
     }
 
 #ifdef _LP64
-    if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
+    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
       vm_exit_during_initialization("Unable to dump shared archive.",
           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
-                  "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
-                  cds_total + class_metaspace_size(), (size_t)max_juint));
+                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
+                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
     }
 
     // Set the compressed klass pointer base so that decoding of these pointers works
@@ -3092,7 +3102,8 @@
         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
       } else {
-        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
 #endif
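
(Instead of the fixed CompressedKlassPointersBase, the class space is now requested directly after the Java heap, rounded up to the reservation alignment; that placement is what lets the zero-base case above actually occur for small heaps. The rounding is the standard power-of-two align-up; a sketch, assuming align is a power of two:)

    #include <cstdint>

    // round p up to the next multiple of align (align must be a power of two)
    static char* align_ptr_up(char* p, uintptr_t align) {
      return (char*)(((uintptr_t)p + align - 1) & ~(align - 1));
    }
    // e.g. base = align_ptr_up(heap_end, reserve_alignment), then reserve the class space there
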
@@ -3354,6 +3365,11 @@
   return result;
 }
 
+size_t Metaspace::class_chunk_size(size_t word_size) {
+  assert(using_class_space(), "Has to use class space");
+  return class_vsm()->calc_chunk_size(word_size);
+}
+
 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
   // If result is still null, we are out of memory.
   if (Verbose && TraceMetadataChunkAllocation) {
@@ -3365,9 +3381,19 @@
     MetaspaceAux::dump(gclog_or_tty);
   }
 
+  bool out_of_compressed_class_space = false;
+  if (is_class_space_allocation(mdtype)) {
+    Metaspace* metaspace = loader_data->metaspace_non_null();
+    out_of_compressed_class_space =
+      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
+      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
+      CompressedClassSpaceSize;
+  }
+
   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-  const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
-                                                                 "Metadata space";
+  const char* space_string = out_of_compressed_class_space ?
+    "Compressed class space" : "Metaspace";
+
   report_java_out_of_memory(space_string);
 
   if (JvmtiExport::should_post_resource_exhausted()) {
@@ -3380,7 +3406,7 @@
     vm_exit_during_initialization("OutOfMemoryError", space_string);
   }
 
-  if (is_class_space_allocation(mdtype)) {
+  if (out_of_compressed_class_space) {
     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
   } else {
     THROW_OOP(Universe::out_of_memory_error_metaspace());
--- a/src/share/vm/memory/metaspace.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/memory/metaspace.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -115,13 +115,13 @@
   static size_t align_word_size_up(size_t);
 
   // Aligned size of the compressed class space.
-  static size_t _class_metaspace_size;
+  static size_t _compressed_class_space_size;
 
-  static size_t class_metaspace_size() {
-    return _class_metaspace_size;
+  static size_t compressed_class_space_size() {
+    return _compressed_class_space_size;
   }
-  static void set_class_metaspace_size(size_t metaspace_size) {
-    _class_metaspace_size = metaspace_size;
+  static void set_compressed_class_space_size(size_t size) {
+    _compressed_class_space_size = size;
   }
 
   static size_t _first_chunk_word_size;
@@ -192,6 +192,8 @@
   AllocRecord * _alloc_record_head;
   AllocRecord * _alloc_record_tail;
 
+  size_t class_chunk_size(size_t word_size);
+
  public:
 
   Metaspace(Mutex* lock, MetaspaceType type);
@@ -252,6 +254,7 @@
   static bool is_class_space_allocation(MetadataType mdType) {
     return mdType == ClassType && using_class_space();
   }
+
 };
 
 class MetaspaceAux : AllStatic {
--- a/src/share/vm/memory/universe.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/memory/universe.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -677,13 +677,13 @@
 // HeapBased - Use compressed oops with heap base + encoding.
 
 // 4Gb
-static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
+static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
-// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
   assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
-  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
   assert(is_size_aligned(heap_size, alignment), "Must be");
 
   uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
@@ -702,20 +702,40 @@
     // If the total size is small enough to allow UnscaledNarrowOop then
     // just use UnscaledNarrowOop.
     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
-      if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
+      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
         // place heap's top on the 4Gb boundary
-        base = (NarrowOopHeapMax - heap_size);
+        base = (UnscaledOopHeapMax - heap_size);
       } else {
         // Can't reserve with NarrowOopShift == 0
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+
         if (mode == UnscaledNarrowOop ||
-            mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
+            (mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax)) {
+
           // Use zero based compressed oops with encoding and
           // place heap's top on the 32Gb boundary in case
           // total_size > 4Gb or failed to reserve below 4Gb.
-          base = (OopEncodingHeapMax - heap_size);
+          uint64_t heap_top = OopEncodingHeapMax;
+
+          // For small heaps, save some space for compressed class pointer
+          // space so it can be decoded with no base.
+          if (UseCompressedClassPointers && !UseSharedSpaces &&
+              OopEncodingHeapMax <= 32*G) {
+
+            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
+            assert(is_size_aligned((size_t)OopEncodingHeapMax - class_space,
+                                   alignment), "difference must be aligned too");
+            uint64_t new_top = OopEncodingHeapMax - class_space;
+
+            if (total_size <= new_top) {
+              heap_top = new_top;
+            }
+          }
+
+          // Align base to the adjusted top of the heap
+          base = heap_top - heap_size;
         }
       }
     } else {
@@ -737,7 +757,7 @@
       // Set to a non-NULL value so the ReservedSpace ctor computes
       // the correct no-access prefix.
       // The final value will be set in initialize_heap() below.
-      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
 #ifdef _WIN64
       if (UseLargePages) {
         // Cannot allocate guard pages for implicit checks in indexed
@@ -833,7 +853,7 @@
         Universe::set_narrow_oop_use_implicit_null_checks(true);
       }
 #endif //  _WIN64
-      if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
+      if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
         // Can't reserve heap below 4Gb.
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
       } else {
@@ -1029,7 +1049,7 @@
     Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
 
-    msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
+    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
     msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
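
For zero-based compressed oops the heap's top normally lands on the OopEncodingHeapMax (32G) boundary; the change above lowers that top by an aligned CompressedClassSpaceSize for small heaps so the class space can also be decoded without a base. A runnable sketch of the placement arithmetic, simplified to ignore HeapBaseMinAddress and using made-up sizes:

    #include <cstdint>
    #include <cstdio>

    static uint64_t align_up(uint64_t v, uint64_t alignment) {
        return (v + alignment - 1) & ~(alignment - 1);
    }

    // Compute a zero-based heap base, optionally reserving room below the
    // 32G encoding limit for the compressed class space. The constants are
    // illustrative stand-ins for the VM's flags.
    static uint64_t zero_based_heap_base(uint64_t heap_size,
                                         uint64_t class_space_size,
                                         uint64_t alignment) {
        const uint64_t G = 1024ULL * 1024 * 1024;
        const uint64_t oop_encoding_heap_max = 32 * G;

        uint64_t heap_top = oop_encoding_heap_max;
        uint64_t class_space = align_up(class_space_size, alignment);
        if (heap_size <= oop_encoding_heap_max - class_space) {
            heap_top = oop_encoding_heap_max - class_space;  // carve out class space
        }
        return heap_top - heap_size;  // heap ends exactly at heap_top
    }

    int main() {
        const uint64_t G = 1024ULL * 1024 * 1024;
        uint64_t base = zero_based_heap_base(4 * G, 1 * G, 1024 * 1024);
        printf("heap base: 0x%llx\n", (unsigned long long)base);
        return 0;
    }
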
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2393,15 +2393,38 @@
 
 
 const char* InstanceKlass::signature_name() const {
+  int hash_len = 0;
+  char hash_buf[40];
+
+  // If this is an anonymous class, append a hash to make the name unique
+  if (is_anonymous()) {
+    assert(EnableInvokeDynamic, "EnableInvokeDynamic was not set.");
+    intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0;
+    sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
+    hash_len = (int)strlen(hash_buf);
+  }
+
+  // Get the internal name as a c string
   const char* src = (const char*) (name()->as_C_string());
   const int src_length = (int)strlen(src);
-  char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
-  int src_index = 0;
+
+  char* dest = NEW_RESOURCE_ARRAY(char, src_length + hash_len + 3);
+
+  // Add L as type indicator
   int dest_index = 0;
   dest[dest_index++] = 'L';
-  while (src_index < src_length) {
+
+  // Add the actual class name
+  for (int src_index = 0; src_index < src_length; ) {
     dest[dest_index++] = src[src_index++];
   }
+
+  // If we have a hash, append it
+  for (int hash_index = 0; hash_index < hash_len; ) {
+    dest[dest_index++] = hash_buf[hash_index++];
+  }
+
+  // Add the semicolon and the NULL
   dest[dest_index++] = ';';
   dest[dest_index] = '\0';
   return dest;
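
signature_name() now appends "/<identity hash>" for VM-anonymous classes so that two such classes with the same name produce distinct descriptors. A standalone sketch of the string construction, with std::string standing in for the resource-arena buffer and a decimal hash as in UINTX_FORMAT:

    #include <cstdio>
    #include <string>

    // Build an "L<name>[/<hash>];" descriptor. A hash of 0 means "not
    // anonymous"; in the VM the hash comes from the mirror's identity hash.
    static std::string signature_name(const std::string& internal_name,
                                      unsigned long hash) {
        std::string dest = "L" + internal_name;
        if (hash != 0) {
            char buf[32];
            snprintf(buf, sizeof(buf), "/%lu", hash);
            dest += buf;
        }
        dest += ';';
        return dest;
    }

    int main() {
        printf("%s\n", signature_name("java/lang/String", 0).c_str());
        printf("%s\n", signature_name("MyAnonClass", 305441741UL).c_str());
        return 0;
    }
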
--- a/src/share/vm/oops/method.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/oops/method.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1515,7 +1515,10 @@
       return bp->orig_bytecode();
     }
   }
-  ShouldNotReachHere();
+  {
+    ResourceMark rm;
+    fatal(err_msg("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci));
+  }
   return Bytecodes::_shouldnotreachhere;
 }
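
name_and_sig_as_C_string() allocates in the resource area, which is why the new fatal() path is bracketed by a ResourceMark. A toy model of that scoping, assuming nothing about the real arena beyond mark-and-release behavior:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy resource arena: allocations made after a Mark is constructed are
    // released when it goes out of scope, mimicking HotSpot's ResourceMark.
    struct Arena {
        std::vector<char*> blocks;
        char* alloc(size_t n) { char* p = new char[n]; blocks.push_back(p); return p; }
    };

    struct Mark {
        Arena& arena;
        size_t watermark;
        explicit Mark(Arena& a) : arena(a), watermark(a.blocks.size()) {}
        ~Mark() {
            while (arena.blocks.size() > watermark) {
                delete[] arena.blocks.back();
                arena.blocks.pop_back();
            }
        }
    };

    int main() {
        Arena arena;
        {
            Mark rm(arena);               // like ResourceMark rm;
            char* msg = arena.alloc(64);  // like name_and_sig_as_C_string()
            snprintf(msg, 64, "no original bytecode found at bci %d", 42);
            printf("%s\n", msg);
        }                                 // allocation released here
        printf("blocks live after mark: %zu\n", arena.blocks.size());
        return 0;
    }
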
 
--- a/src/share/vm/opto/library_call.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/opto/library_call.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2006,9 +2006,9 @@
   Node* arg2 = NULL;
 
   if (is_increment) {
-      arg2 = intcon(1);
+    arg2 = intcon(1);
   } else {
-      arg2 = argument(1);
+    arg2 = argument(1);
   }
 
   Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
@@ -2056,7 +2056,7 @@
   if (is_decrement) {
     arg2 = longcon(1);
   } else {
-    Node* arg2 = argument(2); // type long
+    arg2 = argument(2); // type long
     // argument(3) == TOP
   }
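
The inc/dec fix above is a classic shadowing bug: the old "Node* arg2 = argument(2);" declared a fresh inner variable instead of assigning the outer one, leaving the outer arg2 NULL. The same pattern reduced to a few lines:

    #include <cstdio>

    int main() {
        const char* arg2 = nullptr;

        bool is_decrement = false;
        if (is_decrement) {
            arg2 = "constant 1";
        } else {
            // Bug (as in the old code): this would declare a *new* inner arg2
            // and leave the outer one untouched:
            //   const char* arg2 = "argument(2)";
            // Fix: assign to the existing variable instead.
            arg2 = "argument(2)";
        }

        printf("arg2 = %s\n", arg2 ? arg2 : "(null!)");
        return 0;
    }
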
 
--- a/src/share/vm/opto/loopTransform.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -713,6 +713,10 @@
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
+      case Op_FlagsProj:
+        // Can't handle unrolling of loops containing
+        // nodes that generate a FlagsProj at the moment
+        return false;
       case Op_StrComp:
       case Op_StrEquals:
       case Op_StrIndexOf:
--- a/src/share/vm/opto/postaloc.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/opto/postaloc.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -97,7 +97,8 @@
 static bool expected_yanked_node(Node *old, Node *orig_old) {
   // This code expects only the following original nodes:
   // - a load from the constant table, which may have these data inputs:
-  //     MachConstantBase, Phi, MachTemp, MachSpillCopy
+  //     MachConstantBase, MachTemp, MachSpillCopy
+  // - Phi nodes that are considered junk
   // - a load of a constant, which may have these data inputs:
   //     MachTemp, MachSpillCopy
   // - MachSpillCopy
@@ -112,7 +113,9 @@
     return (old == orig_old);
   } else if (old->is_MachTemp()) {
     return orig_old->is_Con();
-  } else if (old->is_Phi() || old->is_MachConstantBase()) {
+  } else if (old->is_Phi()) { // Junk phi's
+    return true;
+  } else if (old->is_MachConstantBase()) {
     return (orig_old->is_Con() && orig_old->is_MachConstant());
   }
   return false;
@@ -522,11 +525,9 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if (u != NodeSentinel) {    // Junk Phi.  Remove
-        block->remove_node(j--);
+        phi->replace_by(u);
+        j -= yank_if_dead(phi, block, &value, &regnd);
         phi_dex--;
-        _cfg.unmap_node_from_block(phi);
-        phi->replace_by(u);
-        phi->disconnect_inputs(NULL, C);
         continue;
       }
       // Note that if value[pidx] exists, then we merged no new values here
--- a/src/share/vm/opto/type.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/opto/type.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -2787,13 +2787,11 @@
 
 //-----------------------------filter------------------------------------------
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeOopPtr::filter( const Type *kills ) const {
+const Type *TypeOopPtr::filter(const Type *kills) const {
 
   const Type* ft = join(kills);
   const TypeInstPtr* ftip = ft->isa_instptr();
   const TypeInstPtr* ktip = kills->isa_instptr();
-  const TypeKlassPtr* ftkp = ft->isa_klassptr();
-  const TypeKlassPtr* ktkp = kills->isa_klassptr();
 
   if (ft->empty()) {
     // Check for evil case of 'this' being a class and 'kills' expecting an
@@ -2807,8 +2805,6 @@
     // uplift the type.
     if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface())
       return kills;             // Uplift to interface
-    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
-      return kills;             // Uplift to interface
 
     return Type::TOP;           // Canonical empty value
   }
@@ -2825,14 +2821,6 @@
     assert(!ftip->klass_is_exact(), "interface could not be exact");
     return ktip->cast_to_ptr_type(ftip->ptr());
   }
-  // Interface klass type could be exact in opposite to interface type,
-  // return it here instead of incorrect Constant ptr J/L/Object (6894807).
-  if (ftkp != NULL && ktkp != NULL &&
-      ftkp->is_loaded() &&  ftkp->klass()->is_interface() &&
-      !ftkp->klass_is_exact() && // Keep exact interface klass
-      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
-    return ktkp->cast_to_ptr_type(ftkp->ptr());
-  }
 
   return ft;
 }
@@ -4385,6 +4373,33 @@
   return (_offset == 0) && !below_centerline(_ptr);
 }
 
+// Do not allow interface-vs.-noninterface joins to collapse to top.
+const Type *TypeKlassPtr::filter(const Type *kills) const {
+  // logic here mirrors the one from TypeOopPtr::filter. See comments
+  // there.
+  const Type* ft = join(kills);
+  const TypeKlassPtr* ftkp = ft->isa_klassptr();
+  const TypeKlassPtr* ktkp = kills->isa_klassptr();
+
+  if (ft->empty()) {
+    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
+      return kills;             // Uplift to interface
+
+    return Type::TOP;           // Canonical empty value
+  }
+
+  // Interface klass type could be exact in opposite to interface type,
+  // return it here instead of incorrect Constant ptr J/L/Object (6894807).
+  if (ftkp != NULL && ktkp != NULL &&
+      ftkp->is_loaded() && ftkp->klass()->is_interface() &&
+      !ftkp->klass_is_exact() && // Keep exact interface klass
+      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
+    return ktkp->cast_to_ptr_type(ftkp->ptr());
+  }
+
+  return ft;
+}
+
 //----------------------compute_klass------------------------------------------
 // Compute the defining klass for this class
 ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
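
The interface-versus-klass filtering that previously lived inside TypeOopPtr::filter now sits in a TypeKlassPtr::filter override, so each pointer flavor applies only its own rules through virtual dispatch. A deliberately simplified sketch of that shape; the class names match the source but the bodies are placeholders:

    #include <cstdio>

    // Simplified stand-ins for the C2 type hierarchy: filter() is virtual so
    // oop pointers and klass pointers each apply their own interface rules.
    struct Type {
        virtual ~Type() {}
        virtual const char* filter() const { return "Type::filter (generic join)"; }
    };

    struct TypeOopPtr : Type {
        const char* filter() const override {
            return "TypeOopPtr::filter (instance-pointer interface rules)";
        }
    };

    struct TypeKlassPtr : Type {
        const char* filter() const override {
            return "TypeKlassPtr::filter (klass-pointer interface rules)";
        }
    };

    int main() {
        const Type* types[] = { new Type, new TypeOopPtr, new TypeKlassPtr };
        for (const Type* t : types) {
            printf("%s\n", t->filter());
            delete t;
        }
        return 0;
    }
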
--- a/src/share/vm/opto/type.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/opto/type.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -63,7 +63,7 @@
 class     TypeOopPtr;
 class       TypeInstPtr;
 class       TypeAryPtr;
-class       TypeKlassPtr;
+class     TypeKlassPtr;
 class     TypeMetadataPtr;
 
 //------------------------------Type-------------------------------------------
@@ -1202,6 +1202,9 @@
 
   virtual intptr_t get_con() const;
 
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter(const Type *kills) const;
+
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
--- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -29,8 +29,43 @@
 #include "runtime/thread.hpp"
 
 
+// The closure for GetLoadedClasses
+class LoadedClassesClosure : public KlassClosure {
+private:
+  Stack<jclass, mtInternal> _classStack;
+  JvmtiEnv* _env;
 
-// The closure for GetLoadedClasses and GetClassLoaderClasses
+public:
+  LoadedClassesClosure(JvmtiEnv* env) {
+    _env = env;
+  }
+
+  void do_klass(Klass* k) {
+    // Collect all jclasses
+    _classStack.push((jclass) _env->jni_reference(k->java_mirror()));
+  }
+
+  int extract(jclass* result_list) {
+    // The size of the Stack will be 0 after extract, so get it here
+    int count = (int)_classStack.size();
+    int i = count;
+
+    // Pop all jclasses, fill backwards
+    while (!_classStack.is_empty()) {
+      result_list[--i] = _classStack.pop();
+    }
+
+    // Return the number of elements written
+    return count;
+  }
+
+  // Return current size of the Stack
+  int get_count() {
+    return (int)_classStack.size();
+  }
+};
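
The closure pushes each class while walking the ClassLoaderDataGraph and later pops into the output array back-to-front, which restores visit order. The extract idiom in isolation, with std::stack standing in for HotSpot's Stack<jclass, mtInternal>:

    #include <cstdio>
    #include <stack>

    // Pop a stack into result_list back-to-front; returns the element count.
    // Filling backwards restores the original push order.
    static int extract(std::stack<int>& s, int* result_list) {
        int count = (int)s.size();  // size is 0 after the loop, so grab it first
        int i = count;
        while (!s.empty()) {
            result_list[--i] = s.top();
            s.pop();
        }
        return count;
    }

    int main() {
        std::stack<int> s;
        for (int v = 1; v <= 5; ++v) s.push(v);  // visit order: 1..5

        int out[5];
        int n = extract(s, out);
        for (int i = 0; i < n; ++i) printf("%d ", out[i]);  // prints 1 2 3 4 5
        printf("\n");
        return 0;
    }
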
+
+// The closure for GetClassLoaderClasses
 class JvmtiGetLoadedClassesClosure : public StackObj {
   // Since the SystemDictionary::classes_do callback
   // doesn't pass a closureData pointer,
@@ -165,19 +200,6 @@
     }
   }
 
-  // Finally, the static methods that are the callbacks
-  static void increment(Klass* k) {
-    JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
-    if (that->get_initiatingLoader() == NULL) {
-      for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) {
-        that->set_count(that->get_count() + 1);
-      }
-    } else if (k != NULL) {
-      // if initiating loader not null, just include the instance with 1 dimension
-      that->set_count(that->get_count() + 1);
-    }
-  }
-
   static void increment_with_loader(Klass* k, ClassLoaderData* loader_data) {
     JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
     oop class_loader = loader_data->class_loader();
@@ -196,24 +218,6 @@
     }
   }
 
-  static void add(Klass* k) {
-    JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
-    if (that->available()) {
-      if (that->get_initiatingLoader() == NULL) {
-        for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) {
-          oop mirror = l->java_mirror();
-          that->set_element(that->get_index(), mirror);
-          that->set_index(that->get_index() + 1);
-        }
-      } else if (k != NULL) {
-        // if initiating loader not null, just include the instance with 1 dimension
-        oop mirror = k->java_mirror();
-        that->set_element(that->get_index(), mirror);
-        that->set_index(that->get_index() + 1);
-      }
-    }
-  }
-
   static void add_with_loader(Klass* k, ClassLoaderData* loader_data) {
     JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
     if (that->available()) {
@@ -255,39 +259,30 @@
 
 jvmtiError
 JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr) {
-  // Since SystemDictionary::classes_do only takes a function pointer
-  // and doesn't call back with a closure data pointer,
-  // we can only pass static methods.
 
-  JvmtiGetLoadedClassesClosure closure;
+  LoadedClassesClosure closure(env);
   {
     // To get a consistent list of classes we need MultiArray_lock to ensure
-    // array classes aren't created, and SystemDictionary_lock to ensure that
-    // classes aren't added to the system dictionary,
+    // array classes aren't created.
     MutexLocker ma(MultiArray_lock);
-    MutexLocker sd(SystemDictionary_lock);
+
+    // Iterate through all classes in ClassLoaderDataGraph
+    // and collect them using the LoadedClassesClosure
+    ClassLoaderDataGraph::loaded_classes_do(&closure);
+  }
 
-    // First, count the classes
-    SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::increment);
-    Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::increment);
-    // Next, fill in the classes
-    closure.allocate();
-    SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::add);
-    Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::add);
-    // Drop the SystemDictionary_lock, so the results could be wrong from here,
-    // but we still have a snapshot.
+  // Return results by extracting the collected contents into a list
+  // allocated via JvmtiEnv
+  jclass* result_list;
+  jvmtiError error = env->Allocate(closure.get_count() * sizeof(jclass),
+                                   (unsigned char**)&result_list);
+
+  if (error == JVMTI_ERROR_NONE) {
+    int count = closure.extract(result_list);
+    *classCountPtr = count;
+    *classesPtr = result_list;
   }
-  // Post results
-  jclass* result_list;
-  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass),
-                                 (unsigned char**)&result_list);
-  if (err != JVMTI_ERROR_NONE) {
-    return err;
-  }
-  closure.extract(env, result_list);
-  *classCountPtr = closure.get_count();
-  *classesPtr = result_list;
-  return JVMTI_ERROR_NONE;
+  return error;
 }
 
 jvmtiError
--- a/src/share/vm/runtime/arguments.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1988,6 +1988,15 @@
     warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. "
         "Use MaxRAMFraction instead.");
   }
+  if (FLAG_IS_CMDLINE(UseCMSCompactAtFullCollection)) {
+    warning("UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release.");
+  }
+  if (FLAG_IS_CMDLINE(CMSFullGCsBeforeCompaction)) {
+    warning("CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release.");
+  }
+  if (FLAG_IS_CMDLINE(UseCMSCollectionPassing)) {
+    warning("UseCMSCollectionPassing is deprecated and will likely be removed in a future release.");
+  }
 }
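
Each newly deprecated CMS flag warns only when the user set it explicitly on the command line, mirroring the existing DefaultMaxRAMFraction handling. A sketch of the pattern outside the VM; the Flag record and its from_cmdline bit are invented stand-ins for FLAG_IS_CMDLINE:

    #include <cstdio>

    // Toy flag record: 'from_cmdline' plays the role of FLAG_IS_CMDLINE.
    struct Flag {
        const char* name;
        bool value;
        bool from_cmdline;
    };

    static void warn_if_deprecated(const Flag& f) {
        if (f.from_cmdline) {
            fprintf(stderr,
                    "warning: %s is deprecated and will likely be removed "
                    "in a future release.\n", f.name);
        }
    }

    int main() {
        Flag flags[] = {
            { "UseCMSCompactAtFullCollection", false, true  },  // user set it
            { "CMSFullGCsBeforeCompaction",    true,  false },  // default value
        };
        for (const Flag& f : flags) warn_if_deprecated(f);
        return 0;
    }
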
 
 // Check stack pages settings
--- a/src/share/vm/runtime/handles.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/runtime/handles.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -45,7 +45,7 @@
 oop* HandleArea::allocate_handle(oop obj) {
   assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
   assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
-  assert(obj->is_oop(), "sanity check");
+  assert(obj->is_oop(), err_msg("not an oop: " INTPTR_FORMAT, (intptr_t) obj));
   return real_allocate_handle(obj);
 }
 
--- a/src/share/vm/runtime/thread.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1097,7 +1097,7 @@
 // General purpose hook into Java code, run once when the VM is initialized.
 // The Java library method itself may be changed independently from the VM.
 static void call_postVMInitHook(TRAPS) {
-  Klass* k = SystemDictionary::PostVMInitHook_klass();
+  Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD);
   instanceKlassHandle klass (THREAD, k);
   if (klass.not_null()) {
     JavaValue result(T_VOID);
--- a/src/share/vm/services/jmm.h	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/services/jmm.h	Tue Nov 05 14:06:23 2013 -0800
@@ -78,6 +78,7 @@
   JMM_COMPILE_TOTAL_TIME_MS          = 8,    /* Total accumulated time spent in compilation */
   JMM_GC_TIME_MS                     = 9,    /* Total accumulated time spent in collection */
   JMM_GC_COUNT                       = 10,   /* Total number of collections */
+  JMM_JVM_UPTIME_MS                  = 11,   /* The JVM uptime in milliseconds */
 
   JMM_INTERNAL_ATTRIBUTE_INDEX       = 100,
   JMM_CLASS_LOADED_BYTES             = 101,  /* Number of bytes loaded instance classes */
--- a/src/share/vm/services/management.cpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/services/management.cpp	Tue Nov 05 14:06:23 2013 -0800
@@ -1032,6 +1032,9 @@
   case JMM_JVM_INIT_DONE_TIME_MS:
     return Management::vm_init_done_time();
 
+  case JMM_JVM_UPTIME_MS:
+    return Management::ticks_to_ms(os::elapsed_counter());
+
   case JMM_COMPILE_TOTAL_TIME_MS:
     return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
 
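
JMM_JVM_UPTIME_MS converts the elapsed tick counter to milliseconds, in the spirit of Management::ticks_to_ms(os::elapsed_counter()). A sketch of the conversion with std::chrono's steady clock as the tick source:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Convert elapsed steady-clock ticks to milliseconds, in the spirit of
    // Management::ticks_to_ms(os::elapsed_counter()).
    static long long uptime_ms(std::chrono::steady_clock::time_point start) {
        auto ticks = std::chrono::steady_clock::now() - start;
        return std::chrono::duration_cast<std::chrono::milliseconds>(ticks).count();
    }

    int main() {
        auto vm_start = std::chrono::steady_clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        printf("uptime: %lld ms\n", uptime_ms(vm_start));
        return 0;
    }
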
--- a/src/share/vm/utilities/globalDefinitions.hpp	Thu Oct 31 16:31:31 2013 -0700
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Nov 05 14:06:23 2013 -0800
@@ -368,8 +368,6 @@
 // Klass encoding metaspace max size
 const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
 
-const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000));  // 32*G
-
 // Machine dependent stuff
 
 #ifdef TARGET_ARCH_x86
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/inlining/InlineDefaultMethod.java	Tue Nov 05 14:06:23 2013 -0800
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026735
+ * @summary CHA in C1 should make correct decisions about default methods
+ * @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod::test -XX:TieredStopAtLevel=1 InlineDefaultMethod
+ */
+
+
+interface InterfaceWithDefaultMethod0 {
+    default public int defaultMethod() {
+        return 1;
+    }
+}
+
+interface InterfaceWithDefaultMethod1 extends InterfaceWithDefaultMethod0 { }
+
+abstract class Subtype implements InterfaceWithDefaultMethod1 { }
+
+class Decoy extends Subtype {
+    public int defaultMethod() {
+        return 2;
+    }
+}
+
+class Instance extends Subtype { }
+
+public class InlineDefaultMethod {
+    public static int test(InterfaceWithDefaultMethod1 x) {
+        return x.defaultMethod();
+    }
+    public static void main(String[] args) {
+        InterfaceWithDefaultMethod1 a = new Decoy();
+        InterfaceWithDefaultMethod1 b = new Instance();
+        if (test(a) != 2 ||
+            test(b) != 1) {
+          System.err.println("FAILED");
+          System.exit(97);
+        }
+        System.err.println("PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NestedMathExactTest.java	Tue Nov 05 14:06:23 2013 -0800
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027444
+ * @summary Test that math intrinsics work correctly in nested loops
+ * @compile NestedMathExactTest.java
+ * @run main NestedMathExactTest
+ *
+ */
+
+public class NestedMathExactTest {
+    public static final int LIMIT = 100;
+    public static int[] result = new int[LIMIT];
+    public static int value = 17;
+
+    public static void main(String[] args) {
+        for (int i = 0; i < LIMIT; ++i) {
+            result[i] = runTest();
+        }
+    }
+
+    public static int runTest() {
+        int sum = 0;
+        for (int j = 0; j < 100000; j = Math.addExact(j, 1)) {
+            sum = 1;
+            for (int i = 0; i < 5; ++i) {
+                sum *= value;
+            }
+        }
+        return sum;
+    }
+}
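
Math.addExact, which the test uses as its loop-induction update, must throw on int overflow; the C2 intrinsic expresses the same check as an AddExactINode whose FlagsProj output the loop unroller now refuses to handle. The underlying overflow test, sketched in C++ with __builtin_add_overflow standing in for the flags check:

    #include <climits>
    #include <cstdio>

    // addExact for 32-bit ints: returns false instead of throwing
    // ArithmeticException when the sum overflows.
    static bool add_exact(int a, int b, int* result) {
    #if defined(__GNUC__) || defined(__clang__)
        return !__builtin_add_overflow(a, b, result);
    #else
        long long wide = (long long)a + b;           // portable fallback
        if (wide < INT_MIN || wide > INT_MAX) return false;
        *result = (int)wide;
        return true;
    #endif
    }

    int main() {
        int r = 0;
        printf("1 + 2       -> %s (%d)\n", add_exact(1, 2, &r) ? "ok" : "overflow", r);
        printf("INT_MAX + 1 -> %s\n", add_exact(INT_MAX, 1, &r) ? "ok" : "overflow");
        return 0;
    }
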
--- a/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java	Thu Oct 31 16:31:31 2013 -0700
+++ b/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java	Tue Nov 05 14:06:23 2013 -0800
@@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8026844
+ * @bug 8027353
  * @summary Test constant subtractExact
  * @compile SubExactLConstantTest.java Verify.java
  * @run main SubExactLConstantTest
--- a/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java	Thu Oct 31 16:31:31 2013 -0700
+++ b/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java	Tue Nov 05 14:06:23 2013 -0800
@@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8026844
+ * @bug 8027353
  * @summary Test non constant subtractExact
  * @compile SubExactLNonConstantTest.java Verify.java
  * @run main SubExactLNonConstantTest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/startup/StartupOutput.java	Tue Nov 05 14:06:23 2013 -0800
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026949
+ * @summary Ensure correct VM output during startup
+ * @library ../../testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class StartupOutput {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    OutputAnalyzer out;
+
+    pb = ProcessTools.createJavaProcessBuilder("-Xint", "-XX:+DisplayVMOutputToStdout", "-version");
+    out = new OutputAnalyzer(pb.start());
+    out.shouldNotContain("no space to run compilers");
+
+    out.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/startup_warnings/TestCMSForegroundFlags.java	Tue Nov 05 14:06:23 2013 -0800
@@ -0,0 +1,52 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+* @test TestCMSForegroundFlags
+* @key gc
+* @bug 8027132
+* @summary Test that the deprecated CMS foreground collector flags print warning messages
+* @library /testlibrary
+* @run main TestCMSForegroundFlags -XX:-UseCMSCompactAtFullCollection UseCMSCompactAtFullCollection
+* @run main TestCMSForegroundFlags -XX:CMSFullGCsBeforeCompaction=4 CMSFullGCsBeforeCompaction
+* @run main TestCMSForegroundFlags -XX:-UseCMSCollectionPassing UseCMSCollectionPassing
+*/
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class TestCMSForegroundFlags {
+  public static void main(String[] args) throws Exception {
+    if (args.length != 2) {
+      throw new Exception("Expected two arguments,flagValue and flagName");
+    }
+    String flagValue = args[0];
+    String flagName = args[1];
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flagValue, "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldContain("warning: " + flagName + " is deprecated and will likely be removed in a future release.");
+    output.shouldNotContain("error");
+    output.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CompressedOops/CompressedClassPointers.java	Tue Nov 05 14:06:23 2013 -0800
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024927
+ * @summary Test the address of the compressed class pointer space as well as possible.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class CompressedClassPointers {
+
+    public static void smallHeapTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedBaseAddress=8g",
+            "-Xmx128m",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void smallHeapTestWith3G() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:CompressedClassSpaceSize=3g",
+            "-Xmx128m",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000, Narrow klass shift: 3");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void largeHeapTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-Xmx30g",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Narrow klass base: 0x0000000000000000");
+        output.shouldContain("Narrow klass shift: 0");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void largePagesTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-Xmx128m",
+            "-XX:+UseLargePages",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base:");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void sharingTest() throws Exception {
+        // Test small heaps
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xmx128m",
+            "-XX:SharedBaseAddress=8g",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC",
+            "-Xshare:dump");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        try {
+            output.shouldContain("Loading classes to share");
+            output.shouldHaveExitValue(0);
+
+            pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:SharedArchiveFile=./sample.jsa",
+                "-Xmx128m",
+                "-XX:SharedBaseAddress=8g",
+                "-XX:+PrintCompressedOopsMode",
+                "-Xshare:on",
+                "-version");
+            output = new OutputAnalyzer(pb.start());
+            output.shouldContain("sharing");
+            output.shouldHaveExitValue(0);
+
+        } catch (RuntimeException e) {
+            output.shouldContain("Unable to use shared archive");
+            output.shouldHaveExitValue(1);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (!Platform.is64bit()) {
+            // Can't test this on 32 bit, just pass
+            System.out.println("Skipping test on 32bit");
+            return;
+        }
+        // Solaris 10 can't mmap compressed oops space without a base
+        if (Platform.isSolaris()) {
+            String name = System.getProperty("os.version");
+            if (name.equals("5.10")) {
+                System.out.println("Skipping test on Solaris 10");
+                return;
+            }
+        }
+        smallHeapTest();
+        smallHeapTestWith3G();
+        largeHeapTest();
+        largePagesTest();
+        sharingTest();
+    }
+}