changeset 20711:f8fc5cbe082c hs25.40-b23

Merge
author amurillo
date Fri, 12 Dec 2014 10:31:00 -0800
parents f25953cb4e18 (current diff) c5e86c5cd22e (diff)
children adc3127d2e09
files
diffstat 44 files changed, 1977 insertions(+), 315 deletions(-)
--- a/make/hotspot_version	Wed Dec 10 14:35:48 2014 -0800
+++ b/make/hotspot_version	Fri Dec 12 10:31:00 2014 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=22
+HS_BUILD_NUMBER=23
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -545,6 +545,9 @@
   cmplw(CCR0, Rindex, Rlength);
   sldi(RsxtIndex, RsxtIndex, index_shift);
   blt(CCR0, LnotOOR);
+  // Index should be in R17_tos, array should be in R4_ARG2.
+  mr(R17_tos, Rindex);
+  mr(R4_ARG2, Rarray);
   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   mtctr(Rtmp);
   bctr();
@@ -1679,6 +1682,228 @@
   }
 }
 
+// Argument and return type profiling.
+// kills: tmp, tmp2, R0, CR0, CR1
+void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
+                                                 RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
+  Label do_nothing, do_update;
+
+  // tmp2 = obj is allowed
+  assert_different_registers(obj, mdo_addr_base, tmp, R0);
+  assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
+  const Register klass = tmp2;
+
+  verify_oop(obj);
+
+  ld(tmp, mdo_addr_offs, mdo_addr_base);
+
+  // Set null_seen if obj is 0.
+  cmpdi(CCR0, obj, 0);
+  ori(R0, tmp, TypeEntries::null_seen);
+  beq(CCR0, do_update);
+
+  load_klass(klass, obj);
+
+  clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
+  // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
+  cmpd(CCR1, R0, klass);
+  // Klass seen before, nothing to do (regardless of unknown bit).
+  //beq(CCR1, do_nothing);
+
+  andi_(R0, klass, TypeEntries::type_unknown);
+  // Already unknown. Nothing to do anymore.
+  //bne(CCR0, do_nothing);
+  crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
+  beq(CCR0, do_nothing);
+
+  clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
+  orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
+  beq(CCR0, do_update); // First time here. Set profile type.
+
+  // Different than before. Cannot keep accurate profile.
+  ori(R0, tmp, TypeEntries::type_unknown);
+
+  bind(do_update);
+  // update profile
+  std(R0, mdo_addr_offs, mdo_addr_base);
+
+  align(32, 12);
+  bind(do_nothing);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  assert_different_registers(callee, tmp1, tmp2, R28_mdx);
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
+    cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    bne(CCR0, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(R28_mdx, off_to_args, R28_mdx);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile.
+          ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+          cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
+          addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
+          blt(CCR0, done);
+        }
+        ld(tmp1, in_bytes(Method::const_offset()), callee);
+        lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
+        // Stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
+        subf(tmp1, tmp2, tmp1);
+
+        sldi(tmp1, tmp1, Interpreter::logStackElementSize);
+        ldx(tmp1, tmp1, R15_esp);
+
+        profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addi(R28_mdx, R28_mdx, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+        addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
+        add(R28_mdx, tmp1, R28_mdx);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
+    }
+
+    // Mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
+      lbz(tmp1, 0, R14_bcp);
+      lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
+      cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
+      cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      bne(CCR0, profile_continue);
+    }
+
+    profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters.
+    lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
+    cmpwi(CCR0, tmp1, 0);
+    blt(CCR0, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameters. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+
+    // Pointer to the parameter area in the MDO.
+    const Register mdp = tmp1;
+    add(mdp, tmp1, R28_mdx);
+
+    // Offset of the current profile entry to update.
+    const Register entry_offset = tmp2;
+    // entry_offset = array len in number of cells
+    ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
+
+    // entry_offset (number of cells)  = array len - size of 1 entry + offset of the stack slot field
+    addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
+    // entry_offset in bytes
+    sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
+
+    Label loop;
+    align(32, 12);
+    bind(loop);
+
+    // Load offset on the stack from the slot for this parameter.
+    ld(tmp3, entry_offset, mdp);
+    sldi(tmp3, tmp3, Interpreter::logStackElementSize);
+    neg(tmp3, tmp3);
+    // Read the parameter from the local area.
+    ldx(tmp3, tmp3, R18_locals);
+
+    // Make entry_offset now point to the type field for this parameter.
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    assert(type_base > off_base, "unexpected");
+    addi(entry_offset, entry_offset, type_base - off_base);
+
+    // Profile the parameter.
+    profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
+
+    // Go to next parameter.
+    int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
+    cmpdi(CCR0, entry_offset, off_base + delta);
+    addi(entry_offset, entry_offset, -delta);
+    bge(CCR0, loop);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
 // Add a InterpMonitorElem to stack (see frame_sparc.hpp).
 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
 
@@ -2040,20 +2265,19 @@
   bne(CCR0, test);
 
   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
-
-  save_LR_CR(Rtmp);
+  const int nbytes_save = 11*8; // volatile gprs except R0
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  save_LR_CR(Rtmp); // Save in old frame.
   push_frame_reg_args(nbytes_save, Rtmp);
-  save_volatile_gprs(R1_SP, 112); // except R0
 
   load_const_optimized(Rtmp, fd, R0);
   mr_if_needed(R4_ARG2, reg);
   mr(R3_ARG1, R19_method);
   call_c(Rtmp); // call C
 
-  restore_volatile_gprs(R1_SP, 112); // except R0
   pop_frame();
   restore_LR_CR(Rtmp);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
   b(skip);
 
   // Perform a more elaborate out-of-line call.
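
The profile_obj_type() code added to interp_masm_ppc_64.cpp above implements a small per-entry state machine over the TypeEntries bits (null_seen, type_unknown, klass bits). Below is a minimal, self-contained C++ sketch of that update logic; the constant values and names are illustrative stand-ins, not HotSpot's exact definitions.

#include <cstdint>
#include <cstdio>

typedef uintptr_t entry_t;

// Illustrative stand-ins for the TypeEntries constants used above.
static const entry_t null_seen       = 0x1;                     // a NULL value was observed
static const entry_t type_unknown    = 0x2;                     // conflicting klasses were observed
static const entry_t status_bits     = null_seen | type_unknown;
static const entry_t type_klass_mask = ~status_bits;            // klass bits only
static const entry_t type_mask       = ~null_seen;              // klass bits plus the unknown bit

// obj_klass == 0 stands for a NULL value being profiled.
static entry_t update_type_entry(entry_t entry, entry_t obj_klass) {
  if (obj_klass == 0) {
    return entry | null_seen;                  // Set null_seen if obj is NULL.
  }
  if ((entry & type_klass_mask) == obj_klass) {
    return entry;                              // Klass seen before, nothing to do (regardless of unknown bit).
  }
  if ((entry & type_unknown) != 0) {
    return entry;                              // Already unknown, nothing to do anymore.
  }
  if ((entry & type_mask) == 0) {
    return obj_klass | (entry & null_seen);    // First time here, record the profile type.
  }
  return entry | type_unknown;                 // Different than before, cannot keep an accurate profile.
}

int main() {
  entry_t e = 0;
  e = update_type_entry(e, 0x1000);            // first klass recorded
  e = update_type_entry(e, 0x1000);            // same klass, entry unchanged
  e = update_type_entry(e, 0);                 // NULL observed, null_seen set
  e = update_type_entry(e, 0x2000);            // different klass, entry becomes unknown
  printf("final entry = 0x%lx\n", (unsigned long)e);
  return 0;
}
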
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -255,6 +255,12 @@
   void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
 
+  // Argument and return type profiling.
+  void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
+  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
+  void profile_return_type(Register ret, Register tmp1, Register tmp2);
+  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
 #endif // !CC_INTERP
 
   // Debugging
--- a/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -806,6 +806,7 @@
 
 // For verify_oops.
 void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
+  std(R2,  offset, dst);   offset += 8;
   std(R3,  offset, dst);   offset += 8;
   std(R4,  offset, dst);   offset += 8;
   std(R5,  offset, dst);   offset += 8;
@@ -820,6 +821,7 @@
 
 // For verify_oops.
 void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
+  ld(R2,  offset, src);   offset += 8;
   ld(R3,  offset, src);   offset += 8;
   ld(R4,  offset, src);   offset += 8;
   ld(R5,  offset, src);   offset += 8;
@@ -1186,6 +1188,16 @@
   call_VM(oop_result, entry_point, check_exceptions);
 }
 
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
+                             bool check_exceptions) {
+  // R3_ARG1 is reserved for the thread
+  mr_if_needed(R4_ARG2, arg_1);
+  assert(arg_2 != R4_ARG2, "smashed argument");
+  mr_if_needed(R5_ARG3, arg_2);
+  mr_if_needed(R6_ARG4, arg_3);
+  call_VM(oop_result, entry_point, check_exceptions);
+}
+
 void MacroAssembler::call_VM_leaf(address entry_point) {
   call_VM_leaf_base(entry_point);
 }
@@ -3058,35 +3070,27 @@
   if (!VerifyOops) {
     return;
   }
-  // Will be preserved.
-  Register tmp = R11;
-  assert(oop != tmp, "precondition");
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
+
   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
-  save_LR_CR(tmp);
+  const Register tmp = R11; // Will be preserved.
+  const int nbytes_save = 11*8; // Volatile gprs except R0.
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+
+  if (oop == tmp) mr(R4_ARG2, oop);
+  save_LR_CR(tmp); // save in old frame
   push_frame_reg_args(nbytes_save, tmp);
-  // restore tmp
-  mr(tmp, R0);
-  save_volatile_gprs(R1_SP, 112); // except R0
   // load FunctionDescriptor** / entry_address *
-  load_const(tmp, fd);
+  load_const_optimized(tmp, fd, R0);
   // load FunctionDescriptor* / entry_address
   ld(tmp, 0, tmp);
-  mr(R4_ARG2, oop);
-  load_const(R3_ARG1, (address)msg);
-  // call destination for its side effect
+  if (oop != tmp) mr_if_needed(R4_ARG2, oop);
+  load_const_optimized(R3_ARG1, (address)msg, R0);
+  // Call destination for its side effect.
   call_c(tmp);
-  restore_volatile_gprs(R1_SP, 112); // except R0
+
   pop_frame();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
   restore_LR_CR(tmp);
-  // restore tmp
-  mr(tmp, R0);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
 }
 
 const char* stop_types[] = {
--- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -368,6 +368,7 @@
   void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
   void call_VM_leaf(address entry_point);
   void call_VM_leaf(address entry_point, Register arg_1);
   void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
--- a/src/cpu/ppc/vm/nativeInst_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/nativeInst_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -100,10 +100,7 @@
   MacroAssembler* a = new MacroAssembler(&cb);
 
   // Patch the call.
-  if (ReoptimizeCallSequences &&
-      a->is_within_range_of_b(dest, addr_call)) {
-    a->bl(dest);
-  } else {
+  if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
     address trampoline_stub_addr = get_trampoline();
 
     // We did not find a trampoline stub because the current codeblob
@@ -115,9 +112,12 @@
 
     // Patch the constant in the call's trampoline stub.
     NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
+    dest = trampoline_stub_addr;
+  }
 
-    a->bl(trampoline_stub_addr);
-  }
+  OrderAccess::release();
+  a->bl(dest);
+
   ICache::ppc64_flush_icache_bytes(addr_call, code_size);
 }
 
--- a/src/cpu/ppc/vm/ppc.ad	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/ppc.ad	Fri Dec 12 10:31:00 2014 -0800
@@ -1938,8 +1938,9 @@
   // --------------------------------------------------------------------
   // Check for hi bits still needing moving. Only happens for misaligned
   // arguments to native calls.
-  if (src_hi == dst_hi)
+  if (src_hi == dst_hi) {
     return ppc64Opcode_none;               // Self copy; no move.
+  }
 
   ShouldNotReachHere();
   return ppc64Opcode_undefined;
@@ -1961,14 +1962,15 @@
 }
 
 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
-   return _count * 4;
+  return _count * 4;
 }
 
 #ifndef PRODUCT
 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
-  int reg = ra_->get_reg_first(this);
-  st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
+  char reg_str[128];
+  ra_->dump_register(this, reg_str);
+  st->print("ADDI    %s, SP, %d \t// box node", reg_str, offset);
 }
 #endif
 
--- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -90,7 +90,7 @@
 
   // Thread will be loaded to R3_ARG1.
   // Target class oop is in register R5_ARG3 by convention!
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
   // Above call must not return here since exception pending.
   DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
@@ -171,6 +171,10 @@
   // Compiled code destroys templateTableBase, reload.
   __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
 
+  if (state == atos) {
+    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
+  }
+
   const Register cache = R11_scratch1;
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);
@@ -1230,6 +1234,10 @@
       __ li(R0, 1);
       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
     }
+
+    // Argument and return type profiling.
+    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
+
     // Increment invocation counter and check for overflow.
     if (inc_counter) {
       generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
@@ -1549,6 +1557,8 @@
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
+      __ ld(R11_scratch1, 0, R1_SP);
+      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
     }
 #if INCLUDE_JVMTI
     Label L_done;
@@ -1560,13 +1570,11 @@
     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
     __ ld(R4_ARG2, 0, R18_locals);
-    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
-               R4_ARG2, R19_method, R14_bcp);
-
-    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+    __ cmpdi(CCR0, R4_ARG2, 0);
     __ beq(CCR0, L_done);
-
-    __ std(R11_scratch1, wordSize, R15_esp);
+    __ std(R4_ARG2, wordSize, R15_esp);
     __ bind(L_done);
 #endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -3234,6 +3234,8 @@
   // Load target.
   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
 }
 
@@ -3317,6 +3319,8 @@
   __ null_check_throw(Rrecv, -1, Rscratch1);
 
   __ profile_final_call(Rrecv, Rscratch1);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
 
   // Do the call.
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
@@ -3338,6 +3342,8 @@
   __ null_check_throw(Rreceiver, -1, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3352,6 +3358,8 @@
   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3373,6 +3381,8 @@
 
   // Final call case.
   __ profile_final_call(Rtemp1, Rscratch);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
   // Do the final call - the index (f2) contains the method.
   __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
 
@@ -3424,6 +3434,8 @@
   __ cmpdi(CCR0, Rindex, 0);
   __ beq(CCR0, Lthrow_ame);
   // Found entry. Jump off!
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
 
   // Vtable entry was NULL => Throw abstract method error.
@@ -3477,6 +3489,8 @@
   // to be the callsite object the bootstrap method returned. This is passed to a
   // "link" method which does the dispatch (Most likely just grabs the MH stored
   // inside the callsite and does an invokehandle).
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
@@ -3503,6 +3517,8 @@
   __ profile_final_call(Rrecv, Rscratch1);
 
   // Still no call from handle => We call the method handle interpreter here.
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -139,13 +139,44 @@
   }
 
   assert(AllocatePrefetchLines > 0, "invalid value");
-  if (AllocatePrefetchLines < 1) // Set valid value in product VM.
+  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
     AllocatePrefetchLines = 1; // Conservative value.
+  }
 
-  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
+  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
     AllocatePrefetchStyle = 1; // Fall back if inappropriate.
+  }
 
   assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
+
+  if (UseCRC32Intrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
+      warning("CRC32 intrinsics  are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
+  }
+
+  // The AES intrinsic stubs require AES instruction support.
+  if (UseAES) {
+    warning("AES instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAES, false);
+  }
+  if (UseAESIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
+      warning("AES intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  }
+
+  if (UseSHA) {
+    warning("SHA instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
+    warning("SHA intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+  }
+
 }
 
 void VM_Version::print_features() {
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -675,7 +675,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
@@ -748,7 +748,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id == handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id:
     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
--- a/src/os/aix/vm/os_aix.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os/aix/vm/os_aix.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -114,12 +114,6 @@
 }
 #endif
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3
-#ifndef PV_7
-# define PV_7 0x200000          // Power PC 7
-# define PV_7_Compat 0x208000   // Power PC 7
-#endif
-
 #define MAX_PATH (2 * K)
 
 // for timer info max values which include all bits
@@ -130,17 +124,40 @@
 #define ERROR_MP_VMGETINFO_FAILED                    102
 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 
-// the semantics in this file are thus that codeptr_t is a *real code ptr*
+// The semantics in this file are thus that codeptr_t is a *real code ptr*.
 // This means that any function taking codeptr_t as arguments will assume
 // a real codeptr and won't handle function descriptors (eg getFuncName),
 // whereas functions taking address as args will deal with function
-// descriptors (eg os::dll_address_to_library_name)
+// descriptors (eg os::dll_address_to_library_name).
 typedef unsigned int* codeptr_t;
 
-// typedefs for stackslots, stack pointers, pointers to op codes
+// Typedefs for stackslots, stack pointers, pointers to op codes.
 typedef unsigned long stackslot_t;
 typedef stackslot_t* stackptr_t;
 
+// Excerpts from systemcfg.h definitions newer than AIX 5.3.
+#ifndef PV_7
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
+#endif
+#ifndef PV_8
+#define PV_8 0x300000          /* Power PC 8 */
+#define PV_8_Compat 0x308000   /* Power PC 8 */
+#endif
+
+#define trcVerbose(fmt, ...) { /* PPC port */  \
+  if (Verbose) { \
+    fprintf(stderr, fmt, ##__VA_ARGS__); \
+    fputc('\n', stderr); fflush(stderr); \
+  } \
+}
+#define trc(fmt, ...)        /* PPC port */
+
+#define ERRBYE(s) { \
+    trcVerbose(s); \
+    return -1; \
+}
+
 // query dimensions of the stack of the calling thread
 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 
@@ -172,12 +189,12 @@
   return true;
 }
 
-// macro to check a given stack pointer against given stack limits and to die if test fails
+// Macro to check a given stack pointer against given stack limits and to die if test fails.
 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 }
 
-// macro to check the current stack pointer against given stacklimits
+// Macro to check the current stack pointer against given stacklimits.
 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
   address sp; \
   sp = os::current_stack_pointer(); \
@@ -211,7 +228,7 @@
 static pid_t    _initial_pid       = 0;
 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 static sigset_t SR_sigset;
-static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls */
+static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 
 julong os::available_memory() {
   return Aix::available_memory();
@@ -243,7 +260,6 @@
   return false;
 }
 
-
 // Return true if user is running as root.
 
 bool os::have_special_privileges() {
@@ -274,8 +290,7 @@
 
   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
       return false;
     }
     p += maxDisclaimSize;
@@ -283,8 +298,7 @@
 
   if (lastDisclaimSize > 0) {
     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-        fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
       return false;
     }
   }
@@ -324,11 +338,11 @@
 
 void os::Aix::initialize_system_info() {
 
-  // get the number of online(logical) cpus instead of configured
+  // Get the number of online(logical) cpus instead of configured.
   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
   assert(_processor_count > 0, "_processor_count must be > 0");
 
-  // retrieve total physical storage
+  // Retrieve total physical storage.
   os::Aix::meminfo_t mi;
   if (!os::Aix::get_meminfo(&mi)) {
     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
@@ -503,7 +517,6 @@
 
 } // end os::Aix::query_multipage_support()
 
-// The code for this method was initially derived from the version in os_linux.cpp.
 void os::init_system_properties_values() {
 
 #define DEFAULT_LIBPATH "/usr/lib:/lib"
@@ -600,10 +613,11 @@
   sigaction(sig, (struct sigaction*)NULL, &oact);
   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
-  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
     return true;
-  else
+  } else {
     return false;
+  }
 }
 
 void os::Aix::signal_sets_init() {
@@ -777,6 +791,9 @@
 
   // get the processor version from _system_configuration
   switch (_system_configuration.version) {
+  case PV_8:
+    strcpy(pci->version, "Power PC 8");
+    break;
   case PV_7:
     strcpy(pci->version, "Power PC 7");
     break;
@@ -804,6 +821,9 @@
   case PV_7_Compat:
     strcpy(pci->version, "PV_7_Compat");
     break;
+  case PV_8_Compat:
+    strcpy(pci->version, "PV_8_Compat");
+    break;
   default:
     strcpy(pci->version, "unknown");
   }
@@ -939,7 +959,9 @@
 
   pthread_attr_destroy(&attr);
 
-  if (ret != 0) {
+  if (ret == 0) {
+    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
+  } else {
     if (PrintMiscellaneous && (Verbose || WizardMode)) {
       perror("pthread_create()");
     }
@@ -1096,8 +1118,7 @@
   if (os::Aix::on_pase()) {
     Unimplemented();
     return 0;
-  }
-  else {
+  } else {
     // On AIX use the precision of processors real time clock
     // or time base registers.
     timebasestruct_t time;
@@ -1150,7 +1171,6 @@
   }
 }
 
-
 char * os::local_time_string(char *buf, size_t buflen) {
   struct tm t;
   time_t long_time;
@@ -1188,7 +1208,6 @@
   if (abort_hook != NULL) {
     abort_hook();
   }
-
 }
 
 // Note: os::abort() might be called very early during initialization, or
@@ -1220,8 +1239,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
-  if (errno == 0)  return 0;
+  if (errno == 0) return 0;
 
   const char *s = ::strerror(errno);
   size_t n = ::strlen(s);
@@ -1234,6 +1252,7 @@
 }
 
 intx os::current_thread_id() { return (intx)pthread_self(); }
+
 int os::current_process_id() {
 
   // This implementation returns a unique pid, the pid of the
@@ -1370,9 +1389,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-    buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1405,12 +1424,9 @@
     return 0;
   }
 
-  if (Verbose) {
-    fprintf(stderr, "pc outside any module");
-  }
+  trcVerbose("pc outside any module");
 
   return -1;
-
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
@@ -1418,9 +1434,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-      buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1435,7 +1451,7 @@
 }
 
 // Loads .dll/.so and in case of error it checks if .dll/.so was built
-// for the same architecture as Hotspot is running on
+// for the same architecture as Hotspot is running on.
 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 
   if (ebuf && ebuflen > 0) {
@@ -1598,7 +1614,6 @@
   st->cr();
 }
 
-
 static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen);
 
@@ -1622,7 +1637,7 @@
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
-// Find the full path to the current module, libjvm.so or libjvm_g.so
+// Find the full path to the current module, libjvm.so.
 void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
   if (buflen < MAXPATHLEN) {
@@ -1692,7 +1707,7 @@
   // Do not block out synchronous signals in the signal handler.
   // Blocking synchronous signals only makes sense if you can really
   // be sure that those signals won't happen during signal handling,
-  // when the blocking applies.  Normal signal handlers are lean and
+  // when the blocking applies. Normal signal handlers are lean and
   // do not cause signals. But our signal handlers tend to be "risky"
   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
   // On AIX, PASE there was a case where a SIGSEGV happened, followed
@@ -2967,13 +2982,9 @@
   param.sched_priority = newpri;
   int ret = pthread_setschedparam(thr, policy, &param);
 
-  if (Verbose) {
-    if (ret == 0) {
-      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
-    } else {
-      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
-              (int)thr, newpri, ret, strerror(ret));
-    }
+  if (ret != 0) {
+    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
+        (int)thr, newpri, ret, strerror(ret));
   }
   return (ret == 0) ? OS_OK : OS_ERR;
 }
@@ -3094,7 +3105,6 @@
   errno = old_errno;
 }
 
-
 static int SR_initialize() {
   struct sigaction act;
   char *s;
@@ -3337,7 +3347,6 @@
   JVM_handle_aix_signal(sig, info, uc, true);
 }
 
-
 // This boolean allows users to forward their own non-matching signals
 // to JVM_handle_aix_signal, harmlessly.
 bool os::Aix::signal_handlers_are_installed = false;
@@ -3531,7 +3540,7 @@
     set_signal_handler(SIGDANGER, true);
 
     if (libjsig_is_loaded) {
-      // Tell libjsig jvm finishes setting signal handlers
+      // Tell libjsig jvm finishes setting signal handlers.
       (*end_signal_setting)();
     }
 
@@ -3547,7 +3556,7 @@
         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
-      // need to initialize check_signal_done
+      // Need to initialize check_signal_done.
       ::sigemptyset(&check_signal_done);
     }
   }
@@ -3621,7 +3630,6 @@
   st->cr();
 }
 
-
 #define DO_SIGNAL_CHECK(sig) \
   if (!sigismember(&check_signal_done, sig)) \
     os::Aix::check_signal_handler(sig)
@@ -3682,7 +3690,6 @@
     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
     : CAST_FROM_FN_PTR(address, act.sa_handler);
 
-
   switch(sig) {
   case SIGSEGV:
   case SIGBUS:
@@ -3830,15 +3837,13 @@
   pthread_mutex_init(&dl_mutex, NULL);
 }
 
-// this is called _after_ the global arguments have been parsed
+// This is called _after_ the global arguments have been parsed.
 jint os::init_2(void) {
 
-  if (Verbose) {
-    fprintf(stderr, "processor count: %d\n", os::_processor_count);
-    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
-  }
-
-  // initially build up the loaded dll map
+  trcVerbose("processor count: %d", os::_processor_count);
+  trcVerbose("physical memory: %lu", Aix::_physical_memory);
+
+  // Initially build up the loaded dll map.
   LoadedLibraries::reload();
 
   const int page_size = Aix::page_size();
@@ -3888,7 +3893,7 @@
       }
 
       if (map_address != (address) MAP_FAILED) {
-        // map succeeded, but polling_page is not at wished address, unmap and continue.
+        // Map succeeded, but polling_page is not at wished address, unmap and continue.
         ::munmap(map_address, map_size);
         map_address = (address) MAP_FAILED;
       }
@@ -3942,7 +3947,7 @@
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
-  // note that this can be 0, if no default stacksize was set
+  // Note that this can be 0, if no default stacksize was set.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
 
   Aix::libpthread_init();
@@ -4255,7 +4260,6 @@
   return fd;
 }
 
-
 // create binary file, rewriting existing file if required
 int os::create_binary_file(const char* path, bool rewrite_existing) {
   int oflags = O_WRONLY | O_CREAT;
@@ -4324,7 +4328,6 @@
   return NULL;
 }
 
-
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only,
@@ -4372,14 +4375,14 @@
   jlong sys_time = 0;
   jlong user_time = 0;
 
-  // reimplemented using getthrds64().
+  // Reimplemented using getthrds64().
   //
-  // goes like this:
+  // Works like this:
   // For the thread in question, get the kernel thread id. Then get the
   // kernel thread statistics using that id.
   //
   // This only works of course when no pthread scheduling is used,
-  // ie there is a 1:1 relationship to kernel threads.
+  // i.e. there is a 1:1 relationship to kernel threads.
   // On AIX, see AIXTHREAD_SCOPE variable.
 
   pthread_t pthtid = thread->osthread()->pthread_id();
@@ -4526,14 +4529,12 @@
   memset(&uts, 0, sizeof(uts));
   strcpy(uts.sysname, "?");
   if (::uname(&uts) == -1) {
-    fprintf(stderr, "uname failed (%d)\n", errno);
+    trc("uname failed (%d)", errno);
     guarantee(0, "Could not determine whether we run on AIX or PASE");
   } else {
-    if (Verbose) {
-      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
-              "node \"%s\" machine \"%s\"\n",
-              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
-    }
+    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
+               "node \"%s\" machine \"%s\"\n",
+               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
     const int major = atoi(uts.version);
     assert(major > 0, "invalid OS version");
     const int minor = atoi(uts.release);
@@ -4545,12 +4546,10 @@
       // We run on AIX. We do not support versions older than AIX 5.3.
       _on_pase = 0;
       if (_os_version < 0x0503) {
-        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
+        trc("AIX release older than AIX 5.3 not supported.");
         assert(false, "AIX release too old.");
       } else {
-        if (Verbose) {
-          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
-        }
+        trcVerbose("We run on AIX %d.%d\n", major, minor);
       }
     } else {
       assert(false, "unknown OS");
@@ -4558,7 +4557,6 @@
   }
 
   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
-
 } // end: os::Aix::initialize_os_info()
 
 // Scan environment for important settings which might effect the VM.
@@ -4596,12 +4594,10 @@
   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
   // exec() ? before loading the libjvm ? ....)
   p = ::getenv("XPG_SUS_ENV");
-  if (Verbose) {
-    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
-  }
+  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
   if (p && strcmp(p, "ON") == 0) {
     _xpg_sus_mode = 1;
-    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
+    trc("Unsupported setting: XPG_SUS_ENV=ON");
     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
     // clobber address ranges. If we ever want to support that, we have to do some
     // testing first.
@@ -4613,10 +4609,7 @@
   // Switch off AIX internal (pthread) guard pages. This has
   // immediate effect for any pthread_create calls which follow.
   p = ::getenv("AIXTHREAD_GUARDPAGES");
-  if (Verbose) {
-    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
-    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
-  }
+  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
   guarantee(rc == 0, "");
 
@@ -4634,7 +4627,7 @@
   assert(os::Aix::on_aix(), "AIX only");
 
   if (!libperfstat::init()) {
-    fprintf(stderr, "libperfstat initialization failed.\n");
+    trc("libperfstat initialization failed.");
     assert(false, "libperfstat initialization failed");
   } else {
     if (Verbose) {
@@ -4806,7 +4799,6 @@
   return abstime;
 }
 
-
 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
 // Conceptually TryPark() should be equivalent to park(0).
 
@@ -4889,7 +4881,7 @@
   while (_Event < 0) {
     status = pthread_cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == ETIMEDOUT,
-          status, "cond_timedwait");
+                  status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break;         // previous semantics
     if (status == ETIMEDOUT) break;
     // We consume and ignore EINTR and spurious wakeups.
@@ -5023,9 +5015,9 @@
   // Optional fast-path check:
   // Return immediately if a permit is available.
   if (_counter > 0) {
-      _counter = 0;
-      OrderAccess::fence();
-      return;
+    _counter = 0;
+    OrderAccess::fence();
+    return;
   }
 
   Thread* thread = Thread::current();
@@ -5047,7 +5039,6 @@
     unpackTime(&absTime, isAbsolute, time);
   }
 
-
   // Enter safepoint region
   // Beware of deadlocks such as 6317397.
   // The per-thread Parker:: mutex is a classic leaf-lock.
@@ -5135,7 +5126,6 @@
   }
 }
 
-
 extern char** environ;
 
 // Run the specified command in a separate process. Return its exit value,
@@ -5154,44 +5144,43 @@
   } else if (pid == 0) {
     // child process
 
-    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
+    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
     execve("/usr/bin/sh", argv, environ);
 
     // execve failed
     _exit(-1);
 
-  } else  {
+  } else {
     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
     // care about the actual exit code, for now.
 
     int status;
 
-    // Wait for the child process to exit.  This returns immediately if
+    // Wait for the child process to exit. This returns immediately if
     // the child has already exited. */
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
+      switch (errno) {
         case ECHILD: return 0;
         case EINTR: break;
         default: return -1;
-        }
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal.
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through.
+      return status;
     }
   }
-  // Remove warning.
   return -1;
 }
 
@@ -5206,7 +5195,7 @@
   struct stat statbuf;
   char buf[MAXPATHLEN];
   char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *xawtstr = "/xawt/libmawt.so";
   const char *new_xawtstr = "/libawt_xawt.so";
 
   char *p;
--- a/src/os/aix/vm/os_aix.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os/aix/vm/os_aix.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -209,7 +209,7 @@
     return _can_use_16M_pages == 1 ? true : false;
   }
 
-  static address   ucontext_get_pc(ucontext_t* uc);
+  static address   ucontext_get_pc(const ucontext_t* uc);
   static intptr_t* ucontext_get_sp(ucontext_t* uc);
   static intptr_t* ucontext_get_fp(ucontext_t* uc);
   // Set PC into context. Needed for continuation after signal.
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -91,8 +91,9 @@
 
 // Frame information (pc, sp, fp) retrieved via ucontext
 // always looks like a C-frame according to the frame
-// conventions in frame_ppc64.hpp.
-address os::Aix::ucontext_get_pc(ucontext_t * uc) {
+// conventions in frame_ppc.hpp.
+
+address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
   return (address)uc->uc_mcontext.jmp_context.iar;
 }
 
@@ -486,7 +487,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Aix::min_stack_allowed = 768*K;
+size_t os::Aix::min_stack_allowed = 128*K;
 
 // Aix is always in floating stack mode. The stack size for a new
 // thread can be set via pthread_attr_setstacksize().
@@ -499,7 +500,7 @@
   // because of the strange 'fallback logic' in os::create_thread().
   // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
   // specify a different stack size for compiler threads!
-  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;
 }
 
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
 
   static void setup_fpu() {}
 
@@ -32,4 +32,4 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
--- a/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
-#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
 
 #include "runtime/prefetch.hpp"
 
@@ -55,4 +55,4 @@
 #endif
 }
 
-#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
--- a/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
 
   // Processor dependent parts of ThreadLocalStorage
 
@@ -33,4 +33,4 @@
     return (Thread *) os::thread_local_storage_at(thread_index());
   }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
--- a/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
 
  private:
   void pd_initialize() {
@@ -76,4 +76,4 @@
 
   intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -453,7 +453,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Linux::min_stack_allowed = 768*K;
+size_t os::Linux::min_stack_allowed = 128*K;
 
 bool os::Linux::supports_variable_stack_size() { return true; }
 
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -3058,21 +3058,39 @@
   }
 }
 
-// Transfer ownership of metadata allocated to the InstanceKlass.
-void ClassFileParser::apply_parsed_class_metadata(
-                                            instanceKlassHandle this_klass,
-                                            int java_fields_count, TRAPS) {
-  // Assign annotations if needed
-  if (_annotations != NULL || _type_annotations != NULL ||
-      _fields_annotations != NULL || _fields_type_annotations != NULL) {
+// Create the Annotations object that will
+// hold the annotations array for the Klass.
+void ClassFileParser::create_combined_annotations(TRAPS) {
+    if (_annotations == NULL &&
+        _type_annotations == NULL &&
+        _fields_annotations == NULL &&
+        _fields_type_annotations == NULL) {
+      // Don't create the Annotations object unnecessarily.
+      return;
+    }
+
     Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
     annotations->set_class_annotations(_annotations);
     annotations->set_class_type_annotations(_type_annotations);
     annotations->set_fields_annotations(_fields_annotations);
     annotations->set_fields_type_annotations(_fields_type_annotations);
-    this_klass->set_annotations(annotations);
-  }
-
+
+    // This is the Annotations object that will be
+    // assigned to InstanceKlass being constructed.
+    _combined_annotations = annotations;
+
+    // The annotation arrays below have been transferred to
+    // _combined_annotations, so these fields can now be cleared.
+    _annotations             = NULL;
+    _type_annotations        = NULL;
+    _fields_annotations      = NULL;
+    _fields_type_annotations = NULL;
+}
+
+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+                                            instanceKlassHandle this_klass,
+                                            int java_fields_count, TRAPS) {
   _cp->set_pool_holder(this_klass());
   this_klass->set_constants(_cp);
   this_klass->set_fields(_fields, java_fields_count);
@@ -3080,6 +3098,7 @@
   this_klass->set_inner_classes(_inner_classes);
   this_klass->set_local_interfaces(_local_interfaces);
   this_klass->set_transitive_interfaces(_transitive_interfaces);
+  this_klass->set_annotations(_combined_annotations);
 
   // Clear out these fields so they don't get deallocated by the destructor
   clear_class_metadata();
@@ -3939,6 +3958,10 @@
     ClassAnnotationCollector parsed_annotations;
     parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
 
+    // Finalize the Annotations metadata object,
+    // now that all annotation arrays have been created.
+    create_combined_annotations(CHECK_(nullHandle));
+
     // Make sure this is the end of class file stream
     guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
 
@@ -4239,10 +4262,27 @@
   InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
                                        _local_interfaces, _transitive_interfaces);
 
-  MetadataFactory::free_array<u1>(_loader_data, _annotations);
-  MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
-  Annotations::free_contents(_loader_data, _fields_annotations);
-  Annotations::free_contents(_loader_data, _fields_type_annotations);
+  if (_combined_annotations != NULL) {
+    // After all annotations arrays have been created, they are installed into the
+    // Annotations object that will be assigned to the InstanceKlass being created.
+
+    // Deallocate the Annotations object and the installed annotations arrays.
+    _combined_annotations->deallocate_contents(_loader_data);
+
+    // If the _combined_annotations pointer is non-NULL,
+    // then the other annotations fields should have been cleared.
+    assert(_annotations             == NULL, "Should have been cleared");
+    assert(_type_annotations        == NULL, "Should have been cleared");
+    assert(_fields_annotations      == NULL, "Should have been cleared");
+    assert(_fields_type_annotations == NULL, "Should have been cleared");
+  } else {
+    // If the annotations arrays were not installed into the Annotations object,
+    // then they have to be deallocated explicitly.
+    MetadataFactory::free_array<u1>(_loader_data, _annotations);
+    MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+    Annotations::free_contents(_loader_data, _fields_annotations);
+    Annotations::free_contents(_loader_data, _fields_type_annotations);
+  }
 
   clear_class_metadata();
 
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/classfile/classFileParser.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -75,6 +75,7 @@
   Array<u2>*       _inner_classes;
   Array<Klass*>*   _local_interfaces;
   Array<Klass*>*   _transitive_interfaces;
+  Annotations*     _combined_annotations;
   AnnotationArray* _annotations;
   AnnotationArray* _type_annotations;
   Array<AnnotationArray*>* _fields_annotations;
@@ -86,6 +87,8 @@
   void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
   void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
 
+  void create_combined_annotations(TRAPS);
+
   void init_parsed_class_attributes(ClassLoaderData* loader_data) {
     _loader_data = loader_data;
     _synthetic_flag = false;
@@ -110,6 +113,7 @@
     _inner_classes = NULL;
     _local_interfaces = NULL;
     _transitive_interfaces = NULL;
+    _combined_annotations = NULL;
     _annotations = _type_annotations = NULL;
     _fields_annotations = _fields_type_annotations = NULL;
   }
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -398,6 +398,18 @@
   int                handler_bci;
   int                current_bci = bci(thread);
 
+  if (thread->frames_to_pop_failed_realloc() > 0) {
+    // Allocation of scalar replaced object used in this frame
+    // failed. Unconditionally pop the frame.
+    thread->dec_frames_to_pop_failed_realloc();
+    thread->set_vm_result(h_exception());
+    // If the method is synchronized we already unlocked the monitor
+    // during deoptimization so the interpreter needs to skip it when
+    // the frame is popped.
+    thread->set_do_not_unlock_if_synchronized(true);
+    return Interpreter::remove_activation_entry();
+  }
+
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
   // into java, or surprisingly find a matching exception handler for bci 0
--- a/src/share/vm/memory/filemap.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/memory/filemap.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -97,11 +97,11 @@
         tty->print_cr("UseSharedSpaces: %s", msg);
       }
     }
+    UseSharedSpaces = false;
+    assert(current_info() != NULL, "singleton must be registered");
+    current_info()->close();
   }
   va_end(ap);
-  UseSharedSpaces = false;
-  assert(current_info() != NULL, "singleton must be registered");
-  current_info()->close();
 }
 
 // Fill in the fileMapInfo structure with data about this VM instance.
--- a/src/share/vm/memory/metaspaceShared.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/memory/metaspaceShared.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -967,7 +967,7 @@
 #endif
     // If -Xshare:on is specified, print out the error message and exit VM,
     // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces) {
+    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
       vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
     } else {
       FLAG_SET_DEFAULT(UseSharedSpaces, false);
--- a/src/share/vm/memory/universe.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/memory/universe.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -119,6 +119,7 @@
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
+oop Universe::_out_of_memory_error_realloc_objects    = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 bool Universe::_verify_in_progress                    = false;
@@ -190,6 +191,7 @@
   f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
+  f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
     f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   f->do_oop((oop*)&_null_ptr_exception_instance);
   f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -574,7 +576,8 @@
           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
+          (throwable() != Universe::_out_of_memory_error_realloc_objects));
 }
 
 
@@ -1044,6 +1047,7 @@
     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_gc_overhead_limit =
       k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
 
     // Setup preallocated NullPointerException
     // (this is currently used for a cheap & dirty solution in compiler exception handling)
@@ -1083,6 +1087,9 @@
     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
 
+    msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
+
     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
 
--- a/src/share/vm/memory/universe.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/memory/universe.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -157,6 +157,7 @@
   static oop          _out_of_memory_error_class_metaspace;
   static oop          _out_of_memory_error_array_size;
   static oop          _out_of_memory_error_gc_overhead_limit;
+  static oop          _out_of_memory_error_realloc_objects;
 
   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
@@ -328,6 +329,7 @@
   static oop out_of_memory_error_class_metaspace()    { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace);   }
   static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
+  static oop out_of_memory_error_realloc_objects()    { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects);  }
 
   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr()           { return &_boolArrayKlassObj;   }
--- a/src/share/vm/opto/connode.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/opto/connode.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -462,7 +462,8 @@
   // Try to improve the type of the CastII if we recognize a CmpI/If
   // pattern.
   if (_carry_dependency) {
-    if (in(0) != NULL && (in(0)->is_IfFalse() || in(0)->is_IfTrue())) {
+    if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
+      assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
       Node* proj = in(0);
       if (proj->in(0)->in(1)->is_Bool()) {
         Node* b = proj->in(0)->in(1);
--- a/src/share/vm/opto/ifnode.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/opto/ifnode.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -820,6 +820,11 @@
 
 static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
 
+struct RangeCheck {
+  Node* ctl;
+  jint off;
+};
+
 //------------------------------Ideal------------------------------------------
 // Return a node which is more "ideal" than the current node.  Strip out
 // control copies
@@ -861,83 +866,141 @@
   jint offset1;
   int flip1 = is_range_check(range1, index1, offset1);
   if( flip1 ) {
-    Node *first_prev_dom = NULL;
-
     // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
     // so all checks we inspect post-dominate the top-most check we find.
     // If we are going to fail the current check and we reach the top check
     // then we are guaranteed to fail, so just start interpreting there.
-    // We 'expand' the top 2 range checks to include all post-dominating
+    // We 'expand' the top 3 range checks to include all post-dominating
     // checks.
 
-    // The top 2 range checks seen
-    Node *prev_chk1 = NULL;
-    Node *prev_chk2 = NULL;
+    // The top 3 range checks seen
+    const int NRC = 3;
+    RangeCheck prev_checks[NRC];
+    int nb_checks = 0;
+
     // Low and high offsets seen so far
     jint off_lo = offset1;
     jint off_hi = offset1;
 
-    // Scan for the top 2 checks and collect range of offsets
-    for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit
-      if( dom->Opcode() == Op_If &&  // Not same opcode?
-          prev_dom->in(0) == dom ) { // One path of test does dominate?
-        if( dom == this ) return NULL; // dead loop
+    bool found_immediate_dominator = false;
+
+    // Scan for the top checks and collect range of offsets
+    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
+      if (dom->Opcode() == Op_If &&  // Not same opcode?
+          prev_dom->in(0) == dom) { // One path of test does dominate?
+        if (dom == this) return NULL; // dead loop
         // See if this is a range check
         Node *index2, *range2;
         jint offset2;
         int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
         // See if this is a _matching_ range check, checking against
         // the same array bounds.
-        if( flip2 == flip1 && range2 == range1 && index2 == index1 &&
-            dom->outcnt() == 2 ) {
+        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
+            dom->outcnt() == 2) {
+          if (nb_checks == 0 && dom->in(1) == in(1)) {
+            // Found an immediately dominating test at the same offset.
+            // This kind of back-to-back test can be eliminated locally,
+            // and there is no need to search further for dominating tests.
+            assert(offset2 == offset1, "Same test but different offsets");
+            found_immediate_dominator = true;
+            break;
+          }
           // Gather expanded bounds
           off_lo = MIN2(off_lo,offset2);
           off_hi = MAX2(off_hi,offset2);
-          // Record top 2 range checks
-          prev_chk2 = prev_chk1;
-          prev_chk1 = prev_dom;
-          // If we match the test exactly, then the top test covers
-          // both our lower and upper bounds.
-          if( dom->in(1) == in(1) )
-            prev_chk2 = prev_chk1;
+          // Record top NRC range checks
+          prev_checks[nb_checks%NRC].ctl = prev_dom;
+          prev_checks[nb_checks%NRC].off = offset2;
+          nb_checks++;
         }
       }
       prev_dom = dom;
-      dom = up_one_dom( dom );
-      if( !dom ) break;
+      dom = up_one_dom(dom);
+      if (!dom) break;
     }
 
-
-    // Attempt to widen the dominating range check to cover some later
-    // ones.  Since range checks "fail" by uncommon-trapping to the
-    // interpreter, widening a check can make us speculative enter the
-    // interpreter.  If we see range-check deopt's, do not widen!
-    if (!phase->C->allow_range_check_smearing())  return NULL;
+    if (!found_immediate_dominator) {
+      // Attempt to widen the dominating range check to cover some later
+      // ones.  Since range checks "fail" by uncommon-trapping to the
+      // interpreter, widening a check can make us speculatively enter
+      // the interpreter.  If we see range-check deopt's, do not widen!
+      if (!phase->C->allow_range_check_smearing())  return NULL;
 
-    // Constant indices only need to check the upper bound.
-    // Non-constance indices must check both low and high.
-    if( index1 ) {
-      // Didn't find 2 prior covering checks, so cannot remove anything.
-      if( !prev_chk2 ) return NULL;
-      // 'Widen' the offsets of the 1st and 2nd covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
-      // Do not call adjust_check twice on the same projection
-      // as the first call may have transformed the BoolNode to a ConI
-      if( prev_chk1 != prev_chk2 ) {
-        adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
+      // Didn't find prior covering check, so cannot remove anything.
+      if (nb_checks == 0) {
+        return NULL;
       }
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk2;
-    } else {
-      // Didn't find prior covering check, so cannot remove anything.
-      if( !prev_chk1 ) return NULL;
-      // 'Widen' the offset of the 1st and only covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn );
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk1;
+      // Constant indices only need to check the upper bound.
+      // Non-constant indices must check both low and high.
+      int chk0 = (nb_checks - 1) % NRC;
+      if (index1) {
+        if (nb_checks == 1) {
+          return NULL;
+        } else {
+          // If the top range check's constant is the min or max of
+          // all constants we widen the next one to cover the whole
+          // range of constants.
+          RangeCheck rc0 = prev_checks[chk0];
+          int chk1 = (nb_checks - 2) % NRC;
+          RangeCheck rc1 = prev_checks[chk1];
+          if (rc0.off == off_lo) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+            prev_dom = rc1.ctl;
+          } else if (rc0.off == off_hi) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+            prev_dom = rc1.ctl;
+          } else {
+            // If the top test's constant is not the min or max of all
+            // constants, we need 3 range checks. We must leave the
+            // top test unchanged because widening it would allow the
+            // accesses it protects to successfully read/write out of
+            // bounds.
+            if (nb_checks == 2) {
+              return NULL;
+            }
+            int chk2 = (nb_checks - 3) % NRC;
+            RangeCheck rc2 = prev_checks[chk2];
+            // The top range check a+i covers interval: -a <= i < length-a
+            // The second range check b+i covers interval: -b <= i < length-b
+            if (rc1.off <= rc0.off) {
+              // if b <= a, we change the second range check to:
+              // -min_of_all_constants <= i < length-min_of_all_constants
+              // Together top and second range checks now cover:
+              // -min_of_all_constants <= i < length-a
+              // which is more restrictive than -b <= i < length-b:
+              // -b <= -min_of_all_constants <= i < length-a <= length-b
+              // The third check is then changed to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
+            } else {
+              // if b > a, we change the second range check to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // Together top and second range checks now cover:
+              // -a <= i < length-max_of_all_constants
+              // which is more restrictive than -b <= i < length-b:
+              // -b < -a <= i < length-max_of_all_constants <= length-b
+              // The third check is then changed to:
+              // -min_of_all_constants <= i < length-min_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
+            }
+            prev_dom = rc2.ctl;
+          }
+        }
+      } else {
+        RangeCheck rc0 = prev_checks[chk0];
+        // 'Widen' the offset of the 1st and only covering check
+        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
+        // Test is now covered by prior checks, dominate it out
+        prev_dom = rc0.ctl;
+      }
     }
 
-
   } else {                      // Scan for an equivalent test
 
     Node *cmp;
@@ -1019,7 +1082,7 @@
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
   if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
-    prev_dom = idom;
+   prev_dom = idom;
 
   // Now walk the current IfNode's projections.
   // Loop ends when 'this' has no more uses.
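
The interval arithmetic spelled out in the new ifnode.cpp comments above can be sanity-checked with a small standalone Java sketch (an editorial illustration, not part of the changeset). The class name, the array length of 10, and the offsets 1, 3 (the top check) and 9 are assumptions, chosen so that the top check's constant is neither the minimum nor the maximum of all constants, i.e. the case that keeps three checks:

public class RangeCheckIntervalSketch {
    static boolean inBounds(int idx, int len) {
        return idx >= 0 && idx < len;
    }

    public static void main(String[] args) {
        final int length = 10;
        final int offLo = 1, offTop = 3, offHi = 9; // top offset lies between min and max

        for (int i = -2 * length; i <= 2 * length; i++) {
            // All three accesses array[i+1], array[i+3], array[i+9] are legal iff
            // -min_of_all_constants <= i < length - max_of_all_constants.
            boolean allLegal = inBounds(i + offLo, length)
                            && inBounds(i + offTop, length)
                            && inBounds(i + offHi, length);

            // After smearing: the top check keeps its own offset, and the second and
            // third checks are widened to off_hi and off_lo (in one order or the
            // other, depending on which branch of the code above is taken).
            boolean covered = (i >= -offTop && i < length - offTop)
                           && (i >= -offHi  && i < length - offHi)
                           && (i >= -offLo  && i < length - offLo);

            if (allLegal != covered) {
                throw new AssertionError("interval mismatch at i = " + i);
            }
        }
        System.out.println("widened checks admit exactly the indices legal for all accesses");
    }
}
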
--- a/src/share/vm/opto/loopopts.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/opto/loopopts.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -239,8 +239,13 @@
   ProjNode* dp_proj  = dp->as_Proj();
   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
   if (exclude_loop_predicate &&
-      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
+      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
+       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
+    // If this is a range check (IfNode::is_range_check), do not
+    // reorder because Compile::allow_range_check_smearing might have
+    // changed the check.
     return; // Let IGVN transformation change control dependence.
+  }
 
   IdealLoopTree *old_loop = get_loop(dp);
 
@@ -896,23 +901,23 @@
   int n_op = n->Opcode();
 
   // Check for an IF being dominated by another IF same test
-  if( n_op == Op_If ) {
+  if (n_op == Op_If) {
     Node *bol = n->in(1);
     uint max = bol->outcnt();
     // Check for same test used more than once?
-    if( n_op == Op_If && max > 1 && bol->is_Bool() ) {
+    if (max > 1 && bol->is_Bool()) {
       // Search up IDOMs to see if this IF is dominated.
       Node *cutoff = get_ctrl(bol);
 
       // Now search up IDOMs till cutoff, looking for a dominating test
       Node *prevdom = n;
       Node *dom = idom(prevdom);
-      while( dom != cutoff ) {
-        if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) {
+      while (dom != cutoff) {
+        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
           // Replace the dominated test with an obvious true or false.
           // Place it on the IGVN worklist for later cleanup.
           C->set_major_progress();
-          dominated_by( prevdom, n, false, true );
+          dominated_by(prevdom, n, false, true);
 #ifndef PRODUCT
           if( VerifyLoopOptimizations ) verify();
 #endif
--- a/src/share/vm/opto/macro.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/opto/macro.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -964,7 +964,11 @@
 }
 
 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
-  if (!EliminateAllocations || !alloc->_is_non_escaping) {
+  // Don't do scalar replacement if the frame can be popped by JVMTI:
+  // if reallocation fails during deoptimization we'll pop all
+  // interpreter frames for this compiled frame and that won't play
+  // nice with JVMTI popframe.
+  if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
     return false;
   }
   Node* klass = alloc->in(AllocateNode::KlassNode);
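
For context, the allocations that this macro.cpp hunk now declines to eliminate are non-escaping objects of the kind sketched below (an editorial illustration, not part of the changeset; the class and method names are hypothetical). C2's escape analysis would normally scalar-replace the Point instance so that no heap allocation remains; after this change the allocation is also kept whenever a JVMTI agent may pop frames, since a failed reallocation during deoptimization would otherwise force popping the interpreter frames:

public class NonEscapingAllocation {
    static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static int sum(int n) {
        Point p = new Point(n, n + 1); // never escapes sum(): scalar-replacement candidate
        return p.x + p.y;              // after elimination the fields live in registers
    }

    public static void main(String[] args) {
        long total = 0;
        for (int i = 0; i < 1_000_000; i++) {
            total += sum(i);           // warm up so the method gets C2-compiled
        }
        System.out.println(total);
    }
}
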
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/deoptimization.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -213,6 +213,8 @@
   assert(vf->is_compiled_frame(), "Wrong frame type");
   chunk->push(compiledVFrame::cast(vf));
 
+  bool realloc_failures = false;
+
 #ifdef COMPILER2
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
@@ -243,19 +245,16 @@
           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
         }
       }
-      bool reallocated = false;
       if (objects != NULL) {
         JRT_BLOCK
-          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
+          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
         JRT_END
-      }
-      if (reallocated) {
-        reassign_fields(&deoptee, &map, objects);
+        reassign_fields(&deoptee, &map, objects, realloc_failures);
 #ifndef PRODUCT
         if (TraceDeoptimization) {
           ttyLocker ttyl;
           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
-          print_objects(objects);
+          print_objects(objects, realloc_failures);
         }
 #endif
       }
@@ -273,7 +272,7 @@
         assert (cvf->scope() != NULL,"expect only compiled java frames");
         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
         if (monitors->is_nonempty()) {
-          relock_objects(monitors, thread);
+          relock_objects(monitors, thread, realloc_failures);
 #ifndef PRODUCT
           if (TraceDeoptimization) {
             ttyLocker ttyl;
@@ -284,7 +283,12 @@
                   first = false;
                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                 }
-                tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                if (mi->owner_is_scalar_replaced()) {
+                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
+                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
+                } else {
+                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                }
               }
             }
           }
@@ -299,9 +303,14 @@
   // out the java state residing in the vframeArray will be missed.
   No_Safepoint_Verifier no_safepoint;
 
-  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
+  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
+#ifdef COMPILER2
+  if (realloc_failures) {
+    pop_frames_failed_reallocs(thread, array);
+  }
+#endif
 
-  assert(thread->vframe_array_head() == NULL, "Pending deopt!");;
+  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
   thread->set_vframe_array_head(array);
 
   // Now that the vframeArray has been created if we have any deferred local writes
@@ -753,6 +762,8 @@
   int exception_line = thread->exception_line();
   thread->clear_pending_exception();
 
+  bool failures = false;
+
   for (int i = 0; i < objects->length(); i++) {
     assert(objects->at(i)->is_object(), "invalid debug information");
     ObjectValue* sv = (ObjectValue*) objects->at(i);
@@ -762,27 +773,34 @@
 
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
-      obj = ik->allocate_instance(CHECK_(false));
+      obj = ik->allocate_instance(THREAD);
     } else if (k->oop_is_typeArray()) {
       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
       int len = sv->field_size() / type2size[ak->element_type()];
-      obj = ak->allocate(len, CHECK_(false));
+      obj = ak->allocate(len, THREAD);
     } else if (k->oop_is_objArray()) {
       ObjArrayKlass* ak = ObjArrayKlass::cast(k());
-      obj = ak->allocate(sv->field_size(), CHECK_(false));
+      obj = ak->allocate(sv->field_size(), THREAD);
     }
 
-    assert(obj != NULL, "allocation failed");
+    if (obj == NULL) {
+      failures = true;
+    }
+
     assert(sv->value().is_null(), "redundant reallocation");
+    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
+    CLEAR_PENDING_EXCEPTION;
     sv->set_value(obj);
   }
 
-  if (pending_exception.not_null()) {
+  if (failures) {
+    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
+  } else if (pending_exception.not_null()) {
     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
   }
 
-  return true;
+  return failures;
 }
 
 // This assumes that the fields are stored in ObjectValue in the same order
@@ -920,12 +938,15 @@
 
 
 // restore fields of all eliminated objects and arrays
-void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   for (int i = 0; i < objects->length(); i++) {
     ObjectValue* sv = (ObjectValue*) objects->at(i);
     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
     Handle obj = sv->value();
-    assert(obj.not_null(), "reallocation was missed");
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      continue;
+    }
 
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
@@ -942,34 +963,36 @@
 
 
 // relock objects for which synchronization was eliminated
-void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
+void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
   for (int i = 0; i < monitors->length(); i++) {
     MonitorInfo* mon_info = monitors->at(i);
     if (mon_info->eliminated()) {
-      assert(mon_info->owner() != NULL, "reallocation was missed");
-      Handle obj = Handle(mon_info->owner());
-      markOop mark = obj->mark();
-      if (UseBiasedLocking && mark->has_bias_pattern()) {
-        // New allocated objects may have the mark set to anonymously biased.
-        // Also the deoptimized method may called methods with synchronization
-        // where the thread-local object is bias locked to the current thread.
-        assert(mark->is_biased_anonymously() ||
-               mark->biased_locker() == thread, "should be locked to current thread");
-        // Reset mark word to unbiased prototype.
-        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
-        obj->set_mark(unbiased_prototype);
+      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
+      if (!mon_info->owner_is_scalar_replaced()) {
+        Handle obj = Handle(mon_info->owner());
+        markOop mark = obj->mark();
+        if (UseBiasedLocking && mark->has_bias_pattern()) {
+          // Newly allocated objects may have the mark set to anonymously biased.
+          // Also the deoptimized method may have called methods with synchronization
+          // where the thread-local object is bias locked to the current thread.
+          assert(mark->is_biased_anonymously() ||
+                 mark->biased_locker() == thread, "should be locked to current thread");
+          // Reset mark word to unbiased prototype.
+          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+          obj->set_mark(unbiased_prototype);
+        }
+        BasicLock* lock = mon_info->lock();
+        ObjectSynchronizer::slow_enter(obj, lock, thread);
+        assert(mon_info->owner()->is_locked(), "object must be locked now");
       }
-      BasicLock* lock = mon_info->lock();
-      ObjectSynchronizer::slow_enter(obj, lock, thread);
     }
-    assert(mon_info->owner()->is_locked(), "object must be locked now");
   }
 }
 
 
 #ifndef PRODUCT
 // print information about reallocated objects
-void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   fieldDescriptor fd;
 
   for (int i = 0; i < objects->length(); i++) {
@@ -979,10 +1002,15 @@
 
     tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
     k->print_value();
-    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      tty->print(" allocation failed");
+    } else {
+      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    }
     tty->cr();
 
-    if (Verbose) {
+    if (Verbose && !obj.is_null()) {
       k->oop_print_on(obj(), tty);
     }
   }
@@ -990,7 +1018,7 @@
 #endif
 #endif // COMPILER2
 
-vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
   Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
 
 #ifndef PRODUCT
@@ -1033,7 +1061,7 @@
   // Since the Java thread being deoptimized will eventually adjust it's own stack,
   // the vframeArray containing the unpacking information is allocated in the C heap.
   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
-  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
+  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
 
   // Compare the vframeArray to the collected vframes
   assert(array->structural_compare(thread, chunk), "just checking");
@@ -1048,6 +1076,33 @@
   return array;
 }
 
+#ifdef COMPILER2
+void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
+  // Reallocation of some scalar replaced objects failed. Record
+  // that we need to pop all the interpreter frames for the
+  // deoptimized compiled frame.
+  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
+  thread->set_frames_to_pop_failed_realloc(array->frames());
+  // Unlock all monitors here otherwise the interpreter will see a
+  // mix of locked and unlocked monitors (because of failed
+  // reallocations of synchronized objects) and be confused.
+  for (int i = 0; i < array->frames(); i++) {
+    MonitorChunk* monitors = array->element(i)->monitors();
+    if (monitors != NULL) {
+      for (int j = 0; j < monitors->number_of_monitors(); j++) {
+        BasicObjectLock* src = monitors->at(j);
+        if (src->obj() != NULL) {
+          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
+        }
+      }
+      array->element(i)->free_monitors(thread);
+#ifdef ASSERT
+      array->element(i)->set_removed_monitors();
+#endif
+    }
+  }
+}
+#endif
 
 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
--- a/src/share/vm/runtime/deoptimization.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/deoptimization.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -120,13 +120,14 @@
   static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
   static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
   static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
-  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
-  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread);
-  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
+  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
+  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
+  static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
+  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
 #endif // COMPILER2
 
   public:
-  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
+  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
 
   // Interface used for unpacking deoptimized frames
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -482,6 +482,7 @@
 
 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
+  assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 
   // Reset method handle flag.
   thread->set_is_method_handle_return(false);
--- a/src/share/vm/runtime/thread.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/thread.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -1495,6 +1495,7 @@
   _popframe_condition = popframe_inactive;
   _popframe_preserved_args = NULL;
   _popframe_preserved_args_size = 0;
+  _frames_to_pop_failed_realloc = 0;
 
   pd_initialize();
 }
--- a/src/share/vm/runtime/thread.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/thread.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -933,6 +933,12 @@
   // This is set to popframe_pending to signal that top Java frame should be popped immediately
   int _popframe_condition;
 
+  // If reallocation of scalar replaced objects fails, we throw OOM
+  // and during exception propagation, pop the top
+  // _frames_to_pop_failed_realloc frames, the ones that reference
+  // failed reallocations.
+  int _frames_to_pop_failed_realloc;
+
 #ifndef PRODUCT
   int _jmp_ring_index;
   struct {
@@ -1585,6 +1591,10 @@
   void clr_pop_frame_in_process(void)                 { _popframe_condition &= ~popframe_processing_bit; }
 #endif
 
+  int frames_to_pop_failed_realloc() const            { return _frames_to_pop_failed_realloc; }
+  void set_frames_to_pop_failed_realloc(int nb)       { _frames_to_pop_failed_realloc = nb; }
+  void dec_frames_to_pop_failed_realloc()             { _frames_to_pop_failed_realloc--; }
+
  private:
   // Saved incoming arguments to popped frame.
   // Used only when popped interpreted frame returns to deoptimized frame.
--- a/src/share/vm/runtime/vframeArray.cpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/vframeArray.cpp	Fri Dec 12 10:31:00 2014 -0800
@@ -56,7 +56,7 @@
   }
 }
 
-void vframeArrayElement::fill_in(compiledVFrame* vf) {
+void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {
 
 // Copy the information from the compiled vframe to the
 // interpreter frame we will be creating to replace vf
@@ -64,6 +64,9 @@
   _method = vf->method();
   _bci    = vf->raw_bci();
   _reexecute = vf->should_reexecute();
+#ifdef ASSERT
+  _removed_monitors = false;
+#endif
 
   int index;
 
@@ -81,11 +84,15 @@
     // Migrate the BasicLocks from the stack to the monitor chunk
     for (index = 0; index < list->length(); index++) {
       MonitorInfo* monitor = list->at(index);
-      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
-      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
+      assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
       BasicObjectLock* dest = _monitors->at(index);
-      dest->set_obj(monitor->owner());
-      monitor->lock()->move_to(monitor->owner(), dest->lock());
+      if (monitor->owner_is_scalar_replaced()) {
+        dest->set_obj(NULL);
+      } else {
+        assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
+        dest->set_obj(monitor->owner());
+        monitor->lock()->move_to(monitor->owner(), dest->lock());
+      }
     }
   }
 
@@ -110,7 +117,7 @@
     StackValue* value = locs->at(index);
     switch(value->type()) {
       case T_OBJECT:
-        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
+        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
         // preserve object type
         _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
@@ -135,7 +142,7 @@
     StackValue* value = exprs->at(index);
     switch(value->type()) {
       case T_OBJECT:
-        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
+        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
         // preserve object type
         _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
@@ -286,7 +293,7 @@
 
   _frame.patch_pc(thread, pc);
 
-  assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");
+  assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors");
 
   BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
   for (int index = 0; index < locks; index++) {
@@ -438,7 +445,8 @@
 
 
 vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
-                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {
+                                   RegisterMap *reg_map, frame sender, frame caller, frame self,
+                                   bool realloc_failures) {
 
   // Allocate the vframeArray
   vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
@@ -450,19 +458,20 @@
   result->_caller = caller;
   result->_original = self;
   result->set_unroll_block(NULL); // initialize it
-  result->fill_in(thread, frame_size, chunk, reg_map);
+  result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
   return result;
 }
 
 void vframeArray::fill_in(JavaThread* thread,
                           int frame_size,
                           GrowableArray<compiledVFrame*>* chunk,
-                          const RegisterMap *reg_map) {
+                          const RegisterMap *reg_map,
+                          bool realloc_failures) {
   // Set owner first, it is used when adding monitor chunks
 
   _frame_size = frame_size;
   for(int i = 0; i < chunk->length(); i++) {
-    element(i)->fill_in(chunk->at(i));
+    element(i)->fill_in(chunk->at(i), realloc_failures);
   }
 
   // Copy registers for callee-saved registers
--- a/src/share/vm/runtime/vframeArray.hpp	Wed Dec 10 14:35:48 2014 -0800
+++ b/src/share/vm/runtime/vframeArray.hpp	Fri Dec 12 10:31:00 2014 -0800
@@ -58,6 +58,9 @@
     MonitorChunk* _monitors;                                     // active monitors for this vframe
     StackValueCollection* _locals;
     StackValueCollection* _expressions;
+#ifdef ASSERT
+    bool _removed_monitors;
+#endif
 
   public:
 
@@ -78,7 +81,7 @@
 
   StackValueCollection* expressions(void) const        { return _expressions; }
 
-  void fill_in(compiledVFrame* vf);
+  void fill_in(compiledVFrame* vf, bool realloc_failures);
 
   // Formerly part of deoptimizedVFrame
 
@@ -99,6 +102,12 @@
                        bool is_bottom_frame,
                        int exec_mode);
 
+#ifdef ASSERT
+  void set_removed_monitors() {
+    _removed_monitors = true;
+  }
+#endif
+
 #ifndef PRODUCT
   void print(outputStream* st);
 #endif /* PRODUCT */
@@ -160,13 +169,14 @@
   int frames() const                            { return _frames;   }
 
   static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
-                               RegisterMap* reg_map, frame sender, frame caller, frame self);
+                               RegisterMap* reg_map, frame sender, frame caller, frame self,
+                               bool realloc_failures);
 
 
   vframeArrayElement* element(int index)        { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; }
 
   // Allocates a new vframe in the array and fills the array with vframe information in chunk
-  void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map);
+  void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map, bool realloc_failures);
 
   // Returns the owner of this vframeArray
   JavaThread* owner_thread() const           { return _owner_thread; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/exceptions/SumTest.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066900
+ * @summary FP registers are not properly restored by C1 when handling exceptions
+ * @run main/othervm -Xbatch SumTest
+ *
+ */
+public class SumTest {
+    private static class Sum {
+
+        double[] sums;
+
+        /**
+         * Construct empty Sum
+         */
+        public Sum() {
+            sums = new double[0];
+        }
+
+        /**
+         * Return the sum of all numbers added to this Sum
+         *
+         * @return the sum
+         */
+        final public double getSum() {
+            double sum = 0;
+            for (final double s : sums) {
+                sum += s;
+            }
+
+            return sum;
+        }
+
+        /**
+         * Add a new number to this Sum
+         *
+         * @param a number to be added.
+         */
+        final public void add(double a) {
+            try {
+                sums[sums.length] = -1; // Cause IndexOutOfBoundsException
+            } catch (final IndexOutOfBoundsException e) {
+                final double[] oldSums = sums;
+                sums = new double[oldSums.length + 1]; // Extend sums
+                System.arraycopy(oldSums, 0, sums, 0, oldSums.length);
+                sums[oldSums.length] = a; // Append a
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        final Sum sum = new Sum();
+        for (int i = 1; i <= 10000; ++i) {
+            sum.add(1);
+            double ii = sum.getSum();
+            if (i != ii) {
+                throw new Exception("Failure: computed = " + ii + ", expected = " + i);
+            }
+        }
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/rangechecks/TestRangeCheckSmearing.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066103
+ * @summary C2's range check smearing allows out of bound array accesses
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox /testlibrary/com/oracle/java/testlibrary
+ * @build TestRangeCheckSmearing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm -ea -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearing
+ *
+ */
+
+import java.lang.annotation.*;
+import java.lang.reflect.*;
+import java.util.*;
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Platform;
+
+public class TestRangeCheckSmearing {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    @Retention(RetentionPolicy.RUNTIME)
+    @interface Args { int[] value(); }
+
+    // first range check is i + max of all constants
+    @Args({0, 8})
+    static int m1(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+9];
+        if (allaccesses) {
+            res += array[i+8];
+            res += array[i+7];
+            res += array[i+6];
+            res += array[i+5];
+            res += array[i+4];
+            res += array[i+3];
+            res += array[i+2];
+            res += array[i+1];
+        }
+        return res;
+    }
+
+    // first range check is i + min of all constants
+    @Args({0, -9})
+    static int m2(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+1];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+3];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    // first range check is not i + min/max of all constants
+    @Args({0, 8})
+    static int m3(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+1];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -9})
+    static int m4(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i+4];
+            res += array[i+1];
+            res += array[i+2];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -3})
+    static int m5(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+2];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, 6})
+    static int m6(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+4];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, 6})
+    static int m7(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+2];
+        res += array[i+4];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -3})
+    static int m8(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+4];
+        res += array[i+2];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({6, 15})
+    static int m9(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-1];
+            res += array[i-4];
+            res += array[i-5];
+            res += array[i-6];
+        }
+        return res;
+    }
+
+    @Args({3, 12})
+    static int m10(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-1];
+            res += array[i-3];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+        }
+        return res;
+    }
+
+    @Args({3, -3})
+    static int m11(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i-2];
+        if (allaccesses) {
+            res += array[i+5];
+            res += array[i+6];
+        }
+        return res;
+    }
+
+    @Args({3, 6})
+    static int m12(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+6];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-3];
+        }
+        return res;
+    }
+
+    // check that identical range check is replaced by dominating one
+    // only when correct
+    @Args({0})
+    static int m13(int[] array, int i, boolean ignore) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+3];
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m14(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-2];
+        res += array[i]; // If the range check below were removed first, this access could not be considered identical to the first range check
+        res += array[i-1]; // range check removed so i-1 array access depends on previous check
+
+        return res;
+    }
+
+    static int[] m15_dummy = new int[10];
+    @Args({2, 0})
+    static int m15(int[] array, int i, boolean ignore) {
+        int res = 0;
+        res += array[i];
+
+        // When the loop is optimized out we don't want the
+        // array[i-1] access which is dependent on array[i]'s
+        // range check to become dependent on the identical range
+        // check above.
+
+        int[] array2 = m15_dummy;
+        int j = 0;
+        for (; j < 10; j++);
+        if (j == 10) {
+            array2 = array;
+        }
+
+        res += array2[i-2];
+        res += array2[i];
+        res += array2[i-1]; // range check removed so i-1 array access depends on previous check
+
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m16(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-1];
+        res += array[i-1];
+        res += array[i-2];
+
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m17(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-2];
+        res += array[i-2];
+        res += array[i+2];
+        res += array[i+2];
+        res += array[i-1];
+        res += array[i-1];
+
+        return res;
+    }
+
+    static public void main(String[] args) {
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new AssertionError("Background compilation enabled");
+        }
+        new TestRangeCheckSmearing().doTests();
+    }
+    boolean success = true;
+    boolean exception = false;
+    final int[] array = new int[10];
+    final HashMap<String,Method> tests = new HashMap<>();
+    {
+        final Class<?> TEST_PARAM_TYPES[] = { int[].class, int.class, boolean.class };
+        for (Method m : this.getClass().getDeclaredMethods()) {
+            if (m.getName().matches("m[0-9]+")) {
+                assert(Modifier.isStatic(m.getModifiers())) : m;
+                assert(m.getReturnType() == int.class) : m;
+                assert(Arrays.equals(m.getParameterTypes(), TEST_PARAM_TYPES)) : m;
+                tests.put(m.getName(), m);
+            }
+        }
+    }
+
+    void invokeTest(Method m, int[] array, int index, boolean z) {
+        try {
+            m.invoke(null, array, index, z);
+        } catch (ReflectiveOperationException roe) {
+            Throwable ex = roe.getCause();
+            if (ex instanceof ArrayIndexOutOfBoundsException)
+                throw (ArrayIndexOutOfBoundsException) ex;
+            throw new AssertionError(roe);
+        }
+    }
+
+    void doTest(String name) {
+        Method m = tests.get(name);
+        tests.remove(name);
+        int[] args = m.getAnnotation(Args.class).value();
+        int index0 = args[0], index1;
+        boolean exceptionRequired = true;
+        if (args.length == 2) {
+            index1 = args[1];
+        } else {
+            // no negative test for this one
+            assert(args.length == 1);
+            assert(name.equals("m13"));
+            exceptionRequired = false;
+            index1 = index0;
+        }
+        // Get the method compiled.
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if(!WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE);
+            }
+        }
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            throw new RuntimeException(m + " not compiled");
+        }
+
+        // valid access
+        invokeTest(m, array, index0, true);
+
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            throw new RuntimeException(m + " deoptimized on valid array access");
+        }
+
+        exception = false;
+        boolean test_success = true;
+        try {
+            invokeTest(m, array, index1, false);
+        } catch(ArrayIndexOutOfBoundsException aioob) {
+            exception = true;
+            System.out.println("ArrayIndexOutOfBoundsException thrown in "+name);
+        }
+        if (!exception) {
+            System.out.println("ArrayIndexOutOfBoundsException was not thrown in "+name);
+        }
+
+        if (Platform.isServer()) {
+            if (exceptionRequired == WHITE_BOX.isMethodCompiled(m)) {
+                System.out.println((exceptionRequired?"Didn't deoptimize":"deoptimized") + " in "+name);
+                test_success = false;
+            }
+        }
+
+        if (exception != exceptionRequired) {
+            System.out.println((exceptionRequired?"exception required but not thrown":"not exception required but thrown") + " in "+name);
+            test_success = false;
+        }
+
+        if (!test_success) {
+            success = false;
+            System.out.println("TEST FAILED: "+name);
+        }
+
+    }
+    void doTests() {
+        doTest("m1");
+        doTest("m2");
+        doTest("m3");
+        doTest("m4");
+        doTest("m5");
+        doTest("m6");
+        doTest("m7");
+        doTest("m8");
+        doTest("m9");
+        doTest("m10");
+        doTest("m11");
+        doTest("m12");
+        doTest("m13");
+        doTest("m14");
+        doTest("m15");
+        doTest("m16");
+        doTest("m17");
+        if (!success) {
+            throw new RuntimeException("Some tests failed");
+        }
+        assert(tests.isEmpty()) : tests;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/rangechecks/TestRangeCheckSmearingLoopOpts.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8048170
+ * @summary Following range check smearing, range check cannot be replaced by dominating identical test.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearingLoopOpts
+ *
+ */
+public class TestRangeCheckSmearingLoopOpts {
+
+    static int dummy;
+
+    static int m1(int[] array, int i) {
+        for (;;) {
+            for (;;) {
+                if (array[i] < 0) { // range check (i+0) dominates equivalent check below
+                    break;
+                }
+                i++;
+            }
+
+            // A control flow that stops IfNode::up_one_dom()
+            if ((i % 2) == 0) {
+                if ((array[i] % 2) == 0) {
+                    dummy = i;
+                }
+            }
+
+            // IfNode::Ideal will rewrite some range checks if Compile::allow_range_check_smearing
+            if (array[i-1] == 9) {    // range check (i-1) unchanged
+                int res = array[i-3]; // range check (i-3) unchanged
+                res += array[i];      // range check (i+0) unchanged
+                res += array[i-2];    // removed redundant range check
+                // the previous access might be hoisted by
+                // PhaseIdealLoop::split_if_with_blocks_post because
+                // it appears to have the same guard, but it also
+                // depends on the previous guards
+                return res;
+            }
+            i++;
+        }
+    }
+
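+    // Warm up with in-bounds data so m1 gets compiled, then plant a negative value at
+    // index 0: the inner loop now exits at i == 0, so the array[i-1] access must still
+    // be caught by its own range check instead of reading outside the array.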
+    public static void main(String[] args) {
+        int[] array = { 0, 1, 2, -3, 4, 5, -2, 7, 8, 9, -1 };
+        for (int i = 0; i < 20000; i++) {
+            m1(array, 0);
+        }
+        array[0] = -1;
+        try {
+            m1(array, 0);
+        } catch(ArrayIndexOutOfBoundsException aioobe) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TestDeoptOOM.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6898462
+ * @summary failed reallocations of scalar replaced objects during deoptimization cause a crash
+ * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=exclude,TestDeoptOOM::main -XX:CompileCommand=exclude,TestDeoptOOM::m9_1 -Xmx128M TestDeoptOOM
+ *
+ */
+
+public class TestDeoptOOM {
+
+    long f1;
+    long f2;
+    long f3;
+    long f4;
+    long f5;
+
+    static class LinkedList {
+        LinkedList l;
+        long[] array;
+        LinkedList(LinkedList l, int size) {
+            array = new long[size];
+            this.l = l;
+        }
+    }
+
+    static LinkedList ll;
+
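+    // Fill the heap by chaining ever smaller arrays onto a static list until even the
+    // smallest allocation fails, leaving no room for reallocations during deoptimization.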
+    static void consume_all_memory() {
+        int size = 128 * 1024 * 1024;
+        while(size > 0) {
+            try {
+                while(true) {
+                    ll = new LinkedList(ll, size);
+                }
+            } catch(OutOfMemoryError oom) {
+            }
+            size = size / 2;
+        }
+    }
+
+    static void free_memory() {
+        ll = null;
+    }
+
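+    // m1 to m8 all follow the same pattern: the allocation is scalar replaced in the
+    // compiled code and only escapes on the rarely taken deopt branch, so taking that
+    // branch forces the object to be reallocated during deoptimization on a full heap.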
+    static TestDeoptOOM m1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m2_1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m2_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m2(boolean deopt) {
+        try {
+            return m2_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m2");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m3_3(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_3");
+        }
+        return null;
+    }
+
+    static boolean m3_2(boolean deopt) {
+        try {
+            return m3_3(deopt) != null;
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_2");
+        }
+        return false;
+    }
+
+    static TestDeoptOOM m3_1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (m3_2(deopt)) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m3(boolean deopt) {
+        try {
+            return m3_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m4(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                tdoom.f1 = 1L;
+                tdoom.f2 = 2L;
+                tdoom.f3 = 3L;
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m4");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m5(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            synchronized(tdoom) {
+                if (deopt) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m5");
+        }
+        return null;
+    }
+
+    synchronized TestDeoptOOM m6_1(boolean deopt) {
+        if (deopt) {
+            return this;
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m6(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            return tdoom.m6_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m6");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m7_1(boolean deopt, Object lock) {
+        try {
+            synchronized(lock) {
+                TestDeoptOOM tdoom = new TestDeoptOOM();
+                if (deopt) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m7_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m7(boolean deopt, Object lock) {
+        try {
+            return m7_1(deopt, lock);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m7");
+        }
+        return null;
+    }
+
+    static class A {
+        long f1;
+        long f2;
+        long f3;
+        long f4;
+        long f5;
+    }
+
+    static class B {
+        long f1;
+        long f2;
+        long f3;
+        long f4;
+        long f5;
+
+        A a;
+    }
+
+    static B m8(boolean deopt) {
+        try {
+            A a = new A();
+            B b = new B();
+            b.a = a;
+            if (deopt) {
+                return b;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m8");
+        }
+        return null;
+    }
+
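+    // m9_1 is excluded from compilation (see the @run line above) and exhausts the heap
+    // just before m9 takes its rarely executed branch, so the reallocation triggered by
+    // the resulting deoptimization happens with no memory left.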
+    static void m9_1(int i) {
+        if (i > 90000) {
+            consume_all_memory();
+        }
+    }
+
+    static TestDeoptOOM m9() {
+        try {
+            for (int i = 0; i < 100000; i++) {
+                TestDeoptOOM tdoom = new TestDeoptOOM();
+                m9_1(i);
+                if (i > 90000) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m9");
+        }
+        return null;
+    }
+
+    public static void main(String[] args) {
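+        // Each scenario follows the same pattern: warm up with deopt=false so the method
+        // is compiled, fill the heap, then call with deopt=true so that any reallocation
+        // during deoptimization happens under memory pressure.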
+        for (int i = 0; i < 20000; i++) {
+            m1(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m1(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main " + oom.getMessage());
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m2(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m2(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m3(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m3(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m4(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m4(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m5(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m5(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m6(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m6(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        final Object lock = new Object();
+
+        for (int i = 0; i < 20000; i++) {
+            m7(false, lock);
+        }
+
+        consume_all_memory();
+
+        try {
+            m7(true, lock);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
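+        // Have another thread acquire the lock used by m7; if deoptimization had left it
+        // locked, the join below would hang.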
+        Thread thread = new Thread() {
+                public void run() {
+                    System.out.println("Acquiring lock");
+                    synchronized(lock) {
+                        System.out.println("Lock acquired");
+                    }
+                    System.out.println("Lock released");
+                }
+            };
+        thread.start();
+        try {
+            thread.join();
+        } catch(InterruptedException ie) {
+        }
+
+        for (int i = 0; i < 20000; i++) {
+            m8(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m8(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        try {
+            m9();
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TraceDeoptimizationNoRealloc.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8067144
+ * @summary -XX:+TraceDeoptimization tries to print realloc'ed objects even when there are none
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceDeoptimization TraceDeoptimizationNoRealloc
+ *
+ */
+
+public class TraceDeoptimizationNoRealloc {
+
+    static void m(boolean some_condition) {
+        if (some_condition) {
+            return;
+        }
+    }
+
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            m(false);
+        }
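+        // The branch in m() was never taken during warmup, so this call hits an uncommon
+        // trap and deoptimizes with no scalar replaced objects to reallocate (8067144).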
+        m(true);
+    }
+}
--- a/test/runtime/CheckEndorsedAndExtDirs/EndorsedExtDirs.java	Wed Dec 10 14:35:48 2014 -0800
+++ b/test/runtime/CheckEndorsedAndExtDirs/EndorsedExtDirs.java	Fri Dec 12 10:31:00 2014 -0800
@@ -26,6 +26,7 @@
  * @bug 8064667
  * @summary Sanity test for -XX:+CheckEndorsedAndExtDirs
  * @library /testlibrary
+ * @build com.oracle.java.testlibrary.*
  * @run main/othervm EndorsedExtDirs
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java	Fri Dec 12 10:31:00 2014 -0800
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066670
+ * @summary Testing -XX:+PrintSharedArchiveAndExit option
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class PrintSharedArchiveAndExit {
+  public static void main(String[] args) throws Exception {
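+    // Dump a CDS archive to ./sample.jsa first; the checks below all run against it.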
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    try {
+      output.shouldContain("Loading classes to share");
+      output.shouldHaveExitValue(0);
+
+      // (1) With a valid archive
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is valid");
+      output.shouldNotContain("java version");     // Should not print JVM version
+      output.shouldHaveExitValue(0);               // Should report success in error code.
+
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is valid");
+      output.shouldNotContain("Usage:");           // Should not print JVM help message
+      output.shouldHaveExitValue(0);               // Should report success in error code.
+
+      // (2) With an invalid archive (boot class path has been prepended)
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-Xbootclasspath/p:foo.jar",
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is invalid");
+      output.shouldNotContain("java version");     // Should not print JVM version
+      output.shouldHaveExitValue(1);               // Should report failure in error code.
+
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-Xbootclasspath/p:foo.jar",
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is invalid");
+      output.shouldNotContain("Usage:");           // Should not print JVM help message
+      output.shouldHaveExitValue(1);               // Should report failure in error code.
+    } catch (RuntimeException e) {
+      e.printStackTrace();
+      output.shouldContain("Unable to use shared archive");
+      output.shouldHaveExitValue(1);
+    }
+  }
+}