# HG changeset patch
# User amurillo
# Date 1401480693 25200
# Node ID ee8b934668694dba5dc0ac039f8d56e52499c0f9
# Parent 460b0b08247ffcf4c76843216af6be03c2226544
# Parent 0342d80559e048c6292c2726d712e0d97344603b
Merge

diff -r 460b0b08247f -r ee8b93466869 make/hotspot_version
--- a/make/hotspot_version Wed May 28 02:27:39 2014 -0700
+++ b/make/hotspot_version Fri May 30 13:11:33 2014 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=20
-HS_BUILD_NUMBER=16
+HS_BUILD_NUMBER=17
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
diff -r 460b0b08247f -r ee8b93466869 src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Fri May 30 13:11:33 2014 -0700
@@ -1221,10 +1221,8 @@
   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
   LIR_Opr offset = off.result();
 
-  if (data != dst) {
-    __ move(data, dst);
-    data = dst;
-  }
+  // Because we want a 2-arg form of xchg
+  __ move(data, dst);
 
   assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
   LIR_Address* addr;
@@ -1254,7 +1252,7 @@
     pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                 true /* do_load */, false /* patch */, NULL);
   }
-  __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
+  __ xchg(LIR_OprFact::address(addr), dst, dst, tmp);
   if (is_obj) {
     // Seems to be a precise address
     post_barrier(ptr, data);
diff -r 460b0b08247f -r ee8b93466869 src/cpu/sparc/vm/stubGenerator_sparc.cpp
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri May 30 13:11:33 2014 -0700
@@ -3653,9 +3653,9 @@
     const Register len_reg = I4; // cipher length
     const Register keylen = I5;  // reg for storing expanded key array length
 
-    // save cipher len before save_frame, to return in the end
-    __ mov(O4, L0);
     __ save_frame(0);
+    // save cipher len to return in the end
+    __ mov(len_reg, L0);
 
     // read expanded key length
     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
@@ -3778,9 +3778,9 @@
     // re-init intial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
-    __ restore();
-    __ retl();
-    __ delayed()->mov(L0, O0);
+    __ mov(L0, I0);
+    __ ret();
+    __ delayed()->restore();
 
     __ align(OptoLoopAlignment);
     __ BIND(L_cbcenc192);
@@ -3869,9 +3869,9 @@
     // re-init intial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
-    __ restore();
-    __ retl();
-    __ delayed()->mov(L0, O0);
+    __ mov(L0, I0);
+    __ ret();
+    __ delayed()->restore();
 
     __ align(OptoLoopAlignment);
     __ BIND(L_cbcenc256);
@@ -3962,9 +3962,9 @@
     // re-init intial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
-    __ restore();
-    __ retl();
-    __ delayed()->mov(L0, O0);
+    __ mov(L0, I0);
+    __ ret();
+    __ delayed()->restore();
 
     return start;
   }
@@ -3992,9 +3992,9 @@
     const Register original_key = I5;  // original key array only required during decryption
     const Register keylen = L6;        // reg for storing expanded key array length
 
-    // save cipher len before save_frame, to return in the end
-    __ mov(O4, L0);
     __ save_frame(0); //args are read from I* registers since we save the frame in the beginning
+    // save cipher len to return in the end
+    __ mov(len_reg, L7);
 
     // load original key from SunJCE expanded decryption key
     // Since we load original key buffer starting first element, 8-byte alignment is guaranteed
@@ -4568,10 +4568,9 @@
     // re-init intial vector for next block, 8-byte alignment is guaranteed
     __ stx(L0, rvec, 0);
     __ stx(L1, rvec, 8);
-    __ restore();
-    __ mov(L0, O0);
-    __ retl();
-    __ delayed()->nop();
+    __ mov(L7, I0);
+    __ ret();
+    __ delayed()->restore();
 
     return start;
   }
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/c1/c1_LIR.cpp
--- a/src/share/vm/c1/c1_LIR.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp Fri May 30 13:11:33 2014 -0700
@@ -1083,7 +1083,7 @@
 
 void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
   masm->emit_arraycopy(this);
-  masm->emit_code_stub(stub());
+  masm->append_code_stub(stub());
 }
 
 void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
@@ -1100,20 +1100,20 @@
 
 void LIR_OpAllocObj::emit_code(LIR_Assembler* masm) {
   masm->emit_alloc_obj(this);
-  masm->emit_code_stub(stub());
+  masm->append_code_stub(stub());
 }
 
 void LIR_OpBranch::emit_code(LIR_Assembler* masm) {
   masm->emit_opBranch(this);
   if (stub()) {
-    masm->emit_code_stub(stub());
+    masm->append_code_stub(stub());
   }
 }
 
 void LIR_OpConvert::emit_code(LIR_Assembler* masm) {
   masm->emit_opConvert(this);
   if (stub() != NULL) {
-    masm->emit_code_stub(stub());
+    masm->append_code_stub(stub());
   }
 }
 
@@ -1123,13 +1123,13 @@
 
 void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) {
   masm->emit_alloc_array(this);
-  masm->emit_code_stub(stub());
+  masm->append_code_stub(stub());
 }
 
 void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) {
   masm->emit_opTypeCheck(this);
   if (stub()) {
-    masm->emit_code_stub(stub());
+    masm->append_code_stub(stub());
   }
 }
 
@@ -1144,7 +1144,7 @@
 void LIR_OpLock::emit_code(LIR_Assembler* masm) {
   masm->emit_lock(this);
   if (stub()) {
-    masm->emit_code_stub(stub());
+    masm->append_code_stub(stub());
   }
 }
 
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/c1/c1_LIR.hpp
--- a/src/share/vm/c1/c1_LIR.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp Fri May 30 13:11:33 2014 -0700
@@ -1127,6 +1127,7 @@
   virtual void print_instr(outputStream* out) const = 0;
   virtual void print_on(outputStream* st) const PRODUCT_RETURN;
 
+  virtual bool is_patching() { return false; }
   virtual LIR_OpCall* as_OpCall() { return NULL; }
   virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
   virtual LIR_OpLabel* as_OpLabel() { return NULL; }
@@ -1387,6 +1388,7 @@
     return (LIR_MoveKind)_flags;
   }
 
+  virtual bool is_patching() { return _patch != lir_patch_none; }
   virtual void emit_code(LIR_Assembler* masm);
   virtual LIR_Op1* as_Op1() { return this; }
   virtual const char * name() const PRODUCT_RETURN0;
@@ -1619,6 +1621,7 @@
   int profiled_bci() const { return _profiled_bci; }
   bool should_profile() const { return _should_profile; }
 
+  virtual bool is_patching() { return _info_for_patch != NULL; }
   virtual void emit_code(LIR_Assembler* masm);
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
   void print_instr(outputStream* out) const PRODUCT_RETURN;
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/c1/c1_LIRAssembler.cpp
--- a/src/share/vm/c1/c1_LIRAssembler.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp Fri May 30 13:11:33 2014 -0700
@@ -58,7 +58,7 @@
     _masm->nop();
   }
   patch->install(_masm, patch_code, obj, info);
-  append_patching_stub(patch);
+  append_code_stub(patch);
 
 #ifdef ASSERT
   Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
@@ -131,11 +131,6 @@
 }
 
 
-void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
-  _slow_case_stubs->append(stub);
-}
-
-
 void LIR_Assembler::check_codespace() {
   CodeSection* cs = _masm->code_section();
   if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
@@ -144,7 +139,7 @@
 }
 
 
-void LIR_Assembler::emit_code_stub(CodeStub* stub) {
+void LIR_Assembler::append_code_stub(CodeStub* stub) {
   _slow_case_stubs->append(stub);
 }
 
@@ -435,7 +430,7 @@
 
 void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
   ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
-  emit_code_stub(stub);
+  append_code_stub(stub);
 }
 
 void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
@@ -444,7 +439,7 @@
 
 void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
   DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
-  emit_code_stub(stub);
+  append_code_stub(stub);
 }
 
 void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/c1/c1_LIRAssembler.hpp
--- a/src/share/vm/c1/c1_LIRAssembler.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp Fri May 30 13:11:33 2014 -0700
@@ -143,7 +143,7 @@
   // stubs
   void emit_slow_case_stubs();
   void emit_static_call_stub();
-  void emit_code_stub(CodeStub* op);
+  void append_code_stub(CodeStub* op);
   void add_call_info_here(CodeEmitInfo* info) { add_call_info(code_offset(), info); }
 
   // code patterns
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/c1/c1_LinearScan.cpp
--- a/src/share/vm/c1/c1_LinearScan.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp Fri May 30 13:11:33 2014 -0700
@@ -2382,16 +2382,6 @@
   int arg_count = frame_map()->oop_map_arg_count();
   OopMap* map = new OopMap(frame_size, arg_count);
 
-  // Check if this is a patch site.
-  bool is_patch_info = false;
-  if (op->code() == lir_move) {
-    assert(!is_call_site, "move must not be a call site");
-    assert(op->as_Op1() != NULL, "move must be LIR_Op1");
-    LIR_Op1* move = (LIR_Op1*)op;
-
-    is_patch_info = move->patch_code() != lir_patch_none;
-  }
-
   // Iterate through active intervals
   for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
     int assigned_reg = interval->assigned_reg();
@@ -2406,7 +2396,7 @@
       // moves, any intervals which end at this instruction are included
      // in the oop map since we may safepoint while doing the patch
      // before we've consumed the inputs.
-      if (is_patch_info || op->id() < interval->current_to()) {
+      if (op->is_patching() || op->id() < interval->current_to()) {
 
         // caller-save registers must not be included into oop-maps at calls
         assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri May 30 13:11:33 2014 -0700
@@ -819,7 +819,7 @@
     // false before we start remark. At this point we should also be
     // in a STW phase.
     assert(!concurrent_marking_in_progress(), "invariant");
-    assert(_finger == _heap_end,
+    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
     update_g1_committed(true);
@@ -978,7 +978,9 @@
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
   }
-  _first_overflow_barrier_sync.enter();
+
+  bool barrier_aborted = !_first_overflow_barrier_sync.enter();
+
   if (concurrent()) {
     ConcurrentGCThread::stsJoin();
   }
@@ -986,7 +988,17 @@
   // more work
 
   if (verbose_low()) {
-    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
+    if (barrier_aborted) {
+      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
+    } else {
+      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
+    }
+  }
+
+  if (barrier_aborted) {
+    // If the barrier aborted we ignore the overflow condition and
+    // just abort the whole marking phase as quickly as possible.
+    return;
   }
 
   // If we're executing the concurrent phase of marking, reset the marking
@@ -1026,14 +1038,20 @@
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
   }
-  _second_overflow_barrier_sync.enter();
+
+  bool barrier_aborted = !_second_overflow_barrier_sync.enter();
+
   if (concurrent()) {
     ConcurrentGCThread::stsJoin();
  }
   // at this point everything should be re-initialized and ready to go
 
   if (verbose_low()) {
-    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
+    if (barrier_aborted) {
+      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
+    } else {
+      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
+    }
   }
 }
@@ -3232,6 +3250,8 @@
   for (uint i = 0; i < _max_worker_id; ++i) {
     _tasks[i]->clear_region_fields();
   }
+  _first_overflow_barrier_sync.abort();
+  _second_overflow_barrier_sync.abort();
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/concurrentMark.hpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri May 30 13:11:33 2014 -0700
@@ -542,8 +542,12 @@
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan.
-  bool out_of_regions() { return _finger == _heap_end; }
+  // It determines whether we've run out of regions to scan. Note that
+  // the finger can point past the heap end in case the heap was expanded
+  // to satisfy an allocation without doing a GC. This is fine, because all
+  // objects in those regions will be considered live anyway because of
+  // SATB guarantees (i.e. their TAMS will be equal to bottom).
+  bool out_of_regions() { return _finger >= _heap_end; }
 
   // Returns the task with the given id
   CMTask* task(int id) {
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri May 30 13:11:33 2014 -0700
@@ -89,6 +89,10 @@
   while (!_should_terminate) {
     // wait until started is set.
     sleepBeforeNextCycle();
+    if (_should_terminate) {
+      break;
+    }
+
     {
       ResourceMark rm;
       HandleMark   hm;
@@ -303,11 +307,21 @@
 }
 
 void ConcurrentMarkThread::stop() {
-  // it is ok to take late safepoints here, if needed
-  MutexLockerEx mu(Terminator_lock);
-  _should_terminate = true;
-  while (!_has_terminated) {
-    Terminator_lock->wait();
+  {
+    MutexLockerEx ml(Terminator_lock);
+    _should_terminate = true;
+  }
+
+  {
+    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+    CGC_lock->notify_all();
+  }
+
+  {
+    MutexLockerEx ml(Terminator_lock);
+    while (!_has_terminated) {
+      Terminator_lock->wait();
+    }
   }
 }
 
@@ -327,11 +341,14 @@
   assert(!in_progress(), "should have been cleared");
 
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!started()) {
+  while (!started() && !_should_terminate) {
     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
   }
-  set_in_progress();
-  clear_started();
+
+  if (started()) {
+    set_in_progress();
+    clear_started();
+  }
 }
 
 // Note: As is the case with CMS - this method, although exported
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri May 30 13:11:33 2014 -0700
@@ -435,6 +435,9 @@
 void G1CollectedHeap::stop_conc_gc_threads() {
   _cg1r->stop();
   _cmThread->stop();
+  if (G1StringDedup::is_enabled()) {
+    G1StringDedup::stop();
+  }
 }
 
 #ifdef ASSERT
@@ -2182,6 +2185,23 @@
   return JNI_OK;
 }
 
+void G1CollectedHeap::stop() {
+#if 0
+  // Stopping concurrent worker threads is currently disabled until
+  // some bugs in concurrent mark have been resolved. Without fixing
+  // those bugs first we risk hanging during VM exit when trying to
+  // stop these threads.
+
+  // Abort any ongoing concurrent root region scanning and stop all
+  // concurrent threads. We do this to make sure these threads do
+  // not continue to execute and access resources (e.g. gclog_or_tty)
+  // that are destroyed during shutdown.
+  _cm->root_regions()->abort();
+  _cm->root_regions()->wait_until_scan_finished();
+  stop_conc_gc_threads();
+#endif
+}
+
 size_t G1CollectedHeap::conservative_max_heap_alignment() {
   return HeapRegion::max_region_size();
 }
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri May 30 13:11:33 2014 -0700
@@ -1082,6 +1082,8 @@
   // specified by the policy object.
   jint initialize();
 
+  virtual void stop();
+
   // Return the (conservative) maximum heap alignment for any G1 heap
   static size_t conservative_max_heap_alignment();
 
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri May 30 13:11:33 2014 -0700
@@ -95,7 +95,15 @@
   jbyte *const first = byte_for(mr.start());
   jbyte *const last = byte_after(mr.last());
 
-  memset(first, g1_young_gen, last - first);
+  // Below we may use an explicit loop instead of memset() because on
+  // certain platforms memset() can give concurrent readers phantom zeros.
+  if (UseMemSetInBOT) {
+    memset(first, g1_young_gen, last - first);
+  } else {
+    for (jbyte* i = first; i < last; i++) {
+      *i = g1_young_gen;
+    }
+  }
 }
 
 #ifndef PRODUCT
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedup.cpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Fri May 30 13:11:33 2014 -0700
@@ -44,6 +44,11 @@
   }
 }
 
+void G1StringDedup::stop() {
+  assert(is_enabled(), "String deduplication not enabled");
+  G1StringDedupThread::stop();
+}
+
 bool G1StringDedup::is_candidate_from_mark(oop obj) {
   if (java_lang_String::is_instance(obj)) {
     bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedup.hpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Fri May 30 13:11:33 2014 -0700
@@ -110,8 +110,12 @@
     return _enabled;
   }
 
+  // Initialize string deduplication.
   static void initialize();
 
+  // Stop the deduplication thread.
+  static void stop();
+
   // Immediately deduplicates the given String object, bypassing the
   // the deduplication queue.
   static void deduplicate(oop java_string);
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Fri May 30 13:11:33 2014 -0700
@@ -35,6 +35,7 @@
 
 G1StringDedupQueue::G1StringDedupQueue() :
   _cursor(0),
+  _cancel(false),
   _empty(true),
   _dropped(0) {
   _nqueues = MAX2(ParallelGCThreads, (size_t)1);
@@ -55,11 +56,17 @@
 
 void G1StringDedupQueue::wait() {
   MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
-  while (_queue->_empty) {
+  while (_queue->_empty && !_queue->_cancel) {
     ml.wait(Mutex::_no_safepoint_check_flag);
   }
 }
 
+void G1StringDedupQueue::cancel_wait() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  _queue->_cancel = true;
+  ml.notify();
+}
+
 void G1StringDedupQueue::push(uint worker_id, oop java_string) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
   assert(worker_id < _queue->_nqueues, "Invalid queue");
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedupQueue.hpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.hpp Fri May 30 13:11:33 2014 -0700
@@ -65,6 +65,7 @@
   G1StringDedupWorkerQueue*  _queues;
   size_t                     _nqueues;
   size_t                     _cursor;
+  bool                       _cancel;
   volatile bool              _empty;
 
   // Statistics counter, only used for logging.
@@ -81,6 +82,9 @@
   // Blocks and waits for the queue to become non-empty.
   static void wait();
 
+  // Wakes up any thread blocked waiting for the queue to become non-empty.
+  static void cancel_wait();
+
   // Pushes a deduplication candidate onto a specific GC worker queue.
   static void push(uint worker_id, oop java_string);
 
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Fri May 30 13:11:33 2014 -0700
@@ -73,6 +73,9 @@
 
     // Wait for the queue to become non-empty
     G1StringDedupQueue::wait();
+    if (_should_terminate) {
+      break;
+    }
 
     // Include this thread in safepoints
     stsJoin();
@@ -108,7 +111,23 @@
     stsLeave();
   }
 
-  ShouldNotReachHere();
+  terminate();
+}
+
+void G1StringDedupThread::stop() {
+  {
+    MonitorLockerEx ml(Terminator_lock);
+    _thread->_should_terminate = true;
+  }
+
+  G1StringDedupQueue::cancel_wait();
+
+  {
+    MonitorLockerEx ml(Terminator_lock);
+    while (!_thread->_has_terminated) {
+      ml.wait();
+    }
+  }
 }
 
 void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp Fri May 30 13:11:33 2014 -0700
@@ -47,6 +47,8 @@
 
  public:
   static void create();
+  static void stop();
+
   static G1StringDedupThread* thread();
 
   virtual void run();
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/gc_interface/collectedHeap.hpp
--- a/src/share/vm/gc_interface/collectedHeap.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp Fri May 30 13:11:33 2014 -0700
@@ -208,6 +208,9 @@
   // This is the correct place to place such initialization methods.
   virtual void post_initialize() = 0;
 
+  // Stop any ongoing concurrent work and prepare for exit.
+  virtual void stop() {}
+
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
 
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/memory/allocation.cpp
--- a/src/share/vm/memory/allocation.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/memory/allocation.cpp Fri May 30 13:11:33 2014 -0700
@@ -561,6 +561,7 @@
   _chunk = new (alloc_failmode, len) Chunk(len);
 
   if (_chunk == NULL) {
+    _chunk = k;                 // restore the previous value of _chunk
     return NULL;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/prims/jvmtiEnv.cpp
--- a/src/share/vm/prims/jvmtiEnv.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/prims/jvmtiEnv.cpp Fri May 30 13:11:33 2014 -0700
@@ -307,9 +307,9 @@
       !java_lang_Class::is_primitive(mirror)) {
     Klass* k = java_lang_Class::as_Klass(mirror);
     assert(k != NULL, "class for non-primitive mirror must exist");
-    *size_ptr = k->size() * wordSize;
+    *size_ptr = (jlong)k->size() * wordSize;
   } else {
-    *size_ptr = mirror->size() * wordSize;
+    *size_ptr = (jlong)mirror->size() * wordSize;
   }
   return JVMTI_ERROR_NONE;
 } /* end GetObjectSize */
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/runtime/java.cpp
--- a/src/share/vm/runtime/java.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/runtime/java.cpp Fri May 30 13:11:33 2014 -0700
@@ -497,6 +497,9 @@
     os::infinite_sleep();
   }
 
+  // Stop any ongoing concurrent GC work
+  Universe::heap()->stop();
+
   // Terminate watcher thread - must before disenrolling any periodic task
   if (PeriodicTask::num_tasks() > 0)
     WatcherThread::stop();
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/utilities/workgroup.cpp
--- a/src/share/vm/utilities/workgroup.cpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/utilities/workgroup.cpp Fri May 30 13:11:33 2014 -0700
@@ -378,21 +378,22 @@
 
 WorkGangBarrierSync::WorkGangBarrierSync()
   : _monitor(Mutex::safepoint, "work gang barrier sync", true),
-    _n_workers(0), _n_completed(0), _should_reset(false) {
+    _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 
 WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
   : _monitor(Mutex::safepoint, name, true),
-    _n_workers(n_workers), _n_completed(0), _should_reset(false) {
+    _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 
 void WorkGangBarrierSync::set_n_workers(uint n_workers) {
-  _n_workers   = n_workers;
-  _n_completed = 0;
+  _n_workers    = n_workers;
+  _n_completed  = 0;
   _should_reset = false;
+  _aborted      = false;
 }
 
-void WorkGangBarrierSync::enter() {
+bool WorkGangBarrierSync::enter() {
   MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
   if (should_reset()) {
     // The should_reset() was set and we are the first worker to enter
@@ -415,10 +416,17 @@
     set_should_reset(true);
     monitor()->notify_all();
   } else {
-    while (n_completed() != n_workers()) {
+    while (n_completed() != n_workers() && !aborted()) {
      monitor()->wait(/* no_safepoint_check */ true);
     }
   }
+  return !aborted();
+}
+
+void WorkGangBarrierSync::abort() {
+  MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
+  set_aborted();
+  monitor()->notify_all();
 }
 
 // SubTasksDone functions.
diff -r 460b0b08247f -r ee8b93466869 src/share/vm/utilities/workgroup.hpp
--- a/src/share/vm/utilities/workgroup.hpp Wed May 28 02:27:39 2014 -0700
+++ b/src/share/vm/utilities/workgroup.hpp Fri May 30 13:11:33 2014 -0700
@@ -359,18 +359,20 @@
 class WorkGangBarrierSync : public StackObj {
 protected:
   Monitor _monitor;
-  uint     _n_workers;
-  uint     _n_completed;
+  uint    _n_workers;
+  uint    _n_completed;
   bool    _should_reset;
+  bool    _aborted;
 
   Monitor* monitor()        { return &_monitor; }
   uint     n_workers()      { return _n_workers; }
   uint     n_completed()    { return _n_completed; }
   bool     should_reset()   { return _should_reset; }
+  bool     aborted()        { return _aborted; }
 
   void     zero_completed() { _n_completed = 0; }
   void     inc_completed()  { _n_completed++; }
-
+  void     set_aborted()    { _aborted = true; }
   void     set_should_reset(bool v) { _should_reset = v; }
 
 public:
@@ -383,8 +385,14 @@
 
   // Enter the barrier. A worker that enters the barrier will
   // not be allowed to leave until all other threads have
-  // also entered the barrier.
-  void enter();
+  // also entered the barrier or the barrier is aborted.
+  // Returns false if the barrier was aborted.
+  bool enter();
+
+  // Aborts the barrier and wakes up any threads waiting for
+  // the barrier to complete. The barrier will remain in the
+  // aborted state until the next call to set_n_workers().
+  void abort();
 };
 
 // A class to manage claiming of subtasks within a group of tasks.  The
diff -r 460b0b08247f -r ee8b93466869 test/TEST.groups
--- a/test/TEST.groups Wed May 28 02:27:39 2014 -0700
+++ b/test/TEST.groups Fri May 30 13:11:33 2014 -0700
@@ -134,6 +134,8 @@
   gc/arguments/TestDynMaxHeapFreeRatio.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
   serviceability/threads/TestFalseDeadLock.java \
+  serviceability/jvmti/GetObjectSizeOverflow.java \
+  serviceability/jvmti/TestRedefineWithUnresolvedClass.java \
   compiler/tiered/NonTieredLevelsTest.java \
   compiler/tiered/TieredLevelsTest.java \
   compiler/intrinsics/bmi/verifycode
diff -r 460b0b08247f -r ee8b93466869 test/serviceability/jvmti/GetObjectSizeOverflow.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/jvmti/GetObjectSizeOverflow.java Fri May 30 13:11:33 2014 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.io.PrintWriter;
+import com.oracle.java.testlibrary.*;
+
+/*
+ * Test to verify GetObjectSize does not overflow on a 600M element int[]
+ *
+ * @test
+ * @bug 8027230
+ * @library /testlibrary
+ * @build GetObjectSizeOverflowAgent
+ * @run main ClassFileInstaller GetObjectSizeOverflowAgent
+ * @run main GetObjectSizeOverflow
+ */
+public class GetObjectSizeOverflow {
+    public static void main(String[] args) throws Exception {
+
+        if (!Platform.is64bit()) {
+            System.out.println("Test needs a 4GB heap and can only be run as a 64bit process, skipping.");
+            return;
+        }
+
+        PrintWriter pw = new PrintWriter("MANIFEST.MF");
+        pw.println("Premain-Class: GetObjectSizeOverflowAgent");
+        pw.close();
+
+        ProcessBuilder pb = new ProcessBuilder();
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jar"), "cmf", "MANIFEST.MF", "agent.jar", "GetObjectSizeOverflowAgent.class"});
+        pb.start().waitFor();
+
+        ProcessBuilder pt = ProcessTools.createJavaProcessBuilder(true, "-Xmx4000m", "-javaagent:agent.jar", "GetObjectSizeOverflowAgent");
+        OutputAnalyzer output = new OutputAnalyzer(pt.start());
+
+        if (output.getStdout().contains("Could not reserve enough space") || output.getStderr().contains("java.lang.OutOfMemoryError")) {
+            System.out.println("stdout: " + output.getStdout());
+            System.out.println("stderr: " + output.getStderr());
+            System.out.println("Test could not reserve or allocate enough space, skipping");
+            return;
+        }
+
+        output.stdoutShouldContain("GetObjectSizeOverflow passed");
+    }
+}
diff -r 460b0b08247f -r ee8b93466869 test/serviceability/jvmti/GetObjectSizeOverflowAgent.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/jvmti/GetObjectSizeOverflowAgent.java Fri May 30 13:11:33 2014 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.lang.instrument.*;
+
+public class GetObjectSizeOverflowAgent {
+
+    static Instrumentation instrumentation;
+
+    public static void premain(String agentArgs, Instrumentation instrumentation) {
+        GetObjectSizeOverflowAgent.instrumentation = instrumentation;
+    }
+
+    public static void main(String[] args) throws Exception {
+        int[] a = new int[600_000_000];
+        long size = instrumentation.getObjectSize(a);
+
+        if (size < 2_400_000_000L) {
+            throw new RuntimeException("Invalid size of array, expected >= 2400000000, got " + size);
+        }
+
+        System.out.println("GetObjectSizeOverflow passed");
+    }
+}