# HG changeset patch
# User coleenp
# Date 1310092474 14400
# Node ID 5447b2c582ad21d251a9314cc74684dd5e9a7dab
# Parent  b16582d6c7dbcc73e47132bd38aebc692b5eaa59
# Parent  109d1d26592446b77f5e2c8d9965844dc8214b01
Merge

diff -r 109d1d265924 -r 5447b2c582ad make/hotspot_version
--- a/make/hotspot_version Sat Jul 02 04:17:12 2011 -0400
+++ b/make/hotspot_version Thu Jul 07 22:34:34 2011 -0400
@@ -33,9 +33,9 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2011

-HS_MAJOR_VER=21
+HS_MAJOR_VER=22
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=14
+HS_BUILD_NUMBER=01

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
diff -r 109d1d265924 -r 5447b2c582ad src/cpu/sparc/vm/methodHandles_sparc.cpp
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -307,11 +307,12 @@
   __ stop("damaged ricochet frame: L4 < FP");

   __ BIND(L_ok_2);
-  __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
-  __ cmp(O7_temp, FP_temp);
-  __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
-  __ delayed()->nop();
-  __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");
+  // Disable until we decide on it's fate
+  // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
+  // __ cmp(O7_temp, FP_temp);
+  // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
+  // __ delayed()->nop();
+  // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");

   __ BIND(L_ok_3);
   extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
@@ -547,8 +548,9 @@
   __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
   __ delayed()->nop();
   __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType
-  // mov(G3_method_handle, G3_method_handle); // already in this register
-  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
+  __ mov(G3_method_handle, G3_method_handle); // already in this register
+  // O0 will be filled in with JavaThread in stub
+  __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch);
   __ delayed()->nop();

   // here's where control starts out:
@@ -1145,23 +1147,13 @@
     // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
     __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
     __ ld_ptr(Address(G5_method, 0), G5_method);
-    __ tst(G5_method);
-    __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-    __ delayed()->nop();

     const int jobject_oop_offset = 0;
     __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
-    __ tst(G5_method);
-    __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-    __ delayed()->nop();

     __ verify_oop(G5_method);
     __ jump_indirect_to(G5_method_fce, O3_scratch); // jump to compiled entry
     __ delayed()->nop();
-
-    // Do something that is at least causes a valid throw from the interpreter.
- __ bind(L_no_method); - __ unimplemented("call throw_WrongMethodType_entry"); } break; diff -r 109d1d265924 -r 5447b2c582ad src/cpu/sparc/vm/stubGenerator_sparc.cpp --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -440,7 +440,8 @@ #undef __ #define __ masm-> - address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) { + address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc, + Register arg1 = noreg, Register arg2 = noreg) { #ifdef ASSERT int insts_size = VerifyThread ? 1 * K : 600; #else @@ -476,6 +477,13 @@ __ set_last_Java_frame(last_java_sp, G0); if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early __ save_thread(noreg); + if (arg1 != noreg) { + assert(arg2 != O1, "clobbered"); + __ mov(arg1, O1); + } + if (arg2 != noreg) { + __ mov(arg2, O2); + } // do the call BLOCK_COMMENT("call runtime_entry"); __ call(runtime_entry, relocInfo::runtime_call_type); @@ -3240,6 +3248,14 @@ StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; #endif // COMPILER2 !=> _LP64 + + // Build this early so it's available for the interpreter. The + // stub expects the required and actual type to already be in O1 + // and O2 respectively. + StubRoutines::_throw_WrongMethodTypeException_entry = + generate_throw_exception("WrongMethodTypeException throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), + false, G5_method_type, G3_method_handle); } diff -r 109d1d265924 -r 5447b2c582ad src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -128,24 +128,6 @@ } -// Arguments are: required type in G5_method_type, and -// failing object (or NULL) in G3_method_handle. 
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { - address entry = __ pc(); - // expression stack must be empty before entering the VM if an exception - // happened - __ empty_expression_stack(); - // load exception object - __ call_VM(Oexception, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_WrongMethodTypeException), - G5_method_type, // required - G3_method_handle); // actual - __ should_not_reach_here(); - return entry; -} - - address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { address entry = __ pc(); // expression stack must be empty before entering the VM if an exception happened diff -r 109d1d265924 -r 5447b2c582ad src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -266,7 +266,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label call_ldc, notInt, notString, notClass, exit; + Label call_ldc, notInt, isString, notString, notClass, exit; if (wide) { __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned); @@ -317,8 +317,11 @@ __ bind(notInt); // __ cmp(O2, JVM_CONSTANT_String); + __ brx(Assembler::equal, true, Assembler::pt, isString); + __ delayed()->cmp(O2, JVM_CONSTANT_Object); __ brx(Assembler::notEqual, true, Assembler::pt, notString); __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f); + __ bind(isString); __ ld_ptr(O0, O1, Otos_i); __ verify_oop(Otos_i); __ push(atos); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/assembler_x86.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -3804,6 +3804,14 @@ emit_arith(0x03, 0xC0, dst, src); } +void Assembler::andq(Address dst, int32_t imm32) { + InstructionMark im(this); + prefixq(dst); + emit_byte(0x81); + emit_operand(rsp, dst, 4); + emit_long(imm32); +} + void Assembler::andq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding()); emit_arith(0x81, 0xE0, dst, imm32); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/assembler_x86.hpp --- a/src/cpu/x86/vm/assembler_x86.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/assembler_x86.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -779,6 +779,7 @@ void andl(Register dst, Address src); void andl(Register dst, Register src); + void andq(Address dst, int32_t imm32); void andq(Register dst, int32_t imm32); void andq(Register dst, Address src); void andq(Register dst, Register src); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/icache_x86.hpp --- a/src/cpu/x86/vm/icache_x86.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/icache_x86.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -43,8 +43,8 @@ #ifdef AMD64 enum { stub_size = 64, // Size of the icache flush stub in bytes - line_size = 32, // Icache line size in bytes - log2_line_size = 5 // log2(line_size) + line_size = 64, // Icache line size in bytes + log2_line_size = 6 // log2(line_size) }; // Use default implementation diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/methodHandles_x86.cpp --- a/src/cpu/x86/vm/methodHandles_x86.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -602,15 +602,8 @@ // error path for invokeExact (only) __ bind(invoke_exact_error_path); - // jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); - Register rdx_last_Java_sp = rdx_temp; - 
__ lea(rdx_last_Java_sp, __ argument_address(constant(0))); - __ super_call_VM(noreg, - rdx_last_Java_sp, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_WrongMethodTypeException), - // pass required type, then failing mh object - rax_mtype, rcx_recv); + // Stub wants expected type in rax and the actual type in rcx + __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry())); // for invokeGeneric (only), apply argument and result conversions on the fly __ bind(invoke_generic_slow_path); @@ -1175,27 +1168,15 @@ __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started Register rbx_method = rbx_temp; - Label L_no_method; - // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); - __ testptr(rbx_method, rbx_method); - __ jccb(Assembler::zero, L_no_method); const int jobject_oop_offset = 0; __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject - __ testptr(rbx_method, rbx_method); - __ jccb(Assembler::zero, L_no_method); __ verify_oop(rbx_method); NOT_LP64(__ push(rarg2_required)); __ push(rdi_pc); // restore caller PC __ jmp(rbx_method_fce); // jump to compiled entry - - // Do something that is at least causes a valid throw from the interpreter. - __ bind(L_no_method); - __ push(rarg2_required); - __ push(rarg1_actual); - __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); } break; diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -2151,6 +2151,8 @@ // if they expect all registers to be preserved. enum layout { thread_off, // last_java_sp + arg1_off, + arg2_off, rbp_off, // callee saved register ret_pc, framesize @@ -2185,7 +2187,7 @@ // either at call sites or otherwise assume that stack unwinding will be initiated, // so caller saved registers were assumed volatile in the compiler. 
address generate_throw_exception(const char* name, address runtime_entry, - bool restore_saved_exception_pc) { + bool restore_saved_exception_pc, Register arg1 = noreg, Register arg2 = noreg) { int insts_size = 256; int locs_size = 32; @@ -2218,6 +2220,13 @@ // push java thread (becomes first argument of C function) __ movptr(Address(rsp, thread_off * wordSize), java_thread); + if (arg1 != noreg) { + __ movptr(Address(rsp, arg1_off * wordSize), arg1); + } + if (arg2 != noreg) { + assert(arg1 != noreg, "missing reg arg"); + __ movptr(Address(rsp, arg2_off * wordSize), arg2); + } // Set up last_Java_sp and last_Java_fp __ set_last_Java_frame(java_thread, rsp, rbp, NULL); @@ -2309,6 +2318,12 @@ CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); + + // Build this early so it's available for the interpreter + StubRoutines::_throw_WrongMethodTypeException_entry = + generate_throw_exception("WrongMethodTypeException throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), + false, rax, rcx); } diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -2934,7 +2934,9 @@ // caller saved registers were assumed volatile in the compiler. address generate_throw_exception(const char* name, address runtime_entry, - bool restore_saved_exception_pc) { + bool restore_saved_exception_pc, + Register arg1 = noreg, + Register arg2 = noreg) { // Information about frame layout at time of blocking runtime call. // Note that we only have to preserve callee-saved registers since // the compilers are responsible for supplying a continuation point @@ -2980,6 +2982,13 @@ __ set_last_Java_frame(rsp, rbp, NULL); // Call runtime + if (arg1 != noreg) { + assert(arg2 != c_rarg1, "clobbered"); + __ movptr(c_rarg1, arg1); + } + if (arg2 != noreg) { + __ movptr(c_rarg2, arg2); + } __ movptr(c_rarg0, r15_thread); BLOCK_COMMENT("call runtime_entry"); __ call(RuntimeAddress(runtime_entry)); @@ -3052,6 +3061,14 @@ StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); + + // Build this early so it's available for the interpreter. Stub + // expects the required and actual types as register arguments in + // j_rarg0 and j_rarg1 respectively. + StubRoutines::_throw_WrongMethodTypeException_entry = + generate_throw_exception("WrongMethodTypeException throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), + false, rax, rcx); } void generate_all() { diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -112,32 +112,6 @@ return entry; } -// Arguments are: required type at TOS+4, failing object (or NULL) at TOS. -address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { - address entry = __ pc(); - - __ pop(rbx); // actual failing object is at TOS - __ pop(rax); // required type is at TOS+4 - - __ verify_oop(rbx); - __ verify_oop(rax); - - // Various method handle types use interpreter registers as temps. 
- __ restore_bcp(); - __ restore_locals(); - - // Expression stack must be empty before entering the VM for an exception. - __ empty_expression_stack(); - __ empty_FPU_stack(); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_WrongMethodTypeException), - // pass required type, failing object (or NULL) - rax, rbx); - return entry; -} - - address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { assert(!pass_oop || message == NULL, "either oop or message but not both"); address entry = __ pc(); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -120,31 +120,6 @@ return entry; } -// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4. -address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { - address entry = __ pc(); - - __ pop(c_rarg2); // failing object is at TOS - __ pop(c_rarg1); // required type is at TOS+8 - - __ verify_oop(c_rarg1); - __ verify_oop(c_rarg2); - - // Various method handle types use interpreter registers as temps. - __ restore_bcp(); - __ restore_locals(); - - // Expression stack must be empty before entering the VM for an exception. - __ empty_expression_stack(); - - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_WrongMethodTypeException), - // pass required type, failing object (or NULL) - c_rarg1, c_rarg2); - return entry; -} - address TemplateInterpreterGenerator::generate_exception_handler_common( const char* name, const char* message, bool pass_oop) { assert(!pass_oop || message == NULL, "either oop or message but not both"); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/templateTable_x86_32.cpp --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -373,15 +373,17 @@ __ jcc(Assembler::equal, L); __ cmpl(rdx, JVM_CONSTANT_String); __ jcc(Assembler::equal, L); + __ cmpl(rdx, JVM_CONSTANT_Object); + __ jcc(Assembler::equal, L); __ stop("unexpected tag type in ldc"); __ bind(L); } #endif Label isOop; // atos and itos - // String is only oop type we will see here - __ cmpl(rdx, JVM_CONSTANT_String); - __ jccb(Assembler::equal, isOop); + // Integer is only non-oop type we will see here + __ cmpl(rdx, JVM_CONSTANT_Integer); + __ jccb(Assembler::notEqual, isOop); __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); __ push(itos); __ jmp(Done); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/templateTable_x86_64.cpp --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -385,6 +385,8 @@ __ jcc(Assembler::equal, L); __ cmpl(rdx, JVM_CONSTANT_String); __ jcc(Assembler::equal, L); + __ cmpl(rdx, JVM_CONSTANT_Object); + __ jcc(Assembler::equal, L); __ stop("unexpected tag type in ldc"); __ bind(L); } diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/vm_version_x86.cpp --- a/src/cpu/x86/vm/vm_version_x86.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -321,6 +321,20 @@ if (UseSSE < 2) UseSSE = 2; #endif +#ifdef AMD64 + // flush_icache_stub have to be generated first. 
+ // That is why Icache line size is hard coded in ICache class, + // see icache_x86.hpp. It is also the reason why we can't use + // clflush instruction in 32-bit VM since it could be running + // on CPU which does not support it. + // + // The only thing we can do is to verify that flushed + // ICache::line_size has correct value. + guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported"); + // clflush_size is size in quadwords (8 bytes). + guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported"); +#endif + // If the OS doesn't support SSE, we can't use this feature even if the HW does if (!os::supports_sse()) _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2); diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/vm_version_x86.hpp --- a/src/cpu/x86/vm/vm_version_x86.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -91,7 +91,9 @@ cmpxchg8 : 1, : 6, cmov : 1, - : 7, + : 3, + clflush : 1, + : 3, mmx : 1, fxsr : 1, sse : 1, diff -r 109d1d265924 -r 5447b2c582ad src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/x86/vm/x86_64.ad Thu Jul 07 22:34:34 2011 -0400 @@ -830,6 +830,17 @@ } } +// This could be in MacroAssembler but it's fairly C2 specific +void emit_cmpfp_fixup(MacroAssembler& _masm) { + Label exit; + __ jccb(Assembler::noParity, exit); + __ pushf(); + __ andq(Address(rsp, 0), 0xffffff2b); + __ popf(); + __ bind(exit); + __ nop(); // (target for branch to avoid branch to branch) +} + //============================================================================= const bool Matcher::constant_table_absolute_addressing = true; @@ -2173,27 +2184,9 @@ emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); %} - enc_class cmpfp_fixup() - %{ - // jnp,s exit - emit_opcode(cbuf, 0x7B); - emit_d8(cbuf, 0x0A); - - // pushfq - emit_opcode(cbuf, 0x9C); - - // andq $0xffffff2b, (%rsp) - emit_opcode(cbuf, Assembler::REX_W); - emit_opcode(cbuf, 0x81); - emit_opcode(cbuf, 0x24); - emit_opcode(cbuf, 0x24); - emit_d32(cbuf, 0xffffff2b); - - // popfq - emit_opcode(cbuf, 0x9D); - - // nop (target for branch to avoid branch to branch) - emit_opcode(cbuf, 0x90); + enc_class cmpfp_fixup() %{ + MacroAssembler _masm(&cbuf); + emit_cmpfp_fixup(_masm); %} enc_class cmpfp3(rRegI dst) @@ -10253,14 +10246,8 @@ "popfq\n" "exit: nop\t# avoid branch to branch" %} ins_encode %{ - Label L_exit; __ ucomiss($src$$XMMRegister, $constantaddress($con)); - __ jcc(Assembler::noParity, L_exit); - __ pushf(); - __ andq(rsp, 0xffffff2b); - __ popf(); - __ bind(L_exit); - __ nop(); + emit_cmpfp_fixup(_masm); %} ins_pipe(pipe_slow); %} @@ -10341,14 +10328,8 @@ "popfq\n" "exit: nop\t# avoid branch to branch" %} ins_encode %{ - Label L_exit; __ ucomisd($src$$XMMRegister, $constantaddress($con)); - __ jcc(Assembler::noParity, L_exit); - __ pushf(); - __ andq(rsp, 0xffffff2b); - __ popf(); - __ bind(L_exit); - __ nop(); + emit_cmpfp_fixup(_masm); %} ins_pipe(pipe_slow); %} diff -r 109d1d265924 -r 5447b2c582ad src/cpu/zero/vm/cppInterpreter_zero.cpp --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -657,7 +657,7 @@ if (!is_exact) { if (method->intrinsic_id() == vmIntrinsics::_invokeExact) { CALL_VM_NOCHECK_NOFIX( - InterpreterRuntime::throw_WrongMethodTypeException( + SharedRuntime::throw_WrongMethodTypeException( thread, 
method_type, mhtype));
       // NB all oops trashed!
       assert(HAS_PENDING_EXCEPTION, "should do");
@@ -673,7 +673,7 @@
       oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
       if (adapter == NULL) {
         CALL_VM_NOCHECK_NOFIX(
-          InterpreterRuntime::throw_WrongMethodTypeException(
+          SharedRuntime::throw_WrongMethodTypeException(
             thread, method_type, mhtype));
         // NB all oops trashed!
         assert(HAS_PENDING_EXCEPTION, "should do");
diff -r 109d1d265924 -r 5447b2c582ad src/share/tools/hsdis/README
--- a/src/share/tools/hsdis/README Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/tools/hsdis/README Thu Jul 07 22:34:34 2011 -0400
@@ -75,8 +75,16 @@
 * Installing

 Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so. You can
-install them on your LD_LIBRARY_PATH, or inside of your JRE next to
-$LIBARCH/libjvm.so.
+install them on your LD_LIBRARY_PATH, or inside of your JRE/JDK. The
+search path in the JVM is:
+
+1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
+2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
+
+Note that there's a bug in hotspot versions prior to hs22 that causes
+steps 2 and 3 to fail when used with JDK7.

 Now test:

diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/adlc/adlparse.cpp
--- a/src/share/vm/adlc/adlparse.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/adlc/adlparse.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -2812,6 +2812,13 @@
     params->add_entry(param);
   }

+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+  if (inst._insencode != NULL) {
+    parse_err(SYNERR, "Multiple ins_encode sections defined\n");
+    return;
+  }
+
   // Set encode class of this instruction.
   inst._insencode = encrule;
 }
@@ -3044,6 +3051,13 @@
   next_char(); // move past ';'
   skipws(); // be friendly to oper_parse()

+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+ if (inst._insencode != NULL) { + parse_err(SYNERR, "Multiple ins_encode sections defined\n"); + return; + } + // Debug Stuff if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -33,6 +33,7 @@ #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/compilationPolicy.hpp" #include "utilities/bitMap.inline.hpp" class BlockListBuilder VALUE_OBJ_CLASS_SPEC { @@ -3395,8 +3396,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { assert(!callee->is_native(), "callee must not be native"); - if (count_backedges() && callee->has_loops()) { - INLINE_BAILOUT("too complex for tiered"); + if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) { + INLINE_BAILOUT("inlining prohibited by policy"); } // first perform tests of things it's not possible to inline if (callee->has_exception_handlers() && diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/c1/c1_Optimizer.cpp --- a/src/share/vm/c1/c1_Optimizer.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/c1/c1_Optimizer.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -642,7 +642,7 @@ void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewObjectArray (NewObjectArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewMultiArray (NewMultiArray* x) { nce()->handle_NewArray(x); } -void NullCheckVisitor::do_CheckCast (CheckCast* x) {} +void NullCheckVisitor::do_CheckCast (CheckCast* x) { nce()->clear_last_explicit_null_check(); } void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {} void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); } void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/c1/c1_Runtime1.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -383,8 +383,10 @@ } JRT_END -// This is a helper to allow us to safepoint but allow the outer entry -// to be safepoint free if we need to do an osr +// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method +// associated with the top activation record. The inlinee (that is possibly included in the enclosing +// method) method oop is passed as an argument. In order to do that it is embedded in the code as +// a constant. 
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { nmethod* osr_nm = NULL; methodHandle method(THREAD, m); @@ -420,7 +422,7 @@ bci = branch_bci + offset; } - osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD); + osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD); return osr_nm; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciEnv.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -50,6 +50,7 @@ #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" #include "prims/jvmtiExport.hpp" +#include "prims/methodHandleWalk.hpp" #include "runtime/init.hpp" #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" @@ -371,6 +372,7 @@ // ------------------------------------------------------------------ // ciEnv::get_klass_by_name_impl ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass, + constantPoolHandle cpool, ciSymbol* name, bool require_local) { ASSERT_IN_VM; @@ -386,7 +388,7 @@ sym->utf8_length()-2, KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass)); ciSymbol* strippedname = get_symbol(strippedsym); - return get_klass_by_name_impl(accessing_klass, strippedname, require_local); + return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local); } // Check for prior unloaded klass. The SystemDictionary's answers @@ -443,6 +445,7 @@ // Get element ciKlass recursively. ciKlass* elem_klass = get_klass_by_name_impl(accessing_klass, + cpool, get_symbol(elem_sym), require_local); if (elem_klass != NULL && elem_klass->is_loaded()) { @@ -451,6 +454,19 @@ } } + if (found_klass() == NULL && !cpool.is_null() && cpool->has_preresolution()) { + // Look inside the constant pool for pre-resolved class entries. + for (int i = cpool->length() - 1; i >= 1; i--) { + if (cpool->tag_at(i).is_klass()) { + klassOop kls = cpool->resolved_klass_at(i); + if (Klass::cast(kls)->name() == sym) { + found_klass = KlassHandle(THREAD, kls); + break; + } + } + } + } + if (found_klass() != NULL) { // Found it. Build a CI handle. return get_object(found_klass())->as_klass(); @@ -468,6 +484,7 @@ ciSymbol* klass_name, bool require_local) { GUARDED_VM_ENTRY(return get_klass_by_name_impl(accessing_klass, + constantPoolHandle(), klass_name, require_local);) } @@ -508,13 +525,14 @@ if (klass.is_null()) { // Not found in constant pool. Use the name to do the lookup. ciKlass* k = get_klass_by_name_impl(accessor, + cpool, get_symbol(klass_name), false); // Calculate accessibility the hard way. if (!k->is_loaded()) { is_accessible = false; } else if (k->loader() != accessor->loader() && - get_klass_by_name_impl(accessor, k->name(), true) == NULL) { + get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) { // Loaded only remotely. Not linked yet. 
is_accessible = false; } else { @@ -565,7 +583,7 @@ index = cpc_entry->constant_pool_index(); oop obj = cpc_entry->f1(); if (obj != NULL) { - assert(obj->is_instance(), "must be an instance"); + assert(obj->is_instance() || obj->is_array(), "must be a Java reference"); ciObject* ciobj = get_object(obj); return ciConstant(T_OBJECT, ciobj); } @@ -607,7 +625,7 @@ return ciConstant(T_OBJECT, klass->java_mirror()); } else if (tag.is_object()) { oop obj = cpool->object_at(index); - assert(obj->is_instance(), "must be an instance"); + assert(obj->is_instance() || obj->is_array(), "must be a Java reference"); ciObject* ciobj = get_object(obj); return ciConstant(T_OBJECT, ciobj); } else if (tag.is_method_type()) { @@ -729,9 +747,35 @@ Symbol* name_sym = cpool->name_ref_at(index); Symbol* sig_sym = cpool->signature_ref_at(index); + if (cpool->has_preresolution() + || (holder == ciEnv::MethodHandle_klass() && + methodOopDesc::is_method_handle_invoke_name(name_sym))) { + // Short-circuit lookups for JSR 292-related call sites. + // That is, do not rely only on name-based lookups, because they may fail + // if the names are not resolvable in the boot class loader (7056328). + switch (bc) { + case Bytecodes::_invokevirtual: + case Bytecodes::_invokeinterface: + case Bytecodes::_invokespecial: + case Bytecodes::_invokestatic: + { + methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc); + if (m != NULL) { + return get_object(m)->as_method(); + } + } + } + } + if (holder_is_accessible) { // Our declared holder is loaded. instanceKlass* lookup = declared_holder->get_instanceKlass(); methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); + if (m != NULL && + (bc == Bytecodes::_invokestatic + ? instanceKlass::cast(m->method_holder())->is_not_initialized() + : !instanceKlass::cast(m->method_holder())->is_loaded())) { + m = NULL; + } if (m != NULL) { // We found the method. return get_object(m)->as_method(); @@ -1046,7 +1090,7 @@ // ciEnv::find_system_klass ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) { VM_ENTRY_MARK; - return get_klass_by_name_impl(NULL, klass_name, false); + return get_klass_by_name_impl(NULL, constantPoolHandle(), klass_name, false); } // ------------------------------------------------------------------ diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciEnv.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -137,6 +137,7 @@ // Implementation methods for loading and constant pool access. ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass, + constantPoolHandle cpool, ciSymbol* klass_name, bool require_local); ciKlass* get_klass_by_index_impl(constantPoolHandle cpool, diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciField.cpp --- a/src/share/vm/ci/ciField.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciField.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -287,7 +287,7 @@ } ciType* ciField::compute_type_impl() { - ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, _signature, false); + ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, constantPoolHandle(), _signature, false); if (!type->is_primitive_type() && is_shared()) { // We must not cache a pointer to an unshared type, in a shared field. 
bool type_is_also_shared = false; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciMethod.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -125,7 +125,8 @@ _name = env->get_symbol(h_m()->name()); _holder = env->get_object(h_m()->method_holder())->as_instance_klass(); ciSymbol* sig_symbol = env->get_symbol(h_m()->signature()); - _signature = new (env->arena()) ciSignature(_holder, sig_symbol); + constantPoolHandle cpool = h_m()->constants(); + _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol); _method_data = NULL; // Take a snapshot of these values, so they will be commensurate with the MDO. if (ProfileInterpreter || TieredCompilation) { @@ -152,7 +153,7 @@ // These fields are always filled in. _name = name; _holder = holder; - _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, signature); + _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, constantPoolHandle(), signature); _intrinsic_id = vmIntrinsics::_none; _liveness = NULL; _can_be_statically_bound = false; @@ -1009,6 +1010,12 @@ return 0; } +int ciMethod::highest_osr_comp_level() { + check_is_loaded(); + VM_ENTRY_MARK; + return get_methodOop()->highest_osr_comp_level(); +} + // ------------------------------------------------------------------ // ciMethod::instructions_size // diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciMethod.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -158,6 +158,7 @@ int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int comp_level(); + int highest_osr_comp_level(); Bytecodes::Code java_code_at_bci(int bci) { address bcp = code() + bci; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciMethodHandle.cpp --- a/src/share/vm/ci/ciMethodHandle.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciMethodHandle.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -41,6 +41,16 @@ VM_ENTRY_MARK; Handle h(get_oop()); methodHandle callee(_callee->get_methodOop()); + assert(callee->is_method_handle_invoke(), ""); + oop mt1 = callee->method_handle_type(); + oop mt2 = java_lang_invoke_MethodHandle::type(h()); + if (!java_lang_invoke_MethodType::equals(mt1, mt2)) { + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print_cr("ciMethodHandle::get_adapter: types not equal"); + mt1->print(); mt2->print(); + } + return NULL; + } // We catch all exceptions here that could happen in the method // handle compiler and stop the VM. MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD); @@ -53,7 +63,7 @@ if (PrintMiscellaneous && (Verbose || WizardMode)) { tty->print("*** ciMethodHandle::get_adapter => "); PENDING_EXCEPTION->print(); - tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@ + tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); } CLEAR_PENDING_EXCEPTION; return NULL; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciObjArrayKlass.cpp --- a/src/share/vm/ci/ciObjArrayKlass.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -93,6 +93,7 @@ // element klass by name. 
_element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl( this, + constantPoolHandle(), construct_array_name(base_element_klass()->name(), dimension() - 1), false); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciSignature.cpp --- a/src/share/vm/ci/ciSignature.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciSignature.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -35,7 +35,7 @@ // ------------------------------------------------------------------ // ciSignature::ciSignature -ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol) { +ciSignature::ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* symbol) { ASSERT_IN_VM; EXCEPTION_CONTEXT; _accessing_klass = accessing_klass; @@ -64,7 +64,7 @@ CLEAR_PENDING_EXCEPTION; } else { ciSymbol* klass_name = env->get_symbol(name); - type = env->get_klass_by_name_impl(_accessing_klass, klass_name, false); + type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false); } } _types->append(type); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/ci/ciSignature.hpp --- a/src/share/vm/ci/ciSignature.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/ci/ciSignature.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -44,7 +44,7 @@ friend class ciMethod; - ciSignature(ciKlass* accessing_klass, ciSymbol* signature); + ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature); void get_all_klasses(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -3287,9 +3287,9 @@ // Fields allocation: oops fields in super and sub classes are together. if( nonstatic_field_size > 0 && super_klass() != NULL && super_klass->nonstatic_oop_map_size() > 0 ) { - int map_size = super_klass->nonstatic_oop_map_size(); + int map_count = super_klass->nonstatic_oop_map_count(); OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps(); - OopMapBlock* last_map = first_map + map_size - 1; + OopMapBlock* last_map = first_map + map_count - 1; int next_offset = last_map->offset() + (last_map->count() * heapOopSize); if (next_offset == next_nonstatic_field_offset) { allocation_style = 0; // allocate oops first diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/classfile/javaClasses.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -1258,7 +1258,6 @@ objArrayOop _methods; typeArrayOop _bcis; int _index; - bool _dirty; No_Safepoint_Verifier _nsv; public: @@ -1272,37 +1271,13 @@ }; // constructor for new backtrace - BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) { + BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) { expand(CHECK); _backtrace = _head; _index = 0; } - void flush() { - // The following appears to have been an optimization to save from - // doing a barrier for each individual store into the _methods array, - // but rather to do it for the entire array after the series of writes. - // That optimization seems to have been lost when compressed oops was - // implemented. However, the extra card-marks below was left in place, - // but is now redundant because the individual stores into the - // _methods array already execute the barrier code. 
CR 6918185 has - // been filed so the original code may be restored by deferring the - // barriers until after the entire sequence of stores, thus re-enabling - // the intent of the original optimization. In the meantime the redundant - // card mark below is now disabled. - if (_dirty && _methods != NULL) { -#if 0 - BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); - bs->write_ref_array((HeapWord*)_methods->base(), _methods->length()); -#endif - _dirty = false; - } - } - void expand(TRAPS) { - flush(); - objArrayHandle old_head(THREAD, _head); Pause_No_Safepoint_Verifier pnsv(&_nsv); @@ -1328,7 +1303,6 @@ } oop backtrace() { - flush(); return _backtrace(); } @@ -1342,7 +1316,6 @@ _methods->obj_at_put(_index, method); _bcis->ushort_at_put(_index, bci); _index++; - _dirty = true; } methodOop current_method() { @@ -2574,6 +2547,18 @@ return name; } +bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) { + if (rtype(mt1) != rtype(mt2)) + return false; + if (ptype_count(mt1) != ptype_count(mt2)) + return false; + for (int i = ptype_count(mt1) - 1; i >= 0; i--) { + if (ptype(mt1, i) != ptype(mt2, i)) + return false; + } + return true; +} + oop java_lang_invoke_MethodType::rtype(oop mt) { assert(is_instance(mt), "must be a MethodType"); return mt->obj_field(_rtype_offset); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/classfile/javaClasses.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1079,6 +1079,8 @@ return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass(); } + static bool equals(oop mt1, oop mt2); + // Accessors for code generation: static int rtype_offset_in_bytes() { return _rtype_offset; } static int ptypes_offset_in_bytes() { return _ptypes_offset; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/classfile/systemDictionary.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -2367,6 +2367,8 @@ // Link m to his method type, if it is suitably generic. oop mtform = java_lang_invoke_MethodType::form(mt()); if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) + // vmlayout must be an invokeExact: + && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name) && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/code/nmethod.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -1832,7 +1832,9 @@ if (!method()->is_native()) { SimpleScopeDesc ssd(this, fr.pc()); Bytecode_invoke call(ssd.method(), ssd.bci()); - bool has_receiver = call.has_receiver(); + // compiled invokedynamic call sites have an implicit receiver at + // resolution time, so make sure it gets GC'ed. 
+    bool has_receiver = !call.is_invokestatic();
     Symbol* signature = call.signature();
     fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
   }
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/compiler/disassembler.cpp
--- a/src/share/vm/compiler/disassembler.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/compiler/disassembler.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -78,21 +78,46 @@
   char buf[JVM_MAXPATHLEN];
   os::jvm_path(buf, sizeof(buf));
   int jvm_offset = -1;
+  int lib_offset = -1;
   {
     // Match "jvm[^/]*" in jvm_path.
     const char* base = buf;
     const char* p = strrchr(buf, '/');
+    if (p != NULL) lib_offset = p - base + 1;
     p = strstr(p ? p : base, "jvm");
     if (p != NULL) jvm_offset = p - base;
   }
+  // Find the disassembler shared library.
+  // Search for several paths derived from libjvm, in this order:
+  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so (for compatibility)
+  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+  // 4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
   if (jvm_offset >= 0) {
-    // Find the disassembler next to libjvm.so.
+    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
     strcpy(&buf[jvm_offset], hsdis_library_name);
     strcat(&buf[jvm_offset], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    if (_library == NULL) {
+      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+      strcpy(&buf[lib_offset], hsdis_library_name);
+      strcat(&buf[lib_offset], os::dll_file_extension());
+      _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    }
+    if (_library == NULL) {
+      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+      buf[lib_offset - 1] = '\0';
+      const char* p = strrchr(buf, '/');
+      if (p != NULL) {
+        lib_offset = p - buf + 1;
+        strcpy(&buf[lib_offset], hsdis_library_name);
+        strcat(&buf[lib_offset], os::dll_file_extension());
+        _library = os::dll_load(buf, ebuf, sizeof ebuf);
+      }
+    }
   }
   if (_library == NULL) {
-    // Try a free-floating lookup.
+    // 4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
     strcpy(&buf[0], hsdis_library_name);
     strcat(&buf[0], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
@@ -249,7 +274,13 @@
       return arg;
     }
   } else if (match(event, "mach")) {
-    output()->print_cr("[Disassembling for mach='%s']", arg);
+    static char buffer[32] = { 0, };
+    if (strcmp(buffer, (const char*)arg) != 0 ||
+        strlen((const char*)arg) > sizeof(buffer) - 1) {
+      // Only print this when the mach changes
+      strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
+      output()->print_cr("[Disassembling for mach='%s']", arg);
+    }
   } else if (match(event, "format bytes-per-line")) {
     _bytes_per_line = (int) (intptr_t) arg;
   } else {
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -1833,8 +1833,6 @@
     }
   )
   _indexedFreeList[size].removeChunk(fc);
-  debug_only(fc->clearNext());
-  debug_only(fc->clearPrev());
   NOT_PRODUCT(
   if (FLSVerifyIndexTable) {
     verifyIndexedFreeList(size);
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Thu Jul 07 22:34:34 2011 -0400
@@ -114,17 +114,11 @@
     linkNext(ptr);
     if (ptr != NULL) ptr->linkPrev(this);
   }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
   void linkNext(FreeChunk* ptr) { _next = ptr; }
   void linkPrev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
-  void clearPrev() { _prev = NULL; }
   void clearNext() { _next = NULL; }
   void markNotFree() {
     // Set _prev (klass) to null before (if) clearing the mark word below
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -300,8 +300,21 @@
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
- assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1 - >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle"); + assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + + _allocation_stats.coalBirths() + 1) // Total Production Stock + 1 + >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths() + + (ssize_t)count()), // Total Current Stock + depletion + err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT + " violates Conservation Principle: " + "prevSweep(" SIZE_FORMAT ")" + " + splitBirths(" SIZE_FORMAT ")" + " + coalBirths(" SIZE_FORMAT ") + 1 >= " + " splitDeaths(" SIZE_FORMAT ")" + " coalDeaths(" SIZE_FORMAT ")" + " + count(" SSIZE_FORMAT ")", + this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(), + _allocation_stats.splitBirths(), _allocation_stats.splitDeaths(), + _allocation_stats.coalDeaths(), count())); } void FreeList::assert_proper_lock_protection_work() const { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -24,10 +24,11 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" -#include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/concurrentMark.inline.hpp" #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" +#include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" @@ -69,7 +70,9 @@ addr = (HeapWord*)align_size_up((intptr_t)addr, HeapWordSize << _shifter); size_t addrOffset = heapWordToOffset(addr); - if (limit == NULL) limit = _bmStartWord + _bmWordSize; + if (limit == NULL) { + limit = _bmStartWord + _bmWordSize; + } size_t limitOffset = heapWordToOffset(limit); size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); HeapWord* nextAddr = offsetToHeapWord(nextOffset); @@ -82,7 +85,9 @@ HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, HeapWord* limit) const { size_t addrOffset = heapWordToOffset(addr); - if (limit == NULL) limit = _bmStartWord + _bmWordSize; + if (limit == NULL) { + limit = _bmStartWord + _bmWordSize; + } size_t limitOffset = heapWordToOffset(limit); size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); HeapWord* nextAddr = offsetToHeapWord(nextOffset); @@ -176,18 +181,20 @@ void CMMarkStack::allocate(size_t size) { _base = NEW_C_HEAP_ARRAY(oop, size); - if (_base == NULL) + if (_base == NULL) { vm_exit_during_initialization("Failed to allocate " "CM region mark stack"); + } _index = 0; - // QQQQ cast ... 
_capacity = (jint) size; _oops_do_bound = -1; NOT_PRODUCT(_max_depth = 0); } CMMarkStack::~CMMarkStack() { - if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); + if (_base != NULL) { + FREE_C_HEAP_ARRAY(oop, _base); + } } void CMMarkStack::par_push(oop ptr) { @@ -280,16 +287,17 @@ void CMRegionStack::allocate(size_t size) { _base = NEW_C_HEAP_ARRAY(MemRegion, size); - if (_base == NULL) - vm_exit_during_initialization("Failed to allocate " - "CM region mark stack"); + if (_base == NULL) { + vm_exit_during_initialization("Failed to allocate CM region mark stack"); + } _index = 0; - // QQQQ cast ... _capacity = (jint) size; } CMRegionStack::~CMRegionStack() { - if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base); + if (_base != NULL) { + FREE_C_HEAP_ARRAY(oop, _base); + } } void CMRegionStack::push_lock_free(MemRegion mr) { @@ -421,7 +429,8 @@ // the ones in CMS generation. newOop->oop_iterate(cl); if (yield_after && _cm->do_yield_check()) { - res = false; break; + res = false; + break; } } debug_only(_drain_in_progress = false); @@ -492,19 +501,20 @@ _total_counting_time(0.0), _total_rs_scrub_time(0.0), - _parallel_workers(NULL) -{ - CMVerboseLevel verbose_level = - (CMVerboseLevel) G1MarkingVerboseLevel; - if (verbose_level < no_verbose) + _parallel_workers(NULL) { + CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel; + if (verbose_level < no_verbose) { verbose_level = no_verbose; - if (verbose_level > high_verbose) + } + if (verbose_level > high_verbose) { verbose_level = high_verbose; + } _verbose_level = verbose_level; - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " "heap end = "PTR_FORMAT, _heap_start, _heap_end); + } _markStack.allocate(MarkStackSize); _regionStack.allocate(G1MarkRegionStackSize); @@ -580,10 +590,11 @@ _marking_task_overhead = 1.0; } - if (parallel_marking_threads() > 1) + if (parallel_marking_threads() > 1) { _cleanup_task_overhead = 1.0; - else + } else { _cleanup_task_overhead = marking_task_overhead(); + } _cleanup_sleep_factor = (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); @@ -621,8 +632,7 @@ // at the beginning of remark to be false. By ensuring that we do // not observe heap expansions after marking is complete, then we do // not have this problem. - if (!concurrent_marking_in_progress() && !force) - return; + if (!concurrent_marking_in_progress() && !force) return; MemRegion committed = _g1h->g1_committed(); assert(committed.start() == _heap_start, "start shouldn't change"); @@ -655,8 +665,9 @@ // reset all the marking data structures and any necessary flags clear_marking_state(); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] resetting"); + } // We do reset all of them, since different phases will use // different number of active threads. 
So, it's easiest to have all @@ -742,8 +753,9 @@ size_t chunkSize = M; while (cur < end) { HeapWord* next = cur + chunkSize; - if (next > end) + if (next > end) { next = end; + } MemRegion mr(cur,next); _nextMarkBitMap->clearRange(mr); cur = next; @@ -781,7 +793,7 @@ #ifndef PRODUCT if (G1PrintReachableAtInitialMark) { print_reachable("at-cycle-start", - true /* use_prev_marking */, true /* all */); + VerifyOption_G1UsePrevMarking, true /* all */); } #endif @@ -922,8 +934,9 @@ */ void ConcurrentMark::enter_first_sync_barrier(int task_num) { - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] entering first barrier", task_num); + } if (concurrent()) { ConcurrentGCThread::stsLeave(); @@ -935,8 +948,9 @@ // at this point everyone should have synced up and not be doing any // more work - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] leaving first barrier", task_num); + } // let task 0 do this if (task_num == 0) { @@ -960,8 +974,9 @@ } void ConcurrentMark::enter_second_sync_barrier(int task_num) { - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] entering second barrier", task_num); + } if (concurrent()) { ConcurrentGCThread::stsLeave(); @@ -972,8 +987,9 @@ } // at this point everything should be re-initialised and ready to go - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); + } } #ifndef PRODUCT @@ -1012,8 +1028,9 @@ assert(_g1h->g1_committed().contains(addr), "address should be within the heap bounds"); - if (!_nextMarkBitMap->isMarked(addr)) + if (!_nextMarkBitMap->isMarked(addr)) { _nextMarkBitMap->parMark(addr); + } } void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) { @@ -1021,17 +1038,19 @@ // the caller. We only need to decide whether to push the region on // the region stack or not. - if (!concurrent_marking_in_progress() || !_should_gray_objects) + if (!concurrent_marking_in_progress() || !_should_gray_objects) { // We're done with marking and waiting for remark. We do not need to // push anything else on the region stack. return; + } HeapWord* finger = _finger; - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] attempting to push " "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at " PTR_FORMAT, mr.start(), mr.end(), finger); + } if (mr.start() < finger) { // The finger is always heap region aligned and it is not possible @@ -1045,14 +1064,16 @@ "region boundaries should fall within the committed space"); assert(mr.end() <= _heap_end, "region boundaries should fall within the committed space"); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") " "below the finger, pushing it", mr.start(), mr.end()); + } if (!region_stack_push_lock_free(mr)) { - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] region stack has overflown."); + } } } } @@ -1066,10 +1087,11 @@ // We definitely need to mark it, irrespective whether we bail out // because we're done with marking. if (_nextMarkBitMap->parMark(addr)) { - if (!concurrent_marking_in_progress() || !_should_gray_objects) + if (!concurrent_marking_in_progress() || !_should_gray_objects) { // If we're done with concurrent marking and we're waiting for // remark, then we're not pushing anything on the stack. return; + } // No OrderAccess:store_load() is needed. 
It is implicit in the // CAS done in parMark(addr) above @@ -1077,9 +1099,10 @@ if (addr < finger) { if (!mark_stack_push(oop(addr))) { - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[global] global stack overflow " "during parMark"); + } } } } @@ -1174,10 +1197,11 @@ set_phase(active_workers, true /* concurrent */); CMConcurrentMarkingTask markingTask(this, cmThread()); - if (parallel_marking_threads() > 0) + if (parallel_marking_threads() > 0) { _parallel_workers->run_task(&markingTask); - else + } else { markingTask.work(0); + } print_stats(); } @@ -1199,7 +1223,9 @@ HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(true, false, true); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); } G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -1218,8 +1244,9 @@ _restart_for_overflow = true; // Clear the flag. We do not need it any more. clear_has_overflown(); - if (G1TraceMarkStackOverflow) + if (G1TraceMarkStackOverflow) { gclog_or_tty->print_cr("\nRemark led to restart for overflow."); + } } else { SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); // We're done with marking. @@ -1232,9 +1259,9 @@ HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::heap()->verify(/* allow_dirty */ true, - /* silent */ false, - /* use_prev_marking */ false); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UseNextMarking); } assert(!restart_for_overflow(), "sanity"); } @@ -1326,9 +1353,7 @@ size_t end_index = index + 1; while (end_index < g1h->n_regions()) { HeapRegion* chr = g1h->region_at(end_index); - if (!chr->continuesHumongous()) { - break; - } + if (!chr->continuesHumongous()) break; end_index += 1; } _region_bm->par_at_put_range((BitMap::idx_t) index, @@ -1337,8 +1362,9 @@ } bool doHeapRegion(HeapRegion* hr) { - if (!_final && _regions_done == 0) + if (!_final && _regions_done == 0) { _start_vtime_sec = os::elapsedVTime(); + } if (hr->continuesHumongous()) { // We will ignore these here and process them when their @@ -1431,8 +1457,9 @@ _changed = true; } // Handle the last range, if any. - if (start_card_num != -1) + if (start_card_num != -1) { mark_card_num_range(start_card_num, last_card_num); + } if (_final) { // Mark the allocated-since-marking portion... 
HeapWord* tp = hr->top(); @@ -1509,14 +1536,14 @@ BitMap* _card_bm; public: G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm, - BitMap* region_bm, BitMap* card_bm) : - AbstractGangTask("G1 final counting"), _g1h(g1h), - _bm(bm), _region_bm(region_bm), _card_bm(card_bm) - { - if (ParallelGCThreads > 0) + BitMap* region_bm, BitMap* card_bm) + : AbstractGangTask("G1 final counting"), _g1h(g1h), + _bm(bm), _region_bm(region_bm), _card_bm(card_bm) { + if (ParallelGCThreads > 0) { _n_workers = _g1h->workers()->total_workers(); - else + } else { _n_workers = 1; + } _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); } @@ -1628,6 +1655,23 @@ _max_live_bytes += g1_note_end.max_live_bytes(); _freed_bytes += g1_note_end.freed_bytes(); + // If we iterate over the global cleanup list at the end of + // cleanup to do this printing we will not guarantee to only + // generate output for the newly-reclaimed regions (the list + // might not be empty at the beginning of cleanup; we might + // still be working on its previous contents). So we do the + // printing here, before we append the new regions to the global + // cleanup list. + + G1HRPrinter* hr_printer = _g1h->hr_printer(); + if (hr_printer->is_active()) { + HeapRegionLinkedListIterator iter(&local_cleanup_list); + while (iter.more_available()) { + HeapRegion* hr = iter.get_next(); + hr_printer->cleanup(hr); + } + } + _cleanup_list->add_as_tail(&local_cleanup_list); assert(local_cleanup_list.is_empty(), "post-condition"); @@ -1701,7 +1745,9 @@ true /* par */); double region_time = (os::elapsedTime() - start); _claimed_region_time += region_time; - if (region_time > _max_region_time) _max_region_time = region_time; + if (region_time > _max_region_time) { + _max_region_time = region_time; + } } return false; } @@ -1724,9 +1770,9 @@ HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, - /* prev marking */ true); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); } G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); @@ -1872,9 +1918,9 @@ HandleMark hm; // handle scope gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* allow dirty */ true, - /* silent */ false, - /* prev marking */ true); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); } g1h->verify_region_sets_optional(); @@ -1960,10 +2006,11 @@ oop obj = oopDesc::load_decode_heap_oop(p); HeapWord* addr = (HeapWord*)obj; - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("\t[0] we're looking at location " - "*"PTR_FORMAT" = "PTR_FORMAT, - p, (void*) obj); + "*"PTR_FORMAT" = "PTR_FORMAT, + p, (void*) obj); + } if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) { _bitMap->mark(addr); @@ -2025,10 +2072,11 @@ template void do_oop_work(T* p) { if (!_cm->has_overflown()) { oop obj = oopDesc::load_decode_heap_oop(p); - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("\t[%d] we're looking at location " "*"PTR_FORMAT" = "PTR_FORMAT, _task->task_id(), p, (void*) obj); + } _task->deal_with_reference(obj); _ref_counter--; @@ -2055,8 +2103,9 @@ _ref_counter = _ref_counter_limit; } } else { - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("\t[%d] 
CM Overflow", _task->task_id()); + } } } }; @@ -2071,8 +2120,10 @@ void do_void() { do { - if (_cm->verbose_high()) - gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id()); + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", + _task->task_id()); + } // We call CMTask::do_marking_step() to completely drain the local and // global marking stacks. The routine is called in a loop, which we'll @@ -2343,18 +2394,16 @@ class PrintReachableOopClosure: public OopClosure { private: G1CollectedHeap* _g1h; - CMBitMapRO* _bitmap; outputStream* _out; - bool _use_prev_marking; + VerifyOption _vo; bool _all; public: - PrintReachableOopClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking, + PrintReachableOopClosure(outputStream* out, + VerifyOption vo, bool all) : _g1h(G1CollectedHeap::heap()), - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { } + _out(out), _vo(vo), _all(all) { } void do_oop(narrowOop* p) { do_oop_work(p); } void do_oop( oop* p) { do_oop_work(p); } @@ -2372,12 +2421,23 @@ HeapRegion* hr = _g1h->heap_region_containing(obj); guarantee(hr != NULL, "invariant"); bool over_tams = false; - if (_use_prev_marking) { - over_tams = hr->obj_allocated_since_prev_marking(obj); - } else { - over_tams = hr->obj_allocated_since_next_marking(obj); + bool marked = false; + + switch (_vo) { + case VerifyOption_G1UsePrevMarking: + over_tams = hr->obj_allocated_since_prev_marking(obj); + marked = _g1h->isMarkedPrev(obj); + break; + case VerifyOption_G1UseNextMarking: + over_tams = hr->obj_allocated_since_next_marking(obj); + marked = _g1h->isMarkedNext(obj); + break; + case VerifyOption_G1UseMarkWord: + marked = obj->is_gc_marked(); + break; + default: + ShouldNotReachHere(); } - bool marked = _bitmap->isMarked((HeapWord*) obj); if (over_tams) { str = " >"; @@ -2398,35 +2458,45 @@ class PrintReachableObjectClosure : public ObjectClosure { private: - CMBitMapRO* _bitmap; - outputStream* _out; - bool _use_prev_marking; - bool _all; - HeapRegion* _hr; + G1CollectedHeap* _g1h; + outputStream* _out; + VerifyOption _vo; + bool _all; + HeapRegion* _hr; public: - PrintReachableObjectClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking, + PrintReachableObjectClosure(outputStream* out, + VerifyOption vo, bool all, HeapRegion* hr) : - _bitmap(bitmap), _out(out), - _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { } + _g1h(G1CollectedHeap::heap()), + _out(out), _vo(vo), _all(all), _hr(hr) { } void do_object(oop o) { - bool over_tams; - if (_use_prev_marking) { - over_tams = _hr->obj_allocated_since_prev_marking(o); - } else { - over_tams = _hr->obj_allocated_since_next_marking(o); + bool over_tams = false; + bool marked = false; + + switch (_vo) { + case VerifyOption_G1UsePrevMarking: + over_tams = _hr->obj_allocated_since_prev_marking(o); + marked = _g1h->isMarkedPrev(o); + break; + case VerifyOption_G1UseNextMarking: + over_tams = _hr->obj_allocated_since_next_marking(o); + marked = _g1h->isMarkedNext(o); + break; + case VerifyOption_G1UseMarkWord: + marked = o->is_gc_marked(); + break; + default: + ShouldNotReachHere(); } - bool marked = _bitmap->isMarked((HeapWord*) o); bool print_it = _all || over_tams || marked; if (print_it) { _out->print_cr(" "PTR_FORMAT"%s", o, (over_tams) ? " >" : (marked) ? 
" M" : ""); - PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all); + PrintReachableOopClosure oopCl(_out, _vo, _all); o->oop_iterate(&oopCl); } } @@ -2434,9 +2504,8 @@ class PrintReachableRegionClosure : public HeapRegionClosure { private: - CMBitMapRO* _bitmap; outputStream* _out; - bool _use_prev_marking; + VerifyOption _vo; bool _all; public: @@ -2445,10 +2514,21 @@ HeapWord* e = hr->end(); HeapWord* t = hr->top(); HeapWord* p = NULL; - if (_use_prev_marking) { - p = hr->prev_top_at_mark_start(); - } else { - p = hr->next_top_at_mark_start(); + + switch (_vo) { + case VerifyOption_G1UsePrevMarking: + p = hr->prev_top_at_mark_start(); + break; + case VerifyOption_G1UseNextMarking: + p = hr->next_top_at_mark_start(); + break; + case VerifyOption_G1UseMarkWord: + // When we are verifying marking using the mark word + // TAMS has no relevance. + assert(p == NULL, "post-condition"); + break; + default: + ShouldNotReachHere(); } _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " "TAMS: "PTR_FORMAT, b, e, t, p); @@ -2460,8 +2540,7 @@ if (to > from) { _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); _out->cr(); - PrintReachableObjectClosure ocl(_bitmap, _out, - _use_prev_marking, _all, hr); + PrintReachableObjectClosure ocl(_out, _vo, _all, hr); hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); _out->cr(); } @@ -2469,15 +2548,25 @@ return false; } - PrintReachableRegionClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking, + PrintReachableRegionClosure(outputStream* out, + VerifyOption vo, bool all) : - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { } + _out(out), _vo(vo), _all(all) { } }; +static const char* verify_option_to_tams(VerifyOption vo) { + switch (vo) { + case VerifyOption_G1UsePrevMarking: + return "PTAMS"; + case VerifyOption_G1UseNextMarking: + return "NTAMS"; + default: + return "NONE"; + } +} + void ConcurrentMark::print_reachable(const char* str, - bool use_prev_marking, + VerifyOption vo, bool all) { gclog_or_tty->cr(); gclog_or_tty->print_cr("== Doing heap dump... "); @@ -2504,20 +2593,12 @@ } outputStream* out = &fout; - - CMBitMapRO* bitmap = NULL; - if (use_prev_marking) { - bitmap = _prevMarkBitMap; - } else { - bitmap = _nextMarkBitMap; - } - - out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS"); + out->print_cr("-- USING %s", verify_option_to_tams(vo)); out->cr(); out->print_cr("--- ITERATING OVER REGIONS"); out->cr(); - PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all); + PrintReachableRegionClosure rcl(out, vo, all); _g1h->heap_region_iterate(&rcl); out->cr(); @@ -2546,34 +2627,42 @@ }; void ConcurrentMark::deal_with_reference(oop obj) { - if (verbose_high()) + if (verbose_high()) { gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, (void*) obj); - + } HeapWord* objAddr = (HeapWord*) obj; assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); if (_g1h->is_in_g1_reserved(objAddr)) { - assert(obj != NULL, "is_in_g1_reserved should ensure this"); - HeapRegion* hr = _g1h->heap_region_containing(obj); - if (_g1h->is_obj_ill(obj, hr)) { - if (verbose_high()) - gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " - "marked", (void*) obj); - - // we need to mark it first - if (_nextMarkBitMap->parMark(objAddr)) { - // No OrderAccess:store_load() is needed. 
It is implicit in the - // CAS done in parMark(objAddr) above - HeapWord* finger = _finger; - if (objAddr < finger) { - if (verbose_high()) - gclog_or_tty->print_cr("[global] below the global finger " - "("PTR_FORMAT"), pushing it", finger); - if (!mark_stack_push(obj)) { - if (verbose_low()) - gclog_or_tty->print_cr("[global] global stack overflow during " - "deal_with_reference"); + assert(obj != NULL, "null check is implicit"); + if (!_nextMarkBitMap->isMarked(objAddr)) { + // Only get the containing region if the object is not marked on the + // bitmap (otherwise, it's a waste of time since we won't do + // anything with it). + HeapRegion* hr = _g1h->heap_region_containing_raw(obj); + if (!hr->obj_allocated_since_next_marking(obj)) { + if (verbose_high()) { + gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " + "marked", (void*) obj); + } + + // we need to mark it first + if (_nextMarkBitMap->parMark(objAddr)) { + // No OrderAccess:store_load() is needed. It is implicit in the + // CAS done in parMark(objAddr) above + HeapWord* finger = _finger; + if (objAddr < finger) { + if (verbose_high()) { + gclog_or_tty->print_cr("[global] below the global finger " + "("PTR_FORMAT"), pushing it", finger); + } + if (!mark_stack_push(obj)) { + if (verbose_low()) { + gclog_or_tty->print_cr("[global] global stack overflow during " + "deal_with_reference"); + } + } } } } @@ -2587,8 +2676,9 @@ satb_mq_set.set_closure(&oc); while (satb_mq_set.apply_closure_to_completed_buffer()) { - if (verbose_medium()) + if (verbose_medium()) { gclog_or_tty->print_cr("[global] processed an SATB buffer"); + } } // no need to check whether we should do this, as this is only @@ -2631,21 +2721,43 @@ while (finger < _heap_end) { assert(_g1h->is_in_g1_reserved(finger), "invariant"); - // is the gap between reading the finger and doing the CAS too long? - - HeapRegion* curr_region = _g1h->heap_region_containing(finger); + // Note on how this code handles humongous regions. In the + // normal case the finger will reach the start of a "starts + // humongous" (SH) region. Its end will either be the end of the + // last "continues humongous" (CH) region in the sequence, or the + // standard end of the SH region (if the SH is the only region in + // the sequence). That way claim_region() will skip over the CH + // regions. However, there is a subtle race between a CM thread + // executing this method and a mutator thread doing a humongous + // object allocation. The two are not mutually exclusive as the CM + // thread does not need to hold the Heap_lock when it gets + // here. So there is a chance that claim_region() will come across + // a free region that's in the process of becoming a SH or a CH + // region. In the former case, it will either + // a) Miss the update to the region's end, in which case it will + // visit every subsequent CH region, will find their bitmaps + // empty, and do nothing, or + // b) Will observe the update of the region's end (in which case + // it will skip the subsequent CH regions). + // If it comes across a region that suddenly becomes CH, the + // scenario will be similar to b). So, the race between + // claim_region() and a humongous object allocation might force us + // to do a bit of unnecessary work (due to some unnecessary bitmap + // iterations) but it should not introduce any correctness issues.
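A minimal, self-contained sketch (simplified types, not the HotSpot API) of the finger-CAS claiming idiom that this comment and the code below rely on: a worker reads the global finger, finds the region it points into, and tries to advance the finger to that region's end with a CAS; only the worker whose CAS succeeds has claimed the region, everyone else retries with the refreshed finger.

#include <atomic>
#include <cstddef>

struct Region { char* bottom; char* end; };

std::atomic<char*> g_finger;   // global finger, advances towards the heap end
char* g_heap_end;

// Hypothetical helper for the sketch: map an address to its containing region.
Region* region_containing(char* addr);

Region* claim_region() {
  char* finger = g_finger.load();
  while (finger < g_heap_end) {
    Region* curr = region_containing(finger);
    // For a humongous series, 'end' covers the whole series, so the CH
    // regions are skipped, as the comment above describes.
    char* end = curr->end;
    if (g_finger.compare_exchange_strong(finger, end)) {
      return curr;   // this worker, and only this worker, claimed the region
    }
    // CAS failed: 'finger' has been refreshed with the current value; retry.
  }
  return NULL;       // out of regions
}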
+ HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); HeapWord* bottom = curr_region->bottom(); HeapWord* end = curr_region->end(); HeapWord* limit = curr_region->next_top_at_mark_start(); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " "["PTR_FORMAT", "PTR_FORMAT"), " "limit = "PTR_FORMAT, task_num, curr_region, bottom, end, limit); - - HeapWord* res = - (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); + } + + // Is the gap between reading the finger and doing the CAS too long? + HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); if (res == finger) { // we succeeded @@ -2653,32 +2765,36 @@ // someone else might have moved the finger even further assert(_finger >= end, "the finger should have moved forward"); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] we were successful with region = " PTR_FORMAT, task_num, curr_region); + } if (limit > bottom) { - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " "returning it ", task_num, curr_region); + } return curr_region; } else { assert(limit == bottom, "the region limit should be at bottom"); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " "returning NULL", task_num, curr_region); + } // we return NULL and the caller should try calling // claim_region() again. return NULL; } } else { assert(_finger > finger, "the finger should have moved forward"); - if (verbose_low()) + if (verbose_low()) { gclog_or_tty->print_cr("[%d] somebody else moved the finger, " "global finger = "PTR_FORMAT", " "our finger = "PTR_FORMAT, task_num, _finger, finger); + } // read it again finger = _finger; @@ -2722,18 +2838,20 @@ } void ConcurrentMark::oops_do(OopClosure* cl) { - if (_markStack.size() > 0 && verbose_low()) + if (_markStack.size() > 0 && verbose_low()) { gclog_or_tty->print_cr("[global] scanning the global marking stack, " "size = %d", _markStack.size()); + } // we first iterate over the contents of the mark stack... _markStack.oops_do(cl); for (int i = 0; i < (int)_max_task_num; ++i) { OopTaskQueue* queue = _task_queues->queue((int)i); - if (queue->size() > 0 && verbose_low()) + if (queue->size() > 0 && verbose_low()) { gclog_or_tty->print_cr("[global] scanning task queue of task %d, " "size = %d", i, queue->size()); + } // ...then over the contents of the all the task queues. queue->oops_do(cl); @@ -2805,14 +2923,17 @@ return false; } _ms[_ms_ind] = obj; - if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind; + if (obj->is_objArray()) { + _array_ind_stack[_ms_ind] = arr_ind; + } _ms_ind++; return true; } oop pop() { - if (_ms_ind == 0) return NULL; - else { + if (_ms_ind == 0) { + return NULL; + } else { _ms_ind--; return _ms[_ms_ind]; } @@ -3011,17 +3132,19 @@ // newCSet(). 
void ConcurrentMark::newCSet() { - if (!concurrent_marking_in_progress()) + if (!concurrent_marking_in_progress()) { // nothing to do if marking is not in progress return; + } // find what the lowest finger is among the global and local fingers _min_finger = _finger; for (int i = 0; i < (int)_max_task_num; ++i) { CMTask* task = _tasks[i]; HeapWord* task_finger = task->finger(); - if (task_finger != NULL && task_finger < _min_finger) + if (task_finger != NULL && task_finger < _min_finger) { _min_finger = task_finger; + } } _should_gray_objects = false; @@ -3041,17 +3164,18 @@ // irrespective whether all collection set regions are below the // finger, if the region stack is not empty. This is expected to be // a rare case, so I don't think it's necessary to be smarted about it. - if (!region_stack_empty() || has_aborted_regions()) + if (!region_stack_empty() || has_aborted_regions()) { _should_gray_objects = true; + } } void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { - if (!concurrent_marking_in_progress()) - return; + if (!concurrent_marking_in_progress()) return; HeapWord* region_end = hr->end(); - if (region_end > _min_finger) + if (region_end > _min_finger) { _should_gray_objects = true; + } } // Resets the region fields of active CMTasks whose values point @@ -3152,11 +3276,13 @@ // We take a break if someone is trying to stop the world. bool ConcurrentMark::do_yield_check(int worker_i) { if (should_yield()) { - if (worker_i == 0) + if (worker_i == 0) { _g1h->g1_policy()->record_concurrent_pause(); + } cmThread()->yield(); - if (worker_i == 0) + if (worker_i == 0) { _g1h->g1_policy()->record_concurrent_pause_end(); + } return true; } else { return false; @@ -3174,9 +3300,8 @@ bool ConcurrentMark::containing_cards_are_marked(void* start, void* last) { - return - containing_card_is_marked(start) && - containing_card_is_marked(last); + return containing_card_is_marked(start) && + containing_card_is_marked(last); } #ifndef PRODUCT @@ -3191,6 +3316,22 @@ } #endif +void CMTask::scan_object(oop obj) { + assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); + + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, + _task_id, (void*) obj); + } + + size_t obj_size = obj->size(); + _words_scanned += obj_size; + + obj->oop_iterate(_cm_oop_closure); + statsOnly( ++_objs_scanned ); + check_limits(); +} + // Closure for iteration over bitmaps class CMBitMapClosure : public BitMapClosure { private: @@ -3254,43 +3395,17 @@ CMObjectClosure(CMTask* task) : _task(task) { } }; -// Closure for iterating over object fields -class CMOopClosure : public OopClosure { -private: - G1CollectedHeap* _g1h; - ConcurrentMark* _cm; - CMTask* _task; - -public: - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } - - template void do_oop_work(T* p) { - assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); - assert(!_g1h->is_on_master_free_list( - _g1h->heap_region_containing((HeapWord*) p)), "invariant"); - - oop obj = oopDesc::load_decode_heap_oop(p); - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] we're looking at location " - "*"PTR_FORMAT" = "PTR_FORMAT, - _task->task_id(), p, (void*) obj); - _task->deal_with_reference(obj); +G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, + ConcurrentMark* cm, + CMTask* task) + : _g1h(g1h), _cm(cm), _task(task) { + assert(_ref_processor == NULL, "should be initialized to NULL"); + + if (G1UseConcMarkReferenceProcessing) { + _ref_processor = 
g1h->ref_processor(); + assert(_ref_processor != NULL, "should not be NULL"); } - - CMOopClosure(G1CollectedHeap* g1h, - ConcurrentMark* cm, - CMTask* task) - : _g1h(g1h), _cm(cm), _task(task) - { - assert(_ref_processor == NULL, "should be initialized to NULL"); - - if (G1UseConcMarkReferenceProcessing) { - _ref_processor = g1h->ref_processor(); - assert(_ref_processor != NULL, "should not be NULL"); - } - } -}; +} void CMTask::setup_for_region(HeapRegion* hr) { // Separated the asserts so that we know which one fires. @@ -3299,9 +3414,10 @@ assert(!hr->continuesHumongous(), "claim_region() should have filtered out continues humongous regions"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, _task_id, hr); + } _curr_region = hr; _finger = hr->bottom(); @@ -3314,10 +3430,11 @@ HeapWord* limit = hr->next_top_at_mark_start(); if (limit == bottom) { - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] found an empty region " "["PTR_FORMAT", "PTR_FORMAT")", _task_id, bottom, limit); + } // The region was collected underneath our feet. // We set the finger to bottom to ensure that the bitmap // iteration that will follow this will not do anything. @@ -3346,9 +3463,10 @@ void CMTask::giveup_current_region() { assert(_curr_region != NULL, "invariant"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, _task_id, _curr_region); + } clear_region_fields(); } @@ -3362,11 +3480,21 @@ _region_finger = NULL; } +void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { + if (cm_oop_closure == NULL) { + assert(_cm_oop_closure != NULL, "invariant"); + } else { + assert(_cm_oop_closure == NULL, "invariant"); + } + _cm_oop_closure = cm_oop_closure; +} + void CMTask::reset(CMBitMap* nextMarkBitMap) { guarantee(nextMarkBitMap != NULL, "invariant"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] resetting", _task_id); + } _nextMarkBitMap = nextMarkBitMap; clear_region_fields(); @@ -3411,118 +3539,6 @@ return !_cm->mark_stack_empty() || has_aborted(); } -// This determines whether the method below will check both the local -// and global fingers when determining whether to push on the stack a -// gray object (value 1) or whether it will only check the global one -// (value 0). The tradeoffs are that the former will be a bit more -// accurate and possibly push less on the stack, but it might also be -// a little bit slower. - -#define _CHECK_BOTH_FINGERS_ 1 - -void CMTask::deal_with_reference(oop obj) { - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT, - _task_id, (void*) obj); - - ++_refs_reached; - - HeapWord* objAddr = (HeapWord*) obj; - assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); - if (_g1h->is_in_g1_reserved(objAddr)) { - assert(obj != NULL, "is_in_g1_reserved should ensure this"); - HeapRegion* hr = _g1h->heap_region_containing(obj); - if (_g1h->is_obj_ill(obj, hr)) { - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked", - _task_id, (void*) obj); - - // we need to mark it first - if (_nextMarkBitMap->parMark(objAddr)) { - // No OrderAccess:store_load() is needed. 
It is implicit in the - // CAS done in parMark(objAddr) above - HeapWord* global_finger = _cm->finger(); - -#if _CHECK_BOTH_FINGERS_ - // we will check both the local and global fingers - - if (_finger != NULL && objAddr < _finger) { - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), " - "pushing it", _task_id, _finger); - push(obj); - } else if (_curr_region != NULL && objAddr < _region_limit) { - // do nothing - } else if (objAddr < global_finger) { - // Notice that the global finger might be moving forward - // concurrently. This is not a problem. In the worst case, we - // mark the object while it is above the global finger and, by - // the time we read the global finger, it has moved forward - // passed this object. In this case, the object will probably - // be visited when a task is scanning the region and will also - // be pushed on the stack. So, some duplicate work, but no - // correctness problems. - - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] below the global finger " - "("PTR_FORMAT"), pushing it", - _task_id, global_finger); - push(obj); - } else { - // do nothing - } -#else // _CHECK_BOTH_FINGERS_ - // we will only check the global finger - - if (objAddr < global_finger) { - // see long comment above - - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] below the global finger " - "("PTR_FORMAT"), pushing it", - _task_id, global_finger); - push(obj); - } -#endif // _CHECK_BOTH_FINGERS_ - } - } - } -} - -void CMTask::push(oop obj) { - HeapWord* objAddr = (HeapWord*) obj; - assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); - assert(!_g1h->is_on_master_free_list( - _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant"); - assert(!_g1h->is_obj_ill(obj), "invariant"); - assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); - - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj); - - if (!_task_queue->push(obj)) { - // The local task queue looks full. We need to push some entries - // to the global stack. - - if (_cm->verbose_medium()) - gclog_or_tty->print_cr("[%d] task queue overflow, " - "moving entries to the global stack", - _task_id); - move_entries_to_global_stack(); - - // this should succeed since, even if we overflow the global - // stack, we should have definitely removed some entries from the - // local queue. So, there must be space on it. - bool success = _task_queue->push(obj); - assert(success, "invariant"); - } - - statsOnly( int tmp_size = _task_queue->size(); - if (tmp_size > _local_max_size) - _local_max_size = tmp_size; - ++_local_pushes ); -} - void CMTask::reached_limit() { assert(_words_scanned >= _words_scanned_limit || _refs_reached >= _refs_reached_limit , @@ -3531,8 +3547,7 @@ } void CMTask::regular_clock_call() { - if (has_aborted()) - return; + if (has_aborted()) return; // First, we need to recalculate the words scanned and refs reached // limits for the next clock call. @@ -3549,8 +3564,7 @@ // If we are not concurrent (i.e. we're doing remark) we don't need // to check anything else. The other steps are only needed during // the concurrent marking phase. - if (!concurrent()) - return; + if (!concurrent()) return; // (2) If marking has been aborted for Full GC, then we also abort. if (_cm->has_aborted()) { @@ -3563,23 +3577,25 @@ // (3) If marking stats are enabled, then we update the step history. 
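CMTask::push(), removed from this file and re-added in concurrentMark.inline.hpp later in the patch, follows a common two-level work-queue idiom: try the bounded local queue first and, on overflow, spill a batch of entries to the shared global stack before retrying. A generic sketch of that idiom with simplified types (not the HotSpot TaskQueue API), assuming a local capacity of at least two:

#include <algorithm>
#include <mutex>
#include <vector>

template <typename T>
struct TwoLevelQueue {
  std::vector<T>  local;           // bounded per-worker queue
  size_t          local_capacity;
  std::vector<T>* global;          // shared overflow stack
  std::mutex*     global_lock;

  void push(const T& value) {
    if (local.size() < local_capacity) {
      local.push_back(value);
      return;
    }
    // Local queue is full: move a batch to the global stack, which frees
    // local space, so the retry below is guaranteed to succeed.
    {
      std::lock_guard<std::mutex> guard(*global_lock);
      size_t batch = std::max<size_t>(1, local_capacity / 2);
      global->insert(global->end(), local.end() - batch, local.end());
      local.resize(local.size() - batch);
    }
    local.push_back(value);
  }
};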
#if _MARKING_STATS_ - if (_words_scanned >= _words_scanned_limit) + if (_words_scanned >= _words_scanned_limit) { ++_clock_due_to_scanning; - if (_refs_reached >= _refs_reached_limit) + } + if (_refs_reached >= _refs_reached_limit) { ++_clock_due_to_marking; + } double last_interval_ms = curr_time_ms - _interval_start_time_ms; _interval_start_time_ms = curr_time_ms; _all_clock_intervals_ms.add(last_interval_ms); if (_cm->verbose_medium()) { - gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, " - "scanned = %d%s, refs reached = %d%s", - _task_id, last_interval_ms, - _words_scanned, - (_words_scanned >= _words_scanned_limit) ? " (*)" : "", - _refs_reached, - (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); + gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, " + "scanned = %d%s, refs reached = %d%s", + _task_id, last_interval_ms, + _words_scanned, + (_words_scanned >= _words_scanned_limit) ? " (*)" : "", + _refs_reached, + (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); } #endif // _MARKING_STATS_ @@ -3606,9 +3622,10 @@ // buffers available for processing. If there are, we abort. SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers", _task_id); + } // we do need to process SATB buffers, we'll abort and restart // the marking task to do so set_has_aborted(); @@ -3631,8 +3648,9 @@ // entries to/from the global stack). It basically tries to decrease the // scanning limit so that the clock is called earlier. - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] decreasing limits", _task_id); + } _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; @@ -3658,18 +3676,22 @@ statsOnly( ++_global_transfers_to; _local_pops += n ); if (!_cm->mark_stack_push(buffer, n)) { - if (_cm->verbose_low()) - gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id); + if (_cm->verbose_low()) { + gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", + _task_id); + } set_has_aborted(); } else { // the transfer was successful - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack", _task_id, n); + } statsOnly( int tmp_size = _cm->mark_stack_size(); - if (tmp_size > _global_max_size) + if (tmp_size > _global_max_size) { _global_max_size = tmp_size; + } _global_pushes += n ); } } @@ -3690,9 +3712,10 @@ // yes, we did actually pop at least one entry statsOnly( ++_global_transfers_from; _global_pops += n ); - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] popped %d entries from the global stack", _task_id, n); + } for (int i = 0; i < n; ++i) { bool success = _task_queue->push(buffer[i]); // We only call this when the local queue is empty or under a @@ -3701,8 +3724,9 @@ } statsOnly( int tmp_size = _task_queue->size(); - if (tmp_size > _local_max_size) + if (tmp_size > _local_max_size) { _local_max_size = tmp_size; + } _local_pushes += n ); } @@ -3711,31 +3735,33 @@ } void CMTask::drain_local_queue(bool partially) { - if (has_aborted()) - return; + if (has_aborted()) return; // Decide what the target size is, depending whether we're going to // drain it partially (so that other tasks can steal if they run out // of things to do) or totally (at the very end). 
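A condensed, stand-alone illustration of this partial-vs-total drain policy (placeholder container and scan function, not the actual CMTask code):

#include <algorithm>
#include <cstddef>
#include <deque>

// Drain a work queue either down to a watermark ("partially", so other
// workers can still steal entries) or completely ("totally", at the end).
template <typename T, typename ScanFn>
void drain_local_queue(std::deque<T>& queue, bool partially,
                       size_t max_elems, size_t drain_target, ScanFn scan) {
  size_t target_size = partially ? std::min(max_elems / 3, drain_target) : 0;
  while (queue.size() > target_size) {
    T obj = queue.back();
    queue.pop_back();
    scan(obj);   // visiting the object may push more work onto the queue
  }
}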
size_t target_size; - if (partially) + if (partially) { target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); - else + } else { target_size = 0; + } if (_task_queue->size() > target_size) { - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("[%d] draining local queue, target size = %d", _task_id, target_size); + } oop obj; bool ret = _task_queue->pop_local(obj); while (ret) { statsOnly( ++_local_pops ); - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, (void*) obj); + } assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); assert(!_g1h->is_on_master_free_list( @@ -3743,21 +3769,22 @@ scan_object(obj); - if (_task_queue->size() <= target_size || has_aborted()) + if (_task_queue->size() <= target_size || has_aborted()) { ret = false; - else + } else { ret = _task_queue->pop_local(obj); + } } - if (_cm->verbose_high()) + if (_cm->verbose_high()) { gclog_or_tty->print_cr("[%d] drained local queue, size = %d", _task_id, _task_queue->size()); + } } } void CMTask::drain_global_stack(bool partially) { - if (has_aborted()) - return; + if (has_aborted()) return; // We have a policy to drain the local queue before we attempt to // drain the global stack. @@ -3770,24 +3797,27 @@ // because another task might be doing the same, we might in fact // drop below the target. But, this is not a problem. size_t target_size; - if (partially) + if (partially) { target_size = _cm->partial_mark_stack_size_target(); - else + } else { target_size = 0; + } if (_cm->mark_stack_size() > target_size) { - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] draining global_stack, target size %d", _task_id, target_size); + } while (!has_aborted() && _cm->mark_stack_size() > target_size) { get_entries_from_global_stack(); drain_local_queue(partially); } - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] drained global stack, size = %d", _task_id, _cm->mark_stack_size()); + } } } @@ -3796,8 +3826,7 @@ // replicated. We should really get rid of the single-threaded version // of the code to simplify things. void CMTask::drain_satb_buffers() { - if (has_aborted()) - return; + if (has_aborted()) return; // We set this so that the regular clock knows that we're in the // middle of draining buffers and doesn't set the abort flag when it @@ -3807,26 +3836,29 @@ CMObjectClosure oc(this); SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); - if (G1CollectedHeap::use_parallel_gc_threads()) + if (G1CollectedHeap::use_parallel_gc_threads()) { satb_mq_set.set_par_closure(_task_id, &oc); - else + } else { satb_mq_set.set_closure(&oc); + } // This keeps claiming and applying the closure to completed buffers // until we run out of buffers or we need to abort. 
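In outline, that claim-and-apply loop looks like the sketch below (hypothetical buffer-set interface, not the real SATBMarkQueueSet API):

// Keep claiming completed SATB buffers and applying a visitor to each one,
// checking the marking clock between buffers, until no buffers remain or the
// task has to abort.
struct CompletedBufferSet {
  // Hypothetical: applies 'visit' to one completed buffer; returns false
  // when there are no completed buffers left to claim.
  bool apply_to_one_completed_buffer(void (*visit)(void* entry));
};

void drain_completed_buffers(CompletedBufferSet& buffers,
                             bool& aborted,
                             void (*visit)(void* entry),
                             void (*clock_call)(bool& aborted)) {
  while (!aborted && buffers.apply_to_one_completed_buffer(visit)) {
    clock_call(aborted);   // may set 'aborted' if the time budget is exceeded
  }
}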
if (G1CollectedHeap::use_parallel_gc_threads()) { while (!has_aborted() && satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); + } statsOnly( ++_satb_buffers_processed ); regular_clock_call(); } } else { while (!has_aborted() && satb_mq_set.apply_closure_to_completed_buffer()) { - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); + } statsOnly( ++_satb_buffers_processed ); regular_clock_call(); } @@ -3834,10 +3866,11 @@ if (!concurrent() && !has_aborted()) { // We should only do this during remark. - if (G1CollectedHeap::use_parallel_gc_threads()) + if (G1CollectedHeap::use_parallel_gc_threads()) { satb_mq_set.par_iterate_closure_all_threads(_task_id); - else + } else { satb_mq_set.iterate_closure_all_threads(); + } } _draining_satb_buffers = false; @@ -3846,10 +3879,11 @@ concurrent() || satb_mq_set.completed_buffers_num() == 0, "invariant"); - if (G1CollectedHeap::use_parallel_gc_threads()) + if (G1CollectedHeap::use_parallel_gc_threads()) { satb_mq_set.set_par_closure(_task_id, NULL); - else + } else { satb_mq_set.set_closure(NULL); + } // again, this was a potentially expensive operation, decrease the // limits to get the regular clock call early @@ -3857,16 +3891,16 @@ } void CMTask::drain_region_stack(BitMapClosure* bc) { - if (has_aborted()) - return; + if (has_aborted()) return; assert(_region_finger == NULL, "it should be NULL when we're not scanning a region"); if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) { - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] draining region stack, size = %d", _task_id, _cm->region_stack_size()); + } MemRegion mr; @@ -3874,9 +3908,11 @@ mr = _aborted_region; _aborted_region = MemRegion(); - if (_cm->verbose_low()) - gclog_or_tty->print_cr("[%d] scanning aborted region [ " PTR_FORMAT ", " PTR_FORMAT " )", - _task_id, mr.start(), mr.end()); + if (_cm->verbose_low()) { + gclog_or_tty->print_cr("[%d] scanning aborted region " + "[ " PTR_FORMAT ", " PTR_FORMAT " )", + _task_id, mr.start(), mr.end()); + } } else { mr = _cm->region_stack_pop_lock_free(); // it returns MemRegion() if the pop fails @@ -3884,10 +3920,11 @@ } while (mr.start() != NULL) { - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] we are scanning region " "["PTR_FORMAT", "PTR_FORMAT")", _task_id, mr.start(), mr.end()); + } assert(mr.end() <= _cm->finger(), "otherwise the region shouldn't be on the stack"); @@ -3898,9 +3935,9 @@ // We finished iterating over the region without aborting. regular_clock_call(); - if (has_aborted()) + if (has_aborted()) { mr = MemRegion(); - else { + } else { mr = _cm->region_stack_pop_lock_free(); // it returns MemRegion() if the pop fails statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); @@ -3946,9 +3983,10 @@ _region_finger = NULL; } - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] drained region stack, size = %d", _task_id, _cm->region_stack_size()); + } } } @@ -4149,17 +4187,18 @@ ++_calls; - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, " "target = %1.2lfms >>>>>>>>>>", _task_id, _calls, _time_target_ms); + } // Set up the bitmap and oop closures. 
Anything that uses them is // eventually called from this method, so it is OK to allocate these // statically. CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); - CMOopClosure oop_closure(_g1h, _cm, this); - set_oop_closure(&oop_closure); + G1CMOopClosure cm_oop_closure(_g1h, _cm, this); + set_cm_oop_closure(&cm_oop_closure); if (_cm->has_overflown()) { // This can happen if the region stack or the mark stack overflows @@ -4209,11 +4248,12 @@ // fresh region, _finger points to start(). MemRegion mr = MemRegion(_finger, _region_limit); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] we're scanning part " "["PTR_FORMAT", "PTR_FORMAT") " "of region "PTR_FORMAT, _task_id, _finger, _region_limit, _curr_region); + } // Let's iterate over the bitmap of the part of the // region that is left. @@ -4269,17 +4309,19 @@ assert(_curr_region == NULL, "invariant"); assert(_finger == NULL, "invariant"); assert(_region_limit == NULL, "invariant"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id); + } HeapRegion* claimed_region = _cm->claim_region(_task_id); if (claimed_region != NULL) { // Yes, we managed to claim one statsOnly( ++_regions_claimed ); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] we successfully claimed " "region "PTR_FORMAT, _task_id, claimed_region); + } setup_for_region(claimed_region); assert(_curr_region == claimed_region, "invariant"); @@ -4306,8 +4348,9 @@ assert(_cm->out_of_regions(), "at this point we should be out of regions"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] all regions claimed", _task_id); + } // Try to reduce the number of available SATB buffers so that // remark has less work to do. @@ -4331,17 +4374,19 @@ assert(_cm->out_of_regions() && _task_queue->size() == 0, "only way to reach here"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] starting to steal", _task_id); + } while (!has_aborted()) { oop obj; statsOnly( ++_steal_attempts ); if (_cm->try_stealing(_task_id, &_hash_seed, obj)) { - if (_cm->verbose_medium()) + if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully", _task_id, (void*) obj); + } statsOnly( ++_steals ); @@ -4379,8 +4424,9 @@ assert(_cm->out_of_regions(), "only way to reach here"); assert(_task_queue->size() == 0, "only way to reach here"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id); + } _termination_start_time_ms = os::elapsedVTime() * 1000.0; // The CMTask class also extends the TerminatorTerminator class, @@ -4418,14 +4464,17 @@ guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); guarantee(!_cm->region_stack_overflow(), "only way to reach here"); - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id); + } } else { // Apparently there's more work to do. Let's abort this task. It // will restart it and we can hopefully find more things to do. 
- if (_cm->verbose_low()) - gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id); + if (_cm->verbose_low()) { + gclog_or_tty->print_cr("[%d] apparently there is more work to do", + _task_id); + } set_has_aborted(); statsOnly( ++_aborted_termination ); @@ -4435,7 +4484,7 @@ // Mainly for debugging purposes to make sure that a pointer to the // closure which was statically allocated in this frame doesn't // escape it by accident. - set_oop_closure(NULL); + set_cm_oop_closure(NULL); double end_time_ms = os::elapsedVTime() * 1000.0; double elapsed_time_ms = end_time_ms - _start_time_ms; // Update the step history. @@ -4462,8 +4511,9 @@ // what they are doing and re-initialise in a safe manner. We // will achieve this with the use of two barrier sync points. - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] detected overflow", _task_id); + } _cm->enter_first_sync_barrier(_task_id); // When we exit this sync barrier we know that all tasks have @@ -4486,15 +4536,17 @@ gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, " "elapsed = %1.2lfms <<<<<<<<<<", _task_id, _time_target_ms, elapsed_time_ms); - if (_cm->has_aborted()) + if (_cm->has_aborted()) { gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========", _task_id); + } } } else { - if (_cm->verbose_low()) + if (_cm->verbose_low()) { gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, " "elapsed = %1.2lfms <<<<<<<<<<", _task_id, _time_target_ms, elapsed_time_ms); + } } _claimed = false; @@ -4510,7 +4562,7 @@ _nextMarkBitMap(NULL), _hash_seed(17), _task_queue(task_queue), _task_queues(task_queues), - _oop_closure(NULL), + _cm_oop_closure(NULL), _aborted_region(MemRegion()) { guarantee(task_queue != NULL, "invariant"); guarantee(task_queues != NULL, "invariant"); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -131,22 +131,22 @@ void mark(HeapWord* addr) { assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), "outside underlying space?"); - _bm.at_put(heapWordToOffset(addr), true); + _bm.set_bit(heapWordToOffset(addr)); } void clear(HeapWord* addr) { assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), "outside underlying space?"); - _bm.at_put(heapWordToOffset(addr), false); + _bm.clear_bit(heapWordToOffset(addr)); } bool parMark(HeapWord* addr) { assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), "outside underlying space?"); - return _bm.par_at_put(heapWordToOffset(addr), true); + return _bm.par_set_bit(heapWordToOffset(addr)); } bool parClear(HeapWord* addr) { assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), "outside underlying space?"); - return _bm.par_at_put(heapWordToOffset(addr), false); + return _bm.par_clear_bit(heapWordToOffset(addr)); } void markRange(MemRegion mr); void clearAll(); @@ -605,10 +605,10 @@ void mark_stack_pop(oop* arr, int max, int* n) { _markStack.par_pop_arr(arr, max, n); } - size_t mark_stack_size() { return _markStack.size(); } + size_t mark_stack_size() { return _markStack.size(); } size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; } - bool mark_stack_overflow() { return _markStack.overflow(); } - bool mark_stack_empty() { return _markStack.isEmpty(); } + bool mark_stack_overflow() { return 
_markStack.overflow(); } + bool mark_stack_empty() { return _markStack.isEmpty(); } // (Lock-free) Manipulation of the region stack bool region_stack_push_lock_free(MemRegion mr) { @@ -736,12 +736,14 @@ // will dump the contents of its reference fields, as well as // liveness information for the object and its referents. The dump // will be written to a file with the following name: - // G1PrintReachableBaseFile + "." + str. use_prev_marking decides - // whether the prev (use_prev_marking == true) or next - // (use_prev_marking == false) marking information will be used to - // determine the liveness of each object / referent. If all is true, - // all objects in the heap will be dumped, otherwise only the live - // ones. In the dump the following symbols / abbreviations are used: + // G1PrintReachableBaseFile + "." + str. + // vo decides whether the prev (vo == UsePrevMarking), the next + // (vo == UseNextMarking) marking information, or the mark word + // (vo == UseMarkWord) will be used to determine the liveness of + // each object / referent. + // If all is true, all objects in the heap will be dumped, otherwise + // only the live ones. In the dump the following symbols / breviations + // are used: // M : an explicitly live object (its bitmap bit is set) // > : an implicitly live object (over tams) // O : an object outside the G1 heap (typically: in the perm gen) @@ -749,7 +751,7 @@ // AND MARKED : indicates that an object is both explicitly and // implicitly live (it should be one or the other, not both) void print_reachable(const char* str, - bool use_prev_marking, bool all) PRODUCT_RETURN; + VerifyOption vo, bool all) PRODUCT_RETURN; // Clear the next marking bitmap (will be called concurrently). void clearNextBitmap(); @@ -831,8 +833,9 @@ // _min_finger then we need to gray objects. // This routine is like registerCSetRegion but for an entire // collection of regions. - if (max_finger > _min_finger) + if (max_finger > _min_finger) { _should_gray_objects = true; + } } // Returns "true" if at least one mark has been completed. @@ -878,14 +881,18 @@ // The following indicate whether a given verbose level has been // set. Notice that anything above stats is conditional to // _MARKING_VERBOSE_ having been set to 1 - bool verbose_stats() - { return _verbose_level >= stats_verbose; } - bool verbose_low() - { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; } - bool verbose_medium() - { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; } - bool verbose_high() - { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; } + bool verbose_stats() { + return _verbose_level >= stats_verbose; + } + bool verbose_low() { + return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; + } + bool verbose_medium() { + return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; + } + bool verbose_high() { + return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; + } }; // A class representing a marking task. 
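The print_reachable() hunks above replace the use_prev_marking flag with this VerifyOption, turning the liveness/TAMS test into a three-way switch. A schematic version of that check, using stand-in declarations rather than the real G1CollectedHeap and HeapRegion methods:

enum VerifyOption {
  VerifyOption_G1UsePrevMarking,   // prev bitmap / prev TAMS
  VerifyOption_G1UseNextMarking,   // next bitmap / next TAMS
  VerifyOption_G1UseMarkWord       // object mark word only
};

// Simplified stand-ins for the heap and region queries used by the closures.
struct Obj;
bool allocated_since_prev_marking(const Obj* o);
bool allocated_since_next_marking(const Obj* o);
bool is_marked_prev(const Obj* o);
bool is_marked_next(const Obj* o);
bool is_gc_marked(const Obj* o);

void classify_liveness(const Obj* o, VerifyOption vo,
                       bool& over_tams, bool& marked) {
  over_tams = false;
  marked = false;
  switch (vo) {
    case VerifyOption_G1UsePrevMarking:
      over_tams = allocated_since_prev_marking(o);
      marked    = is_marked_prev(o);
      break;
    case VerifyOption_G1UseNextMarking:
      over_tams = allocated_since_next_marking(o);
      marked    = is_marked_next(o);
      break;
    case VerifyOption_G1UseMarkWord:
      // TAMS has no relevance when verifying against the mark word.
      marked = is_gc_marked(o);
      break;
  }
}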
@@ -928,7 +935,7 @@ double _start_time_ms; // the oop closure used for iterations over oops - OopClosure* _oop_closure; + G1CMOopClosure* _cm_oop_closure; // the region this task is scanning, NULL if we're not scanning any HeapRegion* _curr_region; @@ -1061,8 +1068,9 @@ // respective limit and calls reached_limit() if they have void check_limits() { if (_words_scanned >= _words_scanned_limit || - _refs_reached >= _refs_reached_limit) + _refs_reached >= _refs_reached_limit) { reached_limit(); + } } // this is supposed to be called regularly during a marking step as // it checks a bunch of conditions that might cause the marking step @@ -1122,32 +1130,17 @@ // Clears any recorded partially scanned region void clear_aborted_region() { set_aborted_region(MemRegion()); } - void set_oop_closure(OopClosure* oop_closure) { - _oop_closure = oop_closure; - } + void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure); // It grays the object by marking it and, if necessary, pushing it // on the local queue - void deal_with_reference(oop obj); + inline void deal_with_reference(oop obj); // It scans an object and visits its children. - void scan_object(oop obj) { - assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); - - if (_cm->verbose_high()) - gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, - _task_id, (void*) obj); - - size_t obj_size = obj->size(); - _words_scanned += obj_size; - - obj->oop_iterate(_oop_closure); - statsOnly( ++_objs_scanned ); - check_limits(); - } + void scan_object(oop obj); // It pushes an object on the local queue. - void push(oop obj); + inline void push(oop obj); // These two move entries to/from the global stack. void move_entries_to_global_stack(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP + +#include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + +inline void CMTask::push(oop obj) { + HeapWord* objAddr = (HeapWord*) obj; + assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); + assert(!_g1h->is_on_master_free_list( + _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant"); + assert(!_g1h->is_obj_ill(obj), "invariant"); + assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); + + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj); + } + + if (!_task_queue->push(obj)) { + // The local task queue looks full. We need to push some entries + // to the global stack. + + if (_cm->verbose_medium()) { + gclog_or_tty->print_cr("[%d] task queue overflow, " + "moving entries to the global stack", + _task_id); + } + move_entries_to_global_stack(); + + // this should succeed since, even if we overflow the global + // stack, we should have definitely removed some entries from the + // local queue. So, there must be space on it. + bool success = _task_queue->push(obj); + assert(success, "invariant"); + } + + statsOnly( int tmp_size = _task_queue->size(); + if (tmp_size > _local_max_size) { + _local_max_size = tmp_size; + } + ++_local_pushes ); +} + +// This determines whether the method below will check both the local +// and global fingers when determining whether to push on the stack a +// gray object (value 1) or whether it will only check the global one +// (value 0). The tradeoffs are that the former will be a bit more +// accurate and possibly push less on the stack, but it might also be +// a little bit slower. + +#define _CHECK_BOTH_FINGERS_ 1 + +inline void CMTask::deal_with_reference(oop obj) { + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT, + _task_id, (void*) obj); + } + + ++_refs_reached; + + HeapWord* objAddr = (HeapWord*) obj; + assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); + if (_g1h->is_in_g1_reserved(objAddr)) { + assert(obj != NULL, "null check is implicit"); + if (!_nextMarkBitMap->isMarked(objAddr)) { + // Only get the containing region if the object is not marked on the + // bitmap (otherwise, it's a waste of time since we won't do + // anything with it). + HeapRegion* hr = _g1h->heap_region_containing_raw(obj); + if (!hr->obj_allocated_since_next_marking(obj)) { + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked", + _task_id, (void*) obj); + } + + // we need to mark it first + if (_nextMarkBitMap->parMark(objAddr)) { + // No OrderAccess:store_load() is needed. It is implicit in the + // CAS done in parMark(objAddr) above + HeapWord* global_finger = _cm->finger(); + +#if _CHECK_BOTH_FINGERS_ + // we will check both the local and global fingers + + if (_finger != NULL && objAddr < _finger) { + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), " + "pushing it", _task_id, _finger); + } + push(obj); + } else if (_curr_region != NULL && objAddr < _region_limit) { + // do nothing + } else if (objAddr < global_finger) { + // Notice that the global finger might be moving forward + // concurrently. This is not a problem. 
In the worst case, we + // mark the object while it is above the global finger and, by + // the time we read the global finger, it has moved forward + // passed this object. In this case, the object will probably + // be visited when a task is scanning the region and will also + // be pushed on the stack. So, some duplicate work, but no + // correctness problems. + + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] below the global finger " + "("PTR_FORMAT"), pushing it", + _task_id, global_finger); + } + push(obj); + } else { + // do nothing + } +#else // _CHECK_BOTH_FINGERS_ + // we will only check the global finger + + if (objAddr < global_finger) { + // see long comment above + + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] below the global finger " + "("PTR_FORMAT"), pushing it", + _task_id, global_finger); + } + push(obj); + } +#endif // _CHECK_BOTH_FINGERS_ + } + } + } + } +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -578,16 +578,10 @@ } if (res == NULL && do_expand) { if (expand(word_size * HeapWordSize)) { - // The expansion succeeded and so we should have at least one - // region on the free list. - res = _free_list.remove_head(); - } - } - if (res != NULL) { - if (G1PrintHeapRegions) { - gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " - "top "PTR_FORMAT, res->hrs_index(), - res->bottom(), res->end(), res->top()); + // Even though the heap was expanded, it might not have reached + // the desired size. So, we cannot assume that the allocation + // will succeed. + res = _free_list.remove_head_or_null(); } } return res; @@ -598,22 +592,27 @@ HeapRegion* alloc_region = NULL; if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { alloc_region = new_region(word_size, true /* do_expand */); - if (purpose == GCAllocForSurvived && alloc_region != NULL) { - alloc_region->set_survivor(); + if (alloc_region != NULL) { + if (purpose == GCAllocForSurvived) { + _hr_printer.alloc(alloc_region, G1HRPrinter::Survivor); + alloc_region->set_survivor(); + } else { + _hr_printer.alloc(alloc_region, G1HRPrinter::Old); + } + ++_gc_alloc_region_counts[purpose]; } - ++_gc_alloc_region_counts[purpose]; } else { g1_policy()->note_alloc_region_limit_reached(purpose); } return alloc_region; } -int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, - size_t word_size) { +size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, + size_t word_size) { assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); - int first = -1; + size_t first = G1_NULL_HRS_INDEX; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower // path. 
The caller will attempt the expasion if this fails, so @@ -622,7 +621,7 @@ if (hr != NULL) { first = hr->hrs_index(); } else { - first = -1; + first = G1_NULL_HRS_INDEX; } } else { // We can't allocate humongous regions while cleanupComplete() is @@ -637,10 +636,10 @@ append_secondary_free_list_if_not_empty_with_lock(); if (free_regions() >= num_regions) { - first = _hrs->find_contiguous(num_regions); - if (first != -1) { - for (int i = first; i < first + (int) num_regions; ++i) { - HeapRegion* hr = _hrs->at(i); + first = _hrs.find_contiguous(num_regions); + if (first != G1_NULL_HRS_INDEX) { + for (size_t i = first; i < first + num_regions; ++i) { + HeapRegion* hr = region_at(i); assert(hr->is_empty(), "sanity"); assert(is_on_master_free_list(hr), "sanity"); hr->set_pending_removal(true); @@ -653,15 +652,15 @@ } HeapWord* -G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, +G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, size_t num_regions, size_t word_size) { - assert(first != -1, "pre-condition"); + assert(first != G1_NULL_HRS_INDEX, "pre-condition"); assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); // Index of last region in the series + 1. - int last = first + (int) num_regions; + size_t last = first + num_regions; // We need to initialize the region(s) we just discovered. This is // a bit tricky given that it can happen concurrently with @@ -676,7 +675,7 @@ assert(word_size <= word_size_sum, "sanity"); // This will be the "starts humongous" region. - HeapRegion* first_hr = _hrs->at(first); + HeapRegion* first_hr = region_at(first); // The header of the new object will be placed at the bottom of // the first region. HeapWord* new_obj = first_hr->bottom(); @@ -711,8 +710,8 @@ // Then, if there are any, we will set up the "continues // humongous" regions. HeapRegion* hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); + for (size_t i = first + 1; i < last; ++i) { + hr = region_at(i); hr->set_continuesHumongous(first_hr); } // If we have "continues humongous" regions (hr != NULL), then the @@ -733,6 +732,17 @@ assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), "new_top should be in this region"); first_hr->set_top(new_top); + if (_hr_printer.is_active()) { + HeapWord* bottom = first_hr->bottom(); + HeapWord* end = first_hr->orig_end(); + if ((first + 1) == last) { + // the series has a single humongous region + _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top); + } else { + // the series has more than one humongous regions + _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end); + } + } // Now, we will update the top fields of the "continues humongous" // regions. The reason we need to do this is that, otherwise, @@ -746,17 +756,19 @@ // last one) is actually used when we will free up the humongous // region in free_humongous_region(). 
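Region indices in these hunks change from int (with -1 meaning "not found") to size_t with G1_NULL_HRS_INDEX as the sentinel. A small, self-contained sketch of a contiguous-run search that returns such a sentinel (illustrative only, not the HeapRegionSeq implementation; the sentinel value here is chosen for the sketch):

#include <cstddef>
#include <vector>

static const size_t G1_NULL_HRS_INDEX = (size_t)-1;   // "no run found"

// Find the first run of 'num_regions' consecutive free regions, or return
// the null index so the caller can try to expand the heap and retry.
size_t find_contiguous(const std::vector<bool>& is_free, size_t num_regions) {
  size_t run_start = 0;
  size_t run_len = 0;
  for (size_t i = 0; i < is_free.size(); ++i) {
    if (is_free[i]) {
      if (run_len == 0) {
        run_start = i;
      }
      if (++run_len == num_regions) {
        return run_start;
      }
    } else {
      run_len = 0;
    }
  }
  return G1_NULL_HRS_INDEX;
}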
hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); + for (size_t i = first + 1; i < last; ++i) { + hr = region_at(i); if ((i + 1) == last) { // last continues humongous region assert(hr->bottom() < new_top && new_top <= hr->end(), "new_top should fall on this region"); hr->set_top(new_top); + _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top); } else { // not last one assert(new_top > hr->end(), "new_top should be above this region"); hr->set_top(hr->end()); + _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end()); } } // If we have continues humongous regions (hr != NULL), then the @@ -783,9 +795,9 @@ size_t num_regions = round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; size_t x_size = expansion_regions(); - size_t fs = _hrs->free_suffix(); - int first = humongous_obj_allocate_find_first(num_regions, word_size); - if (first == -1) { + size_t fs = _hrs.free_suffix(); + size_t first = humongous_obj_allocate_find_first(num_regions, word_size); + if (first == G1_NULL_HRS_INDEX) { // The only thing we can do now is attempt expansion. if (fs + x_size >= num_regions) { // If the number of regions we're trying to allocate for this @@ -799,16 +811,16 @@ assert(num_regions > fs, "earlier allocation should have succeeded"); if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { + // Even though the heap was expanded, it might not have + // reached the desired size. So, we cannot assume that the + // allocation will succeed. first = humongous_obj_allocate_find_first(num_regions, word_size); - // If the expansion was successful then the allocation - // should have been successful. - assert(first != -1, "this should have worked"); } } } HeapWord* result = NULL; - if (first != -1) { + if (first != G1_NULL_HRS_INDEX) { result = humongous_obj_allocate_initialize_regions(first, num_regions, word_size); assert(result != NULL, "it should always return a valid result"); @@ -829,12 +841,8 @@ HeapWord* G1CollectedHeap::mem_allocate(size_t word_size, - bool is_noref, - bool is_tlab, bool* gc_overhead_limit_was_exceeded) { assert_heap_not_locked_and_not_at_safepoint(); - assert(!is_tlab, "mem_allocate() this should not be called directly " - "to allocate TLABs"); // Loop until the allocation is satisified, or unsatisfied after GC. for (int try_count = 1; /* we'll return */; try_count += 1) { @@ -1158,6 +1166,35 @@ } }; +class PostCompactionPrinterClosure: public HeapRegionClosure { +private: + G1HRPrinter* _hr_printer; +public: + bool doHeapRegion(HeapRegion* hr) { + assert(!hr->is_young(), "not expecting to find young regions"); + // We only generate output for non-empty regions. 
+ if (!hr->is_empty()) { + if (!hr->isHumongous()) { + _hr_printer->post_compaction(hr, G1HRPrinter::Old); + } else if (hr->startsHumongous()) { + if (hr->capacity() == (size_t) HeapRegion::GrainBytes) { + // single humongous region + _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous); + } else { + _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); + } + } else { + assert(hr->continuesHumongous(), "only way to get here"); + _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous); + } + } + return false; + } + + PostCompactionPrinterClosure(G1HRPrinter* hr_printer) + : _hr_printer(hr_printer) { } +}; + bool G1CollectedHeap::do_collection(bool explicit_gc, bool clear_all_soft_refs, size_t word_size) { @@ -1211,7 +1248,10 @@ HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - Universe::verify(true); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); + } COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -1236,6 +1276,11 @@ g1_rem_set()->cleanupHRRS(); tear_down_region_lists(); + // We should call this after we retire any currently active alloc + // regions so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(true /* full */, (size_t) total_collections()); + // We may have added regions to the current incremental collection // set between the last GC or pause and now. We need to clear the // incremental collection set and then start rebuilding it afresh @@ -1263,7 +1308,6 @@ ref_processor()->enable_discovery(); ref_processor()->setup_policy(do_clear_all_soft_refs); - // Do collection work { HandleMark hm; // Discard invalid handles created during gc @@ -1284,7 +1328,10 @@ HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); prepare_for_verify(); - Universe::verify(false); + Universe::verify(/* allow dirty */ false, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); + } NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); @@ -1298,6 +1345,17 @@ // Resize the heap if necessary. resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); + if (_hr_printer.is_active()) { + // We should do this after we potentially resize the heap so + // that all the COMMIT / UNCOMMIT events are generated before + // the end GC event. + + PostCompactionPrinterClosure cl(hr_printer()); + heap_region_iterate(&cl); + + _hr_printer.end_gc(true /* full */, (size_t) total_collections()); + } + if (_cg1r->use_cache()) { _cg1r->clear_and_record_card_counts(); _cg1r->clear_hot_cache(); @@ -1366,6 +1424,7 @@ // Update the number of full collections that have been completed. increment_full_collections_completed(false /* concurrent */); + _hrs.verify_optional(); verify_region_sets_optional(); if (PrintHeapAtGC) { @@ -1589,6 +1648,7 @@ size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); if (expand(expand_bytes)) { + _hrs.verify_optional(); verify_region_sets_optional(); return attempt_allocation_at_safepoint(word_size, false /* expect_null_mutator_alloc_region */); @@ -1596,6 +1656,19 @@ return NULL; } +void G1CollectedHeap::update_committed_space(HeapWord* old_end, + HeapWord* new_end) { + assert(old_end != new_end, "don't call this otherwise"); + assert((HeapWord*) _g1_storage.high() == new_end, "invariant"); + + // Update the committed mem region. 
+ _g1_committed.set_end(new_end); + // Tell the card table about the update. + Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); + // Tell the BOT about the update. + _bot_shared->resize(_g1_committed.word_size()); +} + bool G1CollectedHeap::expand(size_t expand_bytes) { size_t old_mem_size = _g1_storage.committed_size(); size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); @@ -1607,47 +1680,47 @@ old_mem_size/K, aligned_expand_bytes/K); } - HeapWord* old_end = (HeapWord*)_g1_storage.high(); + // First commit the memory. + HeapWord* old_end = (HeapWord*) _g1_storage.high(); bool successful = _g1_storage.expand_by(aligned_expand_bytes); if (successful) { - HeapWord* new_end = (HeapWord*)_g1_storage.high(); - - // Expand the committed region. - _g1_committed.set_end(new_end); - - // Tell the cardtable about the expansion. - Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); - - // And the offset table as well. - _bot_shared->resize(_g1_committed.word_size()); - - expand_bytes = aligned_expand_bytes; - HeapWord* base = old_end; - - // Create the heap regions for [old_end, new_end) - while (expand_bytes > 0) { - HeapWord* high = base + HeapRegion::GrainWords; - - // Create a new HeapRegion. - MemRegion mr(base, high); - bool is_zeroed = !_g1_max_committed.contains(base); - HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); - - // Add it to the HeapRegionSeq. - _hrs->insert(hr); - _free_list.add_as_tail(hr); - - // And we used up an expansion region to create it. - _expansion_regions--; - - expand_bytes -= HeapRegion::GrainBytes; - base += HeapRegion::GrainWords; + // Then propagate this update to the necessary data structures. + HeapWord* new_end = (HeapWord*) _g1_storage.high(); + update_committed_space(old_end, new_end); + + FreeRegionList expansion_list("Local Expansion List"); + MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list); + assert(mr.start() == old_end, "post-condition"); + // mr might be a smaller region than what was requested if + // expand_by() was unable to allocate the HeapRegion instances + assert(mr.end() <= new_end, "post-condition"); + + size_t actual_expand_bytes = mr.byte_size(); + assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); + assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), + "post-condition"); + if (actual_expand_bytes < aligned_expand_bytes) { + // We could not expand _hrs to the desired size. In this case we + // need to shrink the committed space accordingly. + assert(mr.end() < new_end, "invariant"); + + size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes; + // First uncommit the memory. + _g1_storage.shrink_by(diff_bytes); + // Then propagate this update to the necessary data structures. + update_committed_space(new_end, mr.end()); } - assert(base == new_end, "sanity"); - - // Now update max_committed if necessary. - _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end)); - + _free_list.add_as_tail(&expansion_list); + + if (_hr_printer.is_active()) { + HeapWord* curr = mr.start(); + while (curr < mr.end()) { + HeapWord* curr_end = curr + HeapRegion::GrainWords; + _hr_printer.commit(curr, curr_end); + curr = curr_end; + } + assert(curr == mr.end(), "post-condition"); + } } else { // The expansion of the virtual storage space was unsuccessful. // Let's see if it was because we ran out of swap. 
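
Note: the rewritten expand() above commits the virtual space first, asks the region sequence to create HeapRegion instances for the newly committed range into a local free list, and, if fewer regions could be created than requested, shrinks the committed space back to match before publishing the list. A compact sketch of that commit/rollback shape with stand-in types (Storage, commit, uncommit and build_regions are illustrative, not the HotSpot API):

    #include <cstddef>

    struct Storage {
      size_t committed_bytes;
      bool commit(size_t bytes)   { committed_bytes += bytes; return true; }
      void uncommit(size_t bytes) { committed_bytes -= bytes; }
    };

    // build_regions reports how many of the newly committed bytes it managed
    // to cover with region metadata; this can be less than requested if
    // allocating the metadata fails part way through.
    bool expand(Storage& storage, size_t requested_bytes,
                size_t (*build_regions)(size_t bytes)) {
      if (!storage.commit(requested_bytes)) {                 // 1. commit memory
        return false;
      }
      size_t covered_bytes = build_regions(requested_bytes);  // 2. create regions
      if (covered_bytes < requested_bytes) {
        // 3. Fewer regions than requested could be created: shrink the
        //    committed space back so both views of the heap end stay
        //    consistent.
        storage.uncommit(requested_bytes - covered_bytes);
      }
      return true;
    }
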
@@ -1667,37 +1740,41 @@ return successful; } -void G1CollectedHeap::shrink_helper(size_t shrink_bytes) -{ +void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { size_t old_mem_size = _g1_storage.committed_size(); size_t aligned_shrink_bytes = ReservedSpace::page_align_size_down(shrink_bytes); aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, HeapRegion::GrainBytes); size_t num_regions_deleted = 0; - MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); - - assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); - if (mr.byte_size() > 0) + MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); + HeapWord* old_end = (HeapWord*) _g1_storage.high(); + assert(mr.end() == old_end, "post-condition"); + if (mr.byte_size() > 0) { + if (_hr_printer.is_active()) { + HeapWord* curr = mr.end(); + while (curr > mr.start()) { + HeapWord* curr_end = curr; + curr -= HeapRegion::GrainWords; + _hr_printer.uncommit(curr, curr_end); + } + assert(curr == mr.start(), "post-condition"); + } + _g1_storage.shrink_by(mr.byte_size()); - assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); - - _g1_committed.set_end(mr.start()); - _expansion_regions += num_regions_deleted; - - // Tell the cardtable about it. - Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); - - // And the offset table as well. - _bot_shared->resize(_g1_committed.word_size()); - - HeapRegionRemSet::shrink_heap(n_regions()); - - if (Verbose && PrintGC) { - size_t new_mem_size = _g1_storage.committed_size(); - gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", - old_mem_size/K, aligned_shrink_bytes/K, - new_mem_size/K); + HeapWord* new_end = (HeapWord*) _g1_storage.high(); + assert(mr.start() == new_end, "post-condition"); + + _expansion_regions += num_regions_deleted; + update_committed_space(old_end, new_end); + HeapRegionRemSet::shrink_heap(n_regions()); + + if (Verbose && PrintGC) { + size_t new_mem_size = _g1_storage.committed_size(); + gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", + old_mem_size/K, aligned_shrink_bytes/K, + new_mem_size/K); + } } } @@ -1712,6 +1789,7 @@ shrink_helper(shrink_bytes); rebuild_region_lists(); + _hrs.verify_optional(); verify_region_sets_optional(); } @@ -1799,6 +1877,10 @@ MutexLocker x(Heap_lock); + // We have to initialize the printer before committing the heap, as + // it will be used then. + _hr_printer.set_active(G1PrintHeapRegions); + // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some @@ -1890,9 +1972,9 @@ _g1_storage.initialize(g1_rs, 0); _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); - _g1_max_committed = _g1_committed; - _hrs = new HeapRegionSeq(_expansion_regions); - guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); + _hrs.initialize((HeapWord*) _g1_reserved.start(), + (HeapWord*) _g1_reserved.end(), + _expansion_regions); // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. @@ -1991,8 +2073,9 @@ // Here we allocate the dummy full region that is required by the // G1AllocRegion class. If we don't pass an address in the reserved // space here, lots of asserts fire. 
- MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); - HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); + + HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */, + _g1_reserved.start()); // We'll re-use the same region whether the alloc region will // require BOT updates or not and, if it doesn't, then a non-young // region will complain that it cannot support allocations without @@ -2100,7 +2183,7 @@ size_t G1CollectedHeap::recalculate_used() const { SumUsedClosure blk; - _hrs->iterate(&blk); + heap_region_iterate(&blk); return blk.result(); } @@ -2120,7 +2203,7 @@ size_t G1CollectedHeap::recalculate_used_regions() const { SumUsedRegionsClosure blk; - _hrs->iterate(&blk); + heap_region_iterate(&blk); return blk.result(); } #endif // PRODUCT @@ -2285,8 +2368,8 @@ } bool G1CollectedHeap::is_in(const void* p) const { - if (_g1_committed.contains(p)) { - HeapRegion* hr = _hrs->addr_to_region(p); + HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p); + if (hr != NULL) { return hr->is_in(p); } else { return _perm_gen->as_gen()->is_in(p); @@ -2314,7 +2397,7 @@ void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { IterateOopClosureRegionClosure blk(_g1_committed, cl); - _hrs->iterate(&blk); + heap_region_iterate(&blk); if (do_perm) { perm_gen()->oop_iterate(cl); } @@ -2322,7 +2405,7 @@ void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { IterateOopClosureRegionClosure blk(mr, cl); - _hrs->iterate(&blk); + heap_region_iterate(&blk); if (do_perm) { perm_gen()->oop_iterate(cl); } @@ -2344,7 +2427,7 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { IterateObjectClosureRegionClosure blk(cl); - _hrs->iterate(&blk); + heap_region_iterate(&blk); if (do_perm) { perm_gen()->object_iterate(cl); } @@ -2369,24 +2452,17 @@ void G1CollectedHeap::space_iterate(SpaceClosure* cl) { SpaceClosureRegionClosure blk(cl); - _hrs->iterate(&blk); -} - -void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { - _hrs->iterate(cl); + heap_region_iterate(&blk); +} + +void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { + _hrs.iterate(cl); } void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, - HeapRegionClosure* cl) { - _hrs->iterate_from(r, cl); -} - -void -G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { - _hrs->iterate_from(idx, cl); -} - -HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } + HeapRegionClosure* cl) const { + _hrs.iterate_from(r, cl); +} void G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, @@ -2568,7 +2644,7 @@ } CompactibleSpace* G1CollectedHeap::first_compactible_space() { - return _hrs->length() > 0 ? _hrs->at(0) : NULL; + return n_regions() > 0 ? 
region_at(0) : NULL; } @@ -2623,11 +2699,6 @@ } } -size_t G1CollectedHeap::large_typearray_limit() { - // FIXME - return HeapRegion::GrainBytes/HeapWordSize; -} - size_t G1CollectedHeap::max_capacity() const { return _g1_reserved.byte_size(); } @@ -2645,17 +2716,18 @@ } class VerifyLivenessOopClosure: public OopClosure { - G1CollectedHeap* g1h; + G1CollectedHeap* _g1h; + VerifyOption _vo; public: - VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { - g1h = _g1h; - } + VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo): + _g1h(g1h), _vo(vo) + { } void do_oop(narrowOop *p) { do_oop_work(p); } void do_oop( oop *p) { do_oop_work(p); } template void do_oop_work(T *p) { oop obj = oopDesc::load_decode_heap_oop(p); - guarantee(obj == NULL || !g1h->is_obj_dead(obj), + guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), "Dead object referenced by a not dead object"); } }; @@ -2665,18 +2737,30 @@ G1CollectedHeap* _g1h; size_t _live_bytes; HeapRegion *_hr; - bool _use_prev_marking; + VerifyOption _vo; public: - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information - VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) - : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. + VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo) + : _live_bytes(0), _hr(hr), _vo(vo) { _g1h = G1CollectedHeap::heap(); } void do_object(oop o) { - VerifyLivenessOopClosure isLive(_g1h); + VerifyLivenessOopClosure isLive(_g1h, _vo); assert(o != NULL, "Huh?"); - if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { + if (!_g1h->is_obj_dead_cond(o, _vo)) { + // If the object is alive according to the mark word, + // then verify that the marking information agrees. + // Note we can't verify the contra-positive of the + // above: if the object is dead (according to the mark + // word), it may not be marked, or may have been marked + // but has since became dead, or may have been allocated + // since the last marking. + if (_vo == VerifyOption_G1UseMarkWord) { + guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch"); + } + o->oop_iterate(&isLive); if (!_hr->obj_allocated_since_prev_marking(o)) { size_t obj_size = o->size(); // Make sure we don't overflow @@ -2718,17 +2802,18 @@ class VerifyRegionClosure: public HeapRegionClosure { private: - bool _allow_dirty; - bool _par; - bool _use_prev_marking; - bool _failures; + bool _allow_dirty; + bool _par; + VerifyOption _vo; + bool _failures; public: - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information - VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. 
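
Note: VerifyRegionClosure and the other verification closures in this patch now take a VerifyOption value instead of a bool use_prev_marking flag, which makes room for the third, mark-word-based mode used after the marking phase of a full GC. The enumerators below are the ones used throughout the patch; the enum itself is defined elsewhere, so this exact spelling is an assumption for illustration.

    // Assumed shape of the enum; only the three G1 enumerator names appear in
    // the patch itself, and the real definition may contain further values.
    enum VerifyOption {
      VerifyOption_G1UsePrevMarking,  // "prev" marking information (the default)
      VerifyOption_G1UseNextMarking,  // "next" marking information (end of remark)
      VerifyOption_G1UseMarkWord      // the mark word in the object header (full GC)
    };
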
+ VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo) : _allow_dirty(allow_dirty), _par(par), - _use_prev_marking(use_prev_marking), + _vo(vo), _failures(false) {} bool failures() { @@ -2740,11 +2825,11 @@ "Should be unclaimed at verify points."); if (!r->continuesHumongous()) { bool failures = false; - r->verify(_allow_dirty, _use_prev_marking, &failures); + r->verify(_allow_dirty, _vo, &failures); if (failures) { _failures = true; } else { - VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); + VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); r->object_iterate(¬_dead_yet_cl); if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " @@ -2764,14 +2849,15 @@ class VerifyRootsClosure: public OopsInGenClosure { private: G1CollectedHeap* _g1h; - bool _use_prev_marking; + VerifyOption _vo; bool _failures; public: - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information - VerifyRootsClosure(bool use_prev_marking) : + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. + VerifyRootsClosure(VerifyOption vo) : _g1h(G1CollectedHeap::heap()), - _use_prev_marking(use_prev_marking), + _vo(vo), _failures(false) { } bool failures() { return _failures; } @@ -2780,9 +2866,12 @@ T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { + if (_g1h->is_obj_dead_cond(obj, _vo)) { gclog_or_tty->print_cr("Root location "PTR_FORMAT" " "points to dead obj "PTR_FORMAT, p, (void*) obj); + if (_vo == VerifyOption_G1UseMarkWord) { + gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark())); + } obj->print_on(gclog_or_tty); _failures = true; } @@ -2798,19 +2887,19 @@ class G1ParVerifyTask: public AbstractGangTask { private: G1CollectedHeap* _g1h; - bool _allow_dirty; - bool _use_prev_marking; - bool _failures; + bool _allow_dirty; + VerifyOption _vo; + bool _failures; public: - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information - G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, - bool use_prev_marking) : + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. + G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) : AbstractGangTask("Parallel verify task"), _g1h(g1h), _allow_dirty(allow_dirty), - _use_prev_marking(use_prev_marking), + _vo(vo), _failures(false) { } bool failures() { @@ -2819,7 +2908,7 @@ void work(int worker_i) { HandleMark hm; - VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); + VerifyRegionClosure blk(_allow_dirty, true, _vo); _g1h->heap_region_par_iterate_chunked(&blk, worker_i, HeapRegion::ParVerifyClaimValue); if (blk.failures()) { @@ -2829,19 +2918,21 @@ }; void G1CollectedHeap::verify(bool allow_dirty, bool silent) { - verify(allow_dirty, silent, /* use_prev_marking */ true); + verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking); } void G1CollectedHeap::verify(bool allow_dirty, bool silent, - bool use_prev_marking) { + VerifyOption vo) { if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } - VerifyRootsClosure rootsCl(use_prev_marking); + VerifyRootsClosure rootsCl(vo); CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); + // We apply the relevant closures to all the oops in the // system dictionary, the string table and the code cache. const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + process_strong_roots(true, // activate StrongRootsScope true, // we set "collecting perm gen" to true, // so we don't reset the dirty cards in the perm gen. @@ -2849,21 +2940,37 @@ &rootsCl, &blobsCl, &rootsCl); - // Since we used "collecting_perm_gen" == true above, we will not have - // checked the refs from perm into the G1-collected heap. We check those - // references explicitly below. Whether the relevant cards are dirty - // is checked further below in the rem set verification. - if (!silent) { gclog_or_tty->print("Permgen roots "); } - perm_gen()->oop_iterate(&rootsCl); + + // If we're verifying after the marking phase of a Full GC then we can't + // treat the perm gen as roots into the G1 heap. Some of the objects in + // the perm gen may be dead and hence not marked. If one of these dead + // objects is considered to be a root then we may end up with a false + // "Root location points to dead ob " failure. + if (vo != VerifyOption_G1UseMarkWord) { + // Since we used "collecting_perm_gen" == true above, we will not have + // checked the refs from perm into the G1-collected heap. We check those + // references explicitly below. Whether the relevant cards are dirty + // is checked further below in the rem set verification. + if (!silent) { gclog_or_tty->print("Permgen roots "); } + perm_gen()->oop_iterate(&rootsCl); + } bool failures = rootsCl.failures(); - if (!silent) { gclog_or_tty->print("HeapRegionSets "); } - verify_region_sets(); + + if (vo != VerifyOption_G1UseMarkWord) { + // If we're verifying during a full GC then the region sets + // will have been torn down at the start of the GC. Therefore + // verifying the region sets will fail. So we only verify + // the region sets when not in a full GC. 
+ if (!silent) { gclog_or_tty->print("HeapRegionSets "); } + verify_region_sets(); + } + if (!silent) { gclog_or_tty->print("HeapRegions "); } if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity check"); - G1ParVerifyTask task(this, allow_dirty, use_prev_marking); + G1ParVerifyTask task(this, allow_dirty, vo); int n_workers = workers()->total_workers(); set_par_threads(n_workers); workers()->run_task(&task); @@ -2880,8 +2987,8 @@ assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity check"); } else { - VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); - _hrs->iterate(&blk); + VerifyRegionClosure blk(allow_dirty, false, vo); + heap_region_iterate(&blk); if (blk.failures()) { failures = true; } @@ -2896,7 +3003,7 @@ #ifndef PRODUCT if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { concurrent_mark()->print_reachable("at-verification-failure", - use_prev_marking, false /* all */); + vo, false /* all */); } #endif gclog_or_tty->flush(); @@ -2950,7 +3057,7 @@ void G1CollectedHeap::print_on_extended(outputStream* st) const { PrintRegionClosure blk(st); - _hrs->iterate(&blk); + heap_region_iterate(&blk); } void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { @@ -2989,14 +3096,55 @@ SpecializationStats::print(); } -int G1CollectedHeap::addr_to_arena_id(void* addr) const { - HeapRegion* hr = heap_region_containing(addr); - if (hr == NULL) { - return 0; - } else { - return 1; - } -} +#ifndef PRODUCT +// Helpful for debugging RSet issues. + +class PrintRSetsClosure : public HeapRegionClosure { +private: + const char* _msg; + size_t _occupied_sum; + +public: + bool doHeapRegion(HeapRegion* r) { + HeapRegionRemSet* hrrs = r->rem_set(); + size_t occupied = hrrs->occupied(); + _occupied_sum += occupied; + + gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT, + HR_FORMAT_PARAMS(r)); + if (occupied == 0) { + gclog_or_tty->print_cr(" RSet is empty"); + } else { + hrrs->print(); + } + gclog_or_tty->print_cr("----------"); + return false; + } + + PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) { + gclog_or_tty->cr(); + gclog_or_tty->print_cr("========================================"); + gclog_or_tty->print_cr(msg); + gclog_or_tty->cr(); + } + + ~PrintRSetsClosure() { + gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum); + gclog_or_tty->print_cr("========================================"); + gclog_or_tty->cr(); + } +}; + +void G1CollectedHeap::print_cset_rsets() { + PrintRSetsClosure cl("Printing CSet RSets"); + collection_set_iterate(&cl); +} + +void G1CollectedHeap::print_all_rsets() { + PrintRSetsClosure cl("Printing All RSets");; + heap_region_iterate(&cl); +} +#endif // PRODUCT G1CollectedHeap* G1CollectedHeap::heap() { assert(_sh->kind() == CollectedHeap::G1CollectedHeap, @@ -3053,24 +3201,6 @@ } } -class VerifyMarkedObjsClosure: public ObjectClosure { - G1CollectedHeap* _g1h; - public: - VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} - void do_object(oop obj) { - assert(obj->mark()->is_marked() ? 
!_g1h->is_obj_dead(obj) : true, - "markandsweep mark should agree with concurrent deadness"); - } -}; - -void -G1CollectedHeap::checkConcurrentMark() { - VerifyMarkedObjsClosure verifycl(this); - // MutexLockerEx x(getMarkBitMapLock(), - // Mutex::_no_safepoint_check_flag); - object_iterate(&verifycl, false); -} - void G1CollectedHeap::do_sync_mark() { _cm->checkpointRootsInitial(); _cm->markFromRoots(); @@ -3149,12 +3279,27 @@ // -struct PrepareForRSScanningClosure : public HeapRegionClosure { - bool doHeapRegion(HeapRegion *r) { - r->rem_set()->set_iter_claimed(0); +#ifdef ASSERT +class VerifyCSetClosure: public HeapRegionClosure { +public: + bool doHeapRegion(HeapRegion* hr) { + // Here we check that the CSet region's RSet is ready for parallel + // iteration. The fields that we'll verify are only manipulated + // when the region is part of a CSet and is collected. Afterwards, + // we reset these fields when we clear the region's RSet (when the + // region is freed) so they are ready when the region is + // re-allocated. The only exception to this is if there's an + // evacuation failure and instead of freeing the region we leave + // it in the heap. In that case, we reset these fields during + // evacuation failure handling. + guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification"); + + // Here's a good place to add any other checks we'd like to + // perform on CSet regions. return false; } }; +#endif // ASSERT #if TASKQUEUE_STATS void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { @@ -3258,16 +3403,14 @@ gc_prologue(false); increment_total_collections(false /* full gc */); -#if G1_REM_SET_LOGGING - gclog_or_tty->print_cr("\nJust chose CS, heap:"); - print(); -#endif - if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - Universe::verify(false); + Universe::verify(/* allow dirty */ false, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); + } COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -3284,6 +3427,11 @@ // of the collection set!). release_mutator_alloc_region(); + // We should call this after we retire the mutator alloc + // region(s) so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(false /* full */, (size_t) total_collections()); + // The elapsed time induced by the start time below deliberately elides // the possible verification above. double start_time_sec = os::elapsedTime(); @@ -3335,6 +3483,22 @@ g1_policy()->choose_collection_set(target_pause_time_ms); + if (_hr_printer.is_active()) { + HeapRegion* hr = g1_policy()->collection_set(); + while (hr != NULL) { + G1HRPrinter::RegionType type; + if (!hr->is_young()) { + type = G1HRPrinter::Old; + } else if (hr->is_survivor()) { + type = G1HRPrinter::Survivor; + } else { + type = G1HRPrinter::Eden; + } + _hr_printer.cset(hr); + hr = hr->next_in_collection_set(); + } + } + // We have chosen the complete collection set. If marking is // active then, we clear the region fields of any of the // concurrent marking tasks whose region fields point into @@ -3345,13 +3509,10 @@ concurrent_mark()->reset_active_task_region_fields_in_cset(); } - // Nothing to do if we were unable to choose a collection set. 
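
Note: VerifyCSetClosure above follows the HeapRegionClosure idiom used throughout this file: doHeapRegion() is invoked once per region and returns true only to terminate the iteration early. A self-contained sketch of that idiom with stand-in types (HeapRegion and the iterator below are placeholders, not the real classes):

    #include <cstddef>
    #include <vector>

    struct HeapRegion { size_t used_bytes; };

    struct HeapRegionClosure {
      // Return true to stop the iteration early, false to keep going.
      virtual bool doHeapRegion(HeapRegion* hr) = 0;
      virtual ~HeapRegionClosure() { }
    };

    // Visits every region in order until a closure asks to stop.
    void heap_region_iterate(std::vector<HeapRegion>& regions,
                             HeapRegionClosure* cl) {
      for (size_t i = 0; i < regions.size(); ++i) {
        if (cl->doHeapRegion(&regions[i])) {
          return;
        }
      }
    }

    // Example closure in the style of SumUsedClosure above: sums the used
    // bytes of all regions and never stops early.
    struct SumUsedClosure : public HeapRegionClosure {
      size_t sum;
      SumUsedClosure() : sum(0) { }
      virtual bool doHeapRegion(HeapRegion* hr) {
        sum += hr->used_bytes;
        return false;
      }
    };
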
-#if G1_REM_SET_LOGGING - gclog_or_tty->print_cr("\nAfter pause, heap:"); - print(); -#endif - PrepareForRSScanningClosure prepare_for_rs_scan; - collection_set_iterate(&prepare_for_rs_scan); +#ifdef ASSERT + VerifyCSetClosure cl; + collection_set_iterate(&cl); +#endif // ASSERT setup_surviving_young_words(); @@ -3439,7 +3600,9 @@ HandleMark hm; // Discard invalid handles created during verification gclog_or_tty->print(" VerifyAfterGC:"); prepare_for_verify(); - Universe::verify(false); + Universe::verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UsePrevMarking); } if (was_enabled) ref_processor()->enable_discovery(); @@ -3457,6 +3620,15 @@ } } + // We should do this after we potentially expand the heap so + // that all the COMMIT events are generated before the end GC + // event, and after we retire the GC alloc regions so that all + // RETIRE events are generated before the end GC event. + _hr_printer.end_gc(false /* full */, (size_t) total_collections()); + + // We have to do this after we decide whether to expand the heap or not. + g1_policy()->print_heap_transition(); + if (mark_in_progress()) { concurrent_mark()->update_g1_committed(); } @@ -3475,6 +3647,7 @@ } } + _hrs.verify_optional(); verify_region_sets_optional(); TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); @@ -3607,8 +3780,8 @@ public: bool doHeapRegion(HeapRegion* r) { if (r->is_gc_alloc_region()) { - gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", - r->hrs_index(), r->bottom()); + gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region", + HR_FORMAT_PARAMS(r)); } return false; } @@ -3692,11 +3865,8 @@ } else { // the region was retained from the last collection ++_gc_alloc_region_counts[ap]; - if (G1PrintHeapRegions) { - gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " - "top "PTR_FORMAT, - alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); - } + + _hr_printer.reuse(alloc_region); } if (alloc_region != NULL) { @@ -3949,6 +4119,14 @@ assert(cur->in_collection_set(), "bad CS"); RemoveSelfPointerClosure rspc(_g1h, cur, cl); + // In the common case we make sure that this is done when the + // region is freed so that it is "ready-to-go" when it's + // re-allocated. However, when evacuation failure happens, a + // region will remain in the heap and might ultimately be added + // to a CSet in the future. So we have to be careful here and + // make sure the region's RSet is ready for parallel iteration + // whenever this might be required in the future. + cur->rem_set()->reset_for_par_iteration(); cur->reset_bot(); cl->set_region(cur); cur->object_iterate(&rspc); @@ -4061,11 +4239,7 @@ HeapRegion* r = heap_region_containing(old); if (!r->evacuation_failed()) { r->set_evacuation_failed(true); - if (G1PrintHeapRegions) { - gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " - "["PTR_FORMAT","PTR_FORMAT")\n", - r, r->bottom(), r->end()); - } + _hr_printer.evac_failure(r); } push_on_evac_failure_scan_stack(old); @@ -4126,6 +4300,7 @@ // Now we can do the post-GC stuff on the region. 
alloc_region->note_end_of_copying(); g1_policy()->record_after_bytes(alloc_region->used()); + _hr_printer.retire(alloc_region); } HeapWord* @@ -4468,10 +4643,6 @@ // here the null check is implicit in the cset_fast_test() test if (_g1->in_cset_fast_test(obj)) { -#if G1_REM_SET_LOGGING - gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " - "into CS.", p, (void*) obj); -#endif if (obj->is_forwarded()) { oopDesc::encode_store_heap_oop(p, obj->forwardee()); } else { @@ -4906,10 +5077,10 @@ hr->set_notHumongous(); free_region(hr, &hr_pre_used, free_list, par); - int i = hr->hrs_index() + 1; + size_t i = hr->hrs_index() + 1; size_t num = 1; - while ((size_t) i < n_regions()) { - HeapRegion* curr_hr = _hrs->at(i); + while (i < n_regions()) { + HeapRegion* curr_hr = region_at(i); if (!curr_hr->continuesHumongous()) { break; } @@ -5269,16 +5440,6 @@ } } -size_t G1CollectedHeap::n_regions() { - return _hrs->length(); -} - -size_t G1CollectedHeap::max_regions() { - return - (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) / - HeapRegion::GrainBytes; -} - void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { assert(heap_lock_held_for_gc(), "the heap lock should already be held by or for this thread"); @@ -5409,12 +5570,14 @@ assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(!force || g1_policy()->can_expand_young_list(), "if force is true we should be able to expand the young list"); - if (force || !g1_policy()->is_young_list_full()) { + bool young_list_full = g1_policy()->is_young_list_full(); + if (force || !young_list_full) { HeapRegion* new_alloc_region = new_region(word_size, false /* do_expand */); if (new_alloc_region != NULL) { g1_policy()->update_region_num(true /* next_is_young */); set_region_short_lived_locked(new_alloc_region); + _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full); g1mm()->update_eden_counters(); return new_alloc_region; } @@ -5429,6 +5592,7 @@ g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); _summary_bytes_used += allocated_bytes; + _hr_printer.retire(alloc_region); } HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, @@ -5475,6 +5639,15 @@ } }; +HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, + HeapWord* bottom) { + HeapWord* end = bottom + HeapRegion::GrainWords; + MemRegion mr(bottom, end); + assert(_g1_reserved.contains(mr), "invariant"); + // This might return NULL if the allocation fails + return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */); +} + void G1CollectedHeap::verify_region_sets() { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -27,8 +27,10 @@ #include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/g1AllocRegion.hpp" +#include "gc_implementation/g1/g1HRPrinter.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1MonitoringSupport.hpp" +#include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/shared/hSpaceCounters.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp" @@ -42,7 +44,6 @@ // heap subsets that will yield large amounts of garbage. 
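
Note: most of the ad-hoc G1PrintHeapRegions logging removed by this patch is replaced with calls on the new G1HRPrinter member declared below (_hr_printer) and implemented in the new g1HRPrinter files later in the patch. Based on the format strings there (" G1HR" prefix, action name, optional region type, bottom and optional top), the emitted log lines look roughly like the following; the addresses and the collection number are made up for illustration:

     G1HR COMMIT [0x00000000f8000000,0x00000000f8100000]
     G1HR ALLOC(Eden) 0x00000000f8000000
     G1HR #StartGC 5
     G1HR CSET 0x00000000f8000000
     G1HR RETIRE(Eden) 0x00000000f8000000 0x00000000f80f2a60
     G1HR #EndGC 5
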
class HeapRegion; -class HeapRegionSeq; class HRRSCleanupTask; class PermanentGenerationSpec; class GenerationSpec; @@ -103,6 +104,19 @@ size_t length() { return _length; } size_t survivor_length() { return _survivor_length; } + // Currently we do not keep track of the used byte sum for the + // young list and the survivors and it'd be quite a lot of work to + // do so. When we'll eventually replace the young list with + // instances of HeapRegionLinkedList we'll get that for free. So, + // we'll report the more accurate information then. + size_t eden_used_bytes() { + assert(length() >= survivor_length(), "invariant"); + return (length() - survivor_length()) * HeapRegion::GrainBytes; + } + size_t survivor_used_bytes() { + return survivor_length() * HeapRegion::GrainBytes; + } + void rs_length_sampling_init(); bool rs_length_sampling_more(); void rs_length_sampling_next(); @@ -183,9 +197,6 @@ // The part of _g1_storage that is currently committed. MemRegion _g1_committed; - // The maximum part of _g1_storage that has ever been committed. - MemRegion _g1_max_committed; - // The master free list. It will satisfy all new region allocations. MasterFreeRegionList _free_list; @@ -209,7 +220,7 @@ void rebuild_region_lists(); // The sequence of all heap regions in the heap. - HeapRegionSeq* _hrs; + HeapRegionSeq _hrs; // Alloc region used to satisfy mutator allocation requests. MutatorAllocRegion _mutator_alloc_region; @@ -288,6 +299,8 @@ size_t* _surviving_young_words; + G1HRPrinter _hr_printer; + void setup_surviving_young_words(); void update_surviving_young_words(size_t* surv_young_words); void cleanup_surviving_young_words(); @@ -408,13 +421,15 @@ // Attempt to satisfy a humongous allocation request of the given // size by finding a contiguous set of free regions of num_regions // length and remove them from the master free list. Return the - // index of the first region or -1 if the search was unsuccessful. - int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); + // index of the first region or G1_NULL_HRS_INDEX if the search + // was unsuccessful. + size_t humongous_obj_allocate_find_first(size_t num_regions, + size_t word_size); // Initialize a contiguous set of free regions of length num_regions // and starting at index first so that they appear as a single // humongous region. - HeapWord* humongous_obj_allocate_initialize_regions(int first, + HeapWord* humongous_obj_allocate_initialize_regions(size_t first, size_t num_regions, size_t word_size); @@ -434,8 +449,7 @@ // * All allocation requests for new TLABs should go to // allocate_new_tlab(). // - // * All non-TLAB allocation requests should go to mem_allocate() - // and mem_allocate() should never be called with is_tlab == true. + // * All non-TLAB allocation requests should go to mem_allocate(). // // * If either call cannot satisfy the allocation request using the // current allocating region, they will try to get a new one. 
If @@ -455,8 +469,6 @@ virtual HeapWord* allocate_new_tlab(size_t word_size); virtual HeapWord* mem_allocate(size_t word_size, - bool is_noref, - bool is_tlab, /* expected to be false */ bool* gc_overhead_limit_was_exceeded); // The following three methods take a gc_count_before_ret @@ -574,8 +586,8 @@ void register_region_with_in_cset_fast_test(HeapRegion* r) { assert(_in_cset_fast_test_base != NULL, "sanity"); assert(r->in_collection_set(), "invariant"); - int index = r->hrs_index(); - assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant"); + size_t index = r->hrs_index(); + assert(index < _in_cset_fast_test_length, "invariant"); assert(!_in_cset_fast_test_base[index], "invariant"); _in_cset_fast_test_base[index] = true; } @@ -626,6 +638,8 @@ return _full_collections_completed; } + G1HRPrinter* hr_printer() { return &_hr_printer; } + protected: // Shrink the garbage-first heap by at most the given size (in bytes!). @@ -741,6 +755,11 @@ HumongousRegionSet* humongous_proxy_set, bool par); + // Notifies all the necessary spaces that the committed space has + // been updated (either expanded or shrunk). It should be called + // after _g1_storage is updated. + void update_committed_space(HeapWord* old_end, HeapWord* new_end); + // The concurrent marker (and the thread it runs in.) ConcurrentMark* _cm; ConcurrentMarkThread* _cmThread; @@ -803,7 +822,6 @@ oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); void handle_evacuation_failure_common(oop obj, markOop m); - // Ensure that the relevant gc_alloc regions are set. void get_gc_alloc_regions(); // We're done with GC alloc regions. We are going to tear down the @@ -954,15 +972,13 @@ } // The total number of regions in the heap. - size_t n_regions(); + size_t n_regions() { return _hrs.length(); } + + // The max number of regions in the heap. + size_t max_regions() { return _hrs.max_length(); } // The number of regions that are completely free. - size_t max_regions(); - - // The number of regions that are completely free. - size_t free_regions() { - return _free_list.length(); - } + size_t free_regions() { return _free_list.length(); } // The number of regions that are not completely free. size_t used_regions() { return n_regions() - free_regions(); } @@ -970,6 +986,10 @@ // The number of regions available for "regular" expansion. size_t expansion_regions() { return _expansion_regions; } + // Factory method for HeapRegion instances. It will return NULL if + // the allocation fails. + HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom); + void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; @@ -1131,17 +1151,15 @@ // Iterate over heap regions, in address order, terminating the // iteration early if the "doHeapRegion" method returns "true". - void heap_region_iterate(HeapRegionClosure* blk); + void heap_region_iterate(HeapRegionClosure* blk) const; // Iterate over heap regions starting with r (or the first region if "r" // is NULL), in address order, terminating early if the "doHeapRegion" // method returns "true". - void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); + void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const; - // As above but starting from the region at index idx. - void heap_region_iterate_from(int idx, HeapRegionClosure* blk); - - HeapRegion* region_at(size_t idx); + // Return the region with the given index. 
It assumes the index is valid. + HeapRegion* region_at(size_t index) const { return _hrs.at(index); } // Divide the heap region sequence into "chunks" of some size (the number // of regions divided by the number of parallel threads times some @@ -1182,12 +1200,14 @@ // A G1CollectedHeap will contain some number of heap regions. This // finds the region containing a given address, or else returns NULL. - HeapRegion* heap_region_containing(const void* addr) const; + template + inline HeapRegion* heap_region_containing(const T addr) const; // Like the above, but requires "addr" to be in the heap (to avoid a // null-check), and unlike the above, may return an continuing humongous // region. - HeapRegion* heap_region_containing_raw(const void* addr) const; + template + inline HeapRegion* heap_region_containing_raw(const T addr) const; // A CollectedHeap is divided into a dense sequence of "blocks"; that is, // each address in the (reserved) heap is a member of exactly @@ -1249,7 +1269,7 @@ return true; } - bool is_in_young(oop obj) { + bool is_in_young(const oop obj) { HeapRegion* hr = heap_region_containing(obj); return hr != NULL && hr->is_young(); } @@ -1286,10 +1306,6 @@ return true; } - // The boundary between a "large" and "small" array of primitives, in - // words. - virtual size_t large_typearray_limit(); - // Returns "true" iff the given word_size is "very large". static bool isHumongous(size_t word_size) { // Note this has to be strictly greater-than as the TLABs @@ -1329,14 +1345,20 @@ // Perform verification. - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information + // vo == UsePrevMarking -> use "prev" marking information, + // vo == UseNextMarking -> use "next" marking information + // vo == UseMarkWord -> use the mark word in the object header + // // NOTE: Only the "prev" marking information is guaranteed to be // consistent most of the time, so most calls to this should use - // use_prev_marking == true. Currently, there is only one case where - // this is called with use_prev_marking == false, which is to verify - // the "next" marking information at the end of remark. - void verify(bool allow_dirty, bool silent, bool use_prev_marking); + // vo == UsePrevMarking. + // Currently, there is only one case where this is called with + // vo == UseNextMarking, which is to verify the "next" marking + // information at the end of remark. + // Currently there is only one place where this is called with + // vo == UseMarkWord, which is to verify the marking during a + // full GC. + void verify(bool allow_dirty, bool silent, VerifyOption vo); // Override; it uses the "prev" marking information virtual void verify(bool allow_dirty, bool silent); @@ -1355,10 +1377,9 @@ // Override void print_tracing_info() const; - // If "addr" is a pointer into the (reserved?) heap, returns a positive - // number indicating the "arena" within the heap in which "addr" falls. - // Or else returns 0. - virtual int addr_to_arena_id(void* addr) const; + // The following two methods are helpful for debugging RSet issues. + void print_cset_rsets() PRODUCT_RETURN; + void print_all_rsets() PRODUCT_RETURN; // Convenience function to be used in situations where the heap type can be // asserted to be this type. @@ -1389,24 +1410,27 @@ // bitmap off to the side. void doConcurrentMark(); - // This is called from the marksweep collector which then does - // a concurrent mark and verifies that the results agree with - // the stop the world marking. 
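
Note: heap_region_containing() and heap_region_containing_raw() become templates in this hunk so that call sites can pass an oop, a HeapWord*, or another pointer-like value without casting at each use; the single cast happens inside the helper. A stand-alone sketch of that shape (the types below are placeholders, not the HotSpot declarations):

    struct HeapWord;
    struct HeapRegion;

    // Stand-in for the heap region sequence: one untyped lookup routine.
    struct RegionSeq {
      HeapRegion* addr_to_region(HeapWord* addr) const;
    };

    class Heap {
      RegionSeq _hrs;
    public:
      // One template member replaces per-type overloads; the cast to HeapWord*
      // is written once here instead of at every call site.
      template <class T>
      HeapRegion* heap_region_containing(const T addr) const {
        return _hrs.addr_to_region((HeapWord*) addr);
      }
    };
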
- void checkConcurrentMark(); + // Do a full concurrent marking, synchronously. void do_sync_mark(); bool isMarkedPrev(oop obj) const; bool isMarkedNext(oop obj) const; - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information + // vo == UsePrevMarking -> use "prev" marking information, + // vo == UseNextMarking -> use "next" marking information, + // vo == UseMarkWord -> use mark word from object header bool is_obj_dead_cond(const oop obj, const HeapRegion* hr, - const bool use_prev_marking) const { - if (use_prev_marking) { - return is_obj_dead(obj, hr); - } else { - return is_obj_ill(obj, hr); + const VerifyOption vo) const { + + switch (vo) { + case VerifyOption_G1UsePrevMarking: + return is_obj_dead(obj, hr); + case VerifyOption_G1UseNextMarking: + return is_obj_ill(obj, hr); + default: + assert(vo == VerifyOption_G1UseMarkWord, "must be"); + return !obj->is_gc_marked(); } } @@ -1447,18 +1471,24 @@ // Added if it is in permanent gen it isn't dead. // Added if it is NULL it isn't dead. - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information + // vo == UsePrevMarking -> use "prev" marking information, + // vo == UseNextMarking -> use "next" marking information, + // vo == UseMarkWord -> use mark word from object header bool is_obj_dead_cond(const oop obj, - const bool use_prev_marking) { - if (use_prev_marking) { - return is_obj_dead(obj); - } else { - return is_obj_ill(obj); + const VerifyOption vo) const { + + switch (vo) { + case VerifyOption_G1UsePrevMarking: + return is_obj_dead(obj); + case VerifyOption_G1UseNextMarking: + return is_obj_ill(obj); + default: + assert(vo == VerifyOption_G1UseMarkWord, "must be"); + return !obj->is_gc_marked(); } } - bool is_obj_dead(const oop obj) { + bool is_obj_dead(const oop obj) const { const HeapRegion* hr = heap_region_containing(obj); if (hr == NULL) { if (Universe::heap()->is_in_permanent(obj)) @@ -1469,7 +1499,7 @@ else return is_obj_dead(obj, hr); } - bool is_obj_ill(const oop obj) { + bool is_obj_ill(const oop obj) const { const HeapRegion* hr = heap_region_containing(obj); if (hr == NULL) { if (Universe::heap()->is_in_permanent(obj)) diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -34,9 +34,10 @@ // Inline functions for G1CollectedHeap +template inline HeapRegion* -G1CollectedHeap::heap_region_containing(const void* addr) const { - HeapRegion* hr = _hrs->addr_to_region(addr); +G1CollectedHeap::heap_region_containing(const T addr) const { + HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr); // hr can be null if addr in perm_gen if (hr != NULL && hr->continuesHumongous()) { hr = hr->humongous_start_region(); @@ -44,19 +45,16 @@ return hr; } +template inline HeapRegion* -G1CollectedHeap::heap_region_containing_raw(const void* addr) const { - assert(_g1_reserved.contains(addr), "invariant"); - size_t index = pointer_delta(addr, _g1_reserved.start(), 1) - >> HeapRegion::LogOfHRGrainBytes; - - HeapRegion* res = _hrs->at(index); - assert(res == _hrs->addr_to_region(addr), "sanity"); +G1CollectedHeap::heap_region_containing_raw(const T addr) const { + assert(_g1_reserved.contains((const void*) addr), "invariant"); + HeapRegion* res = 
_hrs.addr_to_region_unsafe((HeapWord*) addr); return res; } inline bool G1CollectedHeap::obj_in_cs(oop obj) { - HeapRegion* r = _hrs->addr_to_region(obj); + HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj); return r != NULL && r->in_collection_set(); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -239,6 +239,10 @@ _should_revert_to_full_young_gcs(false), _last_full_young_gc(false), + _eden_bytes_before_gc(0), + _survivor_bytes_before_gc(0), + _capacity_before_gc(0), + _prev_collection_pause_used_at_end_bytes(0), _collection_set(NULL), @@ -897,6 +901,11 @@ _bytes_in_to_space_after_gc = 0; _bytes_in_collection_set_before_gc = 0; + YoungList* young_list = _g1->young_list(); + _eden_bytes_before_gc = young_list->eden_used_bytes(); + _survivor_bytes_before_gc = young_list->survivor_used_bytes(); + _capacity_before_gc = _g1->capacity(); + #ifdef DEBUG // initialise these to something well known so that we can spot // if they are not set properly @@ -1460,14 +1469,6 @@ } } } - if (PrintGCDetails) - gclog_or_tty->print(" ["); - if (PrintGC || PrintGCDetails) - _g1->print_size_transition(gclog_or_tty, - _cur_collection_pause_used_at_start_bytes, - _g1->used(), _g1->capacity()); - if (PrintGCDetails) - gclog_or_tty->print_cr("]"); _all_pause_times_ms->add(elapsed_ms); if (update_stats) { @@ -1672,6 +1673,40 @@ // } +#define EXT_SIZE_FORMAT "%d%s" +#define EXT_SIZE_PARAMS(bytes) \ + byte_size_in_proper_unit((bytes)), \ + proper_unit_for_byte_size((bytes)) + +void G1CollectorPolicy::print_heap_transition() { + if (PrintGCDetails) { + YoungList* young_list = _g1->young_list(); + size_t eden_bytes = young_list->eden_used_bytes(); + size_t survivor_bytes = young_list->survivor_used_bytes(); + size_t used_before_gc = _cur_collection_pause_used_at_start_bytes; + size_t used = _g1->used(); + size_t capacity = _g1->capacity(); + + gclog_or_tty->print_cr( + " [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " + "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " + "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" + EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", + EXT_SIZE_PARAMS(_eden_bytes_before_gc), + EXT_SIZE_PARAMS(eden_bytes), + EXT_SIZE_PARAMS(_survivor_bytes_before_gc), + EXT_SIZE_PARAMS(survivor_bytes), + EXT_SIZE_PARAMS(used_before_gc), + EXT_SIZE_PARAMS(_capacity_before_gc), + EXT_SIZE_PARAMS(used), + EXT_SIZE_PARAMS(capacity)); + } else if (PrintGC) { + _g1->print_size_transition(gclog_or_tty, + _cur_collection_pause_used_at_start_bytes, + _g1->used(), _g1->capacity()); + } +} + // void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, @@ -2435,21 +2470,6 @@ G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used); } -class NextNonCSElemFinder: public HeapRegionClosure { - HeapRegion* _res; -public: - NextNonCSElemFinder(): _res(NULL) {} - bool doHeapRegion(HeapRegion* r) { - if (!r->in_collection_set()) { - _res = r; - return true; - } else { - return false; - } - } - HeapRegion* res() { return _res; } -}; - class KnownGarbageClosure: public HeapRegionClosure { CollectionSetChooser* _hrSorted; @@ -2618,14 +2638,6 @@ assert(_inc_cset_build_state == Active, "Precondition"); assert(!hr->is_young(), "non-incremental add of young region"); - if (G1PrintHeapRegions) { - gclog_or_tty->print_cr("added region to cset " - "%d:["PTR_FORMAT", 
"PTR_FORMAT"], " - "top "PTR_FORMAT", %s", - hr->hrs_index(), hr->bottom(), hr->end(), - hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG"); - } - if (_g1->mark_in_progress()) _g1->concurrent_mark()->registerCSetRegion(hr); @@ -2791,14 +2803,6 @@ _inc_cset_tail->set_next_in_collection_set(hr); } _inc_cset_tail = hr; - - if (G1PrintHeapRegions) { - gclog_or_tty->print_cr(" added region to incremental cset (RHS) " - "%d:["PTR_FORMAT", "PTR_FORMAT"], " - "top "PTR_FORMAT", young %s", - hr->hrs_index(), hr->bottom(), hr->end(), - hr->top(), (hr->is_young()) ? "YES" : "NO"); - } } // Add the region to the LHS of the incremental cset @@ -2816,14 +2820,6 @@ _inc_cset_tail = hr; } _inc_cset_head = hr; - - if (G1PrintHeapRegions) { - gclog_or_tty->print_cr(" added region to incremental cset (LHS) " - "%d:["PTR_FORMAT", "PTR_FORMAT"], " - "top "PTR_FORMAT", young %s", - hr->hrs_index(), hr->bottom(), hr->end(), - hr->top(), (hr->is_young()) ? "YES" : "NO"); - } } #ifndef PRODUCT diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -891,6 +891,7 @@ virtual void record_collection_pause_end_G1_strong_roots(); virtual void record_collection_pause_end(); + void print_heap_transition(); // Record the fact that a full collection occurred. virtual void record_full_collection_start(); @@ -1179,6 +1180,11 @@ // The limit on the number of regions allocated for survivors. size_t _max_survivor_regions; + // For reporting purposes. + size_t _eden_bytes_before_gc; + size_t _survivor_bytes_before_gc; + size_t _capacity_before_gc; + // The amount of survor regions after a collection. size_t _recorded_survivor_regions; // List of survivor regions. diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1HRPrinter.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1HRPrinter.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1HRPrinter.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "utilities/ostream.hpp" + +const char* G1HRPrinter::action_name(ActionType action) { + switch(action) { + case Alloc: return "ALLOC"; + case AllocForce: return "ALLOC-FORCE"; + case Retire: return "RETIRE"; + case Reuse: return "REUSE"; + case CSet: return "CSET"; + case EvacFailure: return "EVAC-FAILURE"; + case Cleanup: return "CLEANUP"; + case PostCompaction: return "POST-COMPACTION"; + case Commit: return "COMMIT"; + case Uncommit: return "UNCOMMIT"; + default: ShouldNotReachHere(); + } + // trying to keep the Windows compiler happy + return NULL; +} + +const char* G1HRPrinter::region_type_name(RegionType type) { + switch (type) { + case Unset: return NULL; + case Eden: return "Eden"; + case Survivor: return "Survivor"; + case Old: return "Old"; + case SingleHumongous: return "SingleH"; + case StartsHumongous: return "StartsH"; + case ContinuesHumongous: return "ContinuesH"; + default: ShouldNotReachHere(); + } + // trying to keep the Windows compiler happy + return NULL; +} + +const char* G1HRPrinter::phase_name(PhaseType phase) { + switch (phase) { + case StartGC: return "StartGC"; + case EndGC: return "EndGC"; + case StartFullGC: return "StartFullGC"; + case EndFullGC: return "EndFullGC"; + default: ShouldNotReachHere(); + } + // trying to keep the Windows compiler happy + return NULL; +} + +#define G1HR_PREFIX " G1HR" + +void G1HRPrinter::print(ActionType action, RegionType type, + HeapRegion* hr, HeapWord* top) { + const char* action_str = action_name(action); + const char* type_str = region_type_name(type); + HeapWord* bottom = hr->bottom(); + + if (type_str != NULL) { + if (top != NULL) { + gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT" "PTR_FORMAT, + action_str, type_str, bottom, top); + } else { + gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT, + action_str, type_str, bottom); + } + } else { + if (top != NULL) { + gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT" "PTR_FORMAT, + action_str, bottom, top); + } else { + gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT, + action_str, bottom); + } + } +} + +void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) { + const char* action_str = action_name(action); + + gclog_or_tty->print_cr(G1HR_PREFIX" %s ["PTR_FORMAT","PTR_FORMAT"]", + action_str, bottom, end); +} + +void G1HRPrinter::print(PhaseType phase, size_t phase_num) { + const char* phase_str = phase_name(phase); + gclog_or_tty->print_cr(G1HR_PREFIX" #%s "SIZE_FORMAT, phase_str, phase_num); +} diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1HRPrinter.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1HRPrinter.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP + +#include "memory/allocation.hpp" +#include "gc_implementation/g1/heapRegion.hpp" + +#define SKIP_RETIRED_FULL_REGIONS 1 + +class G1HRPrinter VALUE_OBJ_CLASS_SPEC { +public: + typedef enum { + Alloc, + AllocForce, + Retire, + Reuse, + CSet, + EvacFailure, + Cleanup, + PostCompaction, + Commit, + Uncommit + } ActionType; + + typedef enum { + Unset, + Eden, + Survivor, + Old, + SingleHumongous, + StartsHumongous, + ContinuesHumongous + } RegionType; + + typedef enum { + StartGC, + EndGC, + StartFullGC, + EndFullGC + } PhaseType; + +private: + bool _active; + + static const char* action_name(ActionType action); + static const char* region_type_name(RegionType type); + static const char* phase_name(PhaseType phase); + + // Print an action event. This version is used in most scenarios and + // only prints the region's bottom. The parameters type and top are + // optional (the "not set" values are Unset and NULL). + static void print(ActionType action, RegionType type, + HeapRegion* hr, HeapWord* top); + + // Print an action event. This version prints both the region's + // bottom and end. Used for Commit / Uncommit events. + static void print(ActionType action, HeapWord* bottom, HeapWord* end); + + // Print a phase event. + static void print(PhaseType phase, size_t phase_num); + +public: + // In some places we iterate over a list in order to generate output + // for the list's elements. By exposing this we can avoid this + // iteration if the printer is not active. + const bool is_active() { return _active; } + + // Have to set this explicitly as we have to do this during the + // heap's initialize() method, not in the constructor. + void set_active(bool active) { _active = active; } + + // The methods below are convenient wrappers for the print() methods. + + void alloc(HeapRegion* hr, RegionType type, bool force = false) { + if (is_active()) { + print((!force) ? 
Alloc : AllocForce, type, hr, NULL); + } + } + + void alloc(RegionType type, HeapRegion* hr, HeapWord* top) { + if (is_active()) { + print(Alloc, type, hr, top); + } + } + + void retire(HeapRegion* hr) { + if (is_active()) { + if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) { + print(Retire, Unset, hr, hr->top()); + } + } + } + + void reuse(HeapRegion* hr) { + if (is_active()) { + print(Reuse, Unset, hr, NULL); + } + } + + void cset(HeapRegion* hr) { + if (is_active()) { + print(CSet, Unset, hr, NULL); + } + } + + void evac_failure(HeapRegion* hr) { + if (is_active()) { + print(EvacFailure, Unset, hr, NULL); + } + } + + void cleanup(HeapRegion* hr) { + if (is_active()) { + print(Cleanup, Unset, hr, NULL); + } + } + + void post_compaction(HeapRegion* hr, RegionType type) { + if (is_active()) { + print(PostCompaction, type, hr, hr->top()); + } + } + + void commit(HeapWord* bottom, HeapWord* end) { + if (is_active()) { + print(Commit, bottom, end); + } + } + + void uncommit(HeapWord* bottom, HeapWord* end) { + if (is_active()) { + print(Uncommit, bottom, end); + } + } + + void start_gc(bool full, size_t gc_num) { + if (is_active()) { + if (!full) { + print(StartGC, gc_num); + } else { + print(StartFullGC, gc_num); + } + } + } + + void end_gc(bool full, size_t gc_num) { + if (is_active()) { + if (!full) { + print(EndGC, gc_num); + } else { + print(EndFullGC, gc_num); + } + } + } + + G1HRPrinter() : _active(false) { } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -84,11 +84,6 @@ mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); - if (VerifyDuringGC) { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->checkConcurrentMark(); - } - mark_sweep_phase2(); // Don't add any more derived pointers during phase3 @@ -179,6 +174,29 @@ assert(GenMarkSweep::_marking_stack.is_empty(), "stack should be empty by now"); + + if (VerifyDuringGC) { + HandleMark hm; // handle scope + COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); + gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); + Universe::heap()->prepare_for_verify(); + // Note: we can verify only the heap here. When an object is + // marked, the previous value of the mark word (including + // identity hash values, ages, etc) is preserved, and the mark + // word is set to markOop::marked_value - effectively removing + // any hash values from the mark word. These hash values are + // used when verifying the dictionaries and so removing them + // from the mark word can make verification of the dictionaries + // fail. At the end of the GC, the orginal mark word values + // (including hash values) are restored to the appropriate + // objects. + Universe::heap()->verify(/* allow dirty */ true, + /* silent */ false, + /* option */ VerifyOption_G1UseMarkWord); + + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + gclog_or_tty->print_cr("]"); + } } class G1PrepareCompactClosure: public HeapRegionClosure { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1OopClosures.hpp --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ class CMBitMap; class CMMarkStack; class G1ParScanThreadState; +class CMTask; // A class that scans oops in a given heap region (much as OopsInGenClosure // scans oops in a generation.) @@ -40,7 +41,7 @@ protected: HeapRegion* _from; public: - virtual void set_region(HeapRegion* from) { _from = from; } + void set_region(HeapRegion* from) { _from = from; } }; class G1ParClosureSuper : public OopsInHeapRegionClosure { @@ -161,44 +162,6 @@ bool do_header() { return false; } }; -class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { - G1CollectedHeap* _g1; - OopsInHeapRegionClosure* _oc; -public: - FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, - OopsInHeapRegionClosure* oc) : - _g1(g1), _oc(oc) - {} - template void do_oop_nv(T* p); - virtual void do_oop(oop* p) { do_oop_nv(p); } - virtual void do_oop(narrowOop* p) { do_oop_nv(p); } - bool apply_to_weak_ref_discovered_field() { return true; } - bool do_header() { return false; } - void set_region(HeapRegion* from) { - _oc->set_region(from); - } -}; - -class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure { - G1CollectedHeap* _g1; - ConcurrentMark* _cm; - OopsInHeapRegionClosure* _oc; -public: - FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1, - OopsInHeapRegionClosure* oc, - ConcurrentMark* cm) - : _g1(g1), _oc(oc), _cm(cm) { } - - template void do_oop_nv(T* p); - virtual void do_oop(oop* p) { do_oop_nv(p); } - virtual void do_oop(narrowOop* p) { do_oop_nv(p); } - bool apply_to_weak_ref_discovered_field() { return true; } - bool do_header() { return false; } - void set_region(HeapRegion* from) { - _oc->set_region(from); - } -}; - class FilterOutOfRegionClosure: public OopClosure { HeapWord* _r_bottom; HeapWord* _r_end; @@ -214,4 +177,16 @@ int out_of_region() { return _out_of_region; } }; +// Closure for iterating over object fields during concurrent marking +class G1CMOopClosure : public OopClosure { + G1CollectedHeap* _g1h; + ConcurrentMark* _cm; + CMTask* _task; +public: + G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task); + template void do_oop_nv(T* p); + virtual void do_oop( oop* p) { do_oop_nv(p); } + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +}; + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP -#include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/concurrentMark.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1OopClosures.hpp" #include "gc_implementation/g1/g1RemSet.hpp" @@ -66,27 +66,6 @@ } } -template inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop) && - _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) - _oc->do_oop(p); -} - -template inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj); - if (hr != NULL) { - if (hr->in_collection_set()) - _oc->do_oop(p); - else if (!hr->is_young()) - _cm->grayRoot(obj); - } - } -} - // This closure is applied to the fields of the objects that have just been copied. template inline void G1ParScanClosure::do_oop_nv(T* p) { T heap_oop = oopDesc::load_heap_oop(p); @@ -129,5 +108,18 @@ } } +template inline void G1CMOopClosure::do_oop_nv(T* p) { + assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); + assert(!_g1h->is_on_master_free_list( + _g1h->heap_region_containing((HeapWord*) p)), "invariant"); + + oop obj = oopDesc::load_decode_heap_oop(p); + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%d] we're looking at location " + "*"PTR_FORMAT" = "PTR_FORMAT, + _task->task_id(), p, (void*) obj); + } + _task->deal_with_reference(obj); +} #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -66,41 +66,6 @@ } #endif - -class IntoCSOopClosure: public OopsInHeapRegionClosure { - OopsInHeapRegionClosure* _blk; - G1CollectedHeap* _g1; -public: - IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : - _g1(g1), _blk(blk) {} - void set_region(HeapRegion* from) { - _blk->set_region(from); - } - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } - template void do_oop_work(T* p) { - oop obj = oopDesc::load_decode_heap_oop(p); - if (_g1->obj_in_cs(obj)) _blk->do_oop(p); - } - bool apply_to_weak_ref_discovered_field() { return true; } - bool idempotent() { return true; } -}; - -class VerifyRSCleanCardOopClosure: public OopClosure { - G1CollectedHeap* _g1; -public: - VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {} - - virtual void do_oop(narrowOop* p) { do_oop_work(p); } - virtual void do_oop( oop* p) { do_oop_work(p); } - template void do_oop_work(T* p) { - oop obj = oopDesc::load_decode_heap_oop(p); - HeapRegion* to = _g1->heap_region_containing(obj); - guarantee(to == NULL || !to->in_collection_set(), - "Missed a rem set member."); - } -}; - G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) : _g1(g1), _conc_refine_cards(0), _ct_bs(ct_bs), _g1p(_g1->g1_policy()), @@ -332,31 +297,6 @@ _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); } -#ifndef PRODUCT 
-class PrintRSClosure : public HeapRegionClosure { - int _count; -public: - PrintRSClosure() : _count(0) {} - bool doHeapRegion(HeapRegion* r) { - HeapRegionRemSet* hrrs = r->rem_set(); - _count += (int) hrrs->occupied(); - if (hrrs->occupied() == 0) { - gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") " - "has no remset entries\n", - r->bottom(), r->end()); - } else { - gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n", - r->bottom(), r->end()); - r->print(); - hrrs->print(); - gclog_or_tty->print("\nDone printing rem set\n"); - } - return false; - } - int occupied() {return _count;} -}; -#endif - class CountRSSizeClosure: public HeapRegionClosure { size_t _n; size_t _tot; @@ -482,10 +422,6 @@ } void G1RemSet::prepare_for_oops_into_collection_set_do() { -#if G1_REM_SET_LOGGING - PrintRSClosure cl; - _g1->collection_set_iterate(&cl); -#endif cleanupHRRS(); ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); _g1->set_refine_cte_cl_concurrency(false); @@ -504,14 +440,6 @@ } -class cleanUpIteratorsClosure : public HeapRegionClosure { - bool doHeapRegion(HeapRegion *r) { - HeapRegionRemSet* hrrs = r->rem_set(); - hrrs->init_for_par_iteration(); - return false; - } -}; - // This closure, applied to a DirtyCardQueueSet, is used to immediately // update the RSets for the regions in the CSet. For each card it iterates // through the oops which coincide with that card. It scans the reference @@ -572,18 +500,13 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() { guarantee( _cards_scanned != NULL, "invariant" ); _total_cards_scanned = 0; - for (uint i = 0; i < n_workers(); ++i) + for (uint i = 0; i < n_workers(); ++i) { _total_cards_scanned += _cards_scanned[i]; + } FREE_C_HEAP_ARRAY(size_t, _cards_scanned); _cards_scanned = NULL; // Cleanup after copy -#if G1_REM_SET_LOGGING - PrintRSClosure cl; - _g1->heap_region_iterate(&cl); -#endif _g1->set_refine_cte_cl_concurrency(true); - cleanUpIteratorsClosure iterClosure; - _g1->collection_set_iterate(&iterClosure); // Set all cards back to clean. 
_g1->cleanUpCardTable(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1RemSet.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -142,8 +142,6 @@ virtual void prepare_for_verify(); }; -#define G1_REM_SET_LOGGING 0 - class CountNonCleanMemRegionClosure: public MemRegionClosure { G1CollectedHeap* _g1; int _n; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -65,12 +65,6 @@ HeapRegion* to = _g1->heap_region_containing(obj); if (to != NULL && from != to) { -#if G1_REM_SET_LOGGING - gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS" - " for region [" PTR_FORMAT ", " PTR_FORMAT ")", - p, obj, - to->bottom(), to->end()); -#endif assert(to->rem_set() != NULL, "Need per-region 'into' remsets."); to->rem_set()->add_reference(p, tid); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp --- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,8 +45,7 @@ class FilterIntoCSClosure; class FilterOutOfRegionClosure; -class FilterInHeapRegionAndIntoCSClosure; -class FilterAndMarkInHeapRegionAndIntoCSClosure; +class G1CMOopClosure; #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined." @@ -58,8 +57,7 @@ f(G1ParPushHeapRSClosure,_nv) \ f(FilterIntoCSClosure,_nv) \ f(FilterOutOfRegionClosure,_nv) \ - f(FilterInHeapRegionAndIntoCSClosure,_nv) \ - f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv) + f(G1CMOopClosure,_nv) #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined." diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -60,13 +60,14 @@ oop _containing_obj; bool _failures; int _n_failures; - bool _use_prev_marking; + VerifyOption _vo; public: - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information - VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) : + // _vo == UsePrevMarking -> use "prev" marking information, + // _vo == UseNextMarking -> use "next" marking information, + // _vo == UseMarkWord -> use mark word from object header. 
+ VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : _g1h(g1h), _bs(NULL), _containing_obj(NULL), - _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking) + _failures(false), _n_failures(0), _vo(vo) { BarrierSet* bs = _g1h->barrier_set(); if (bs->is_a(BarrierSet::CardTableModRef)) @@ -95,14 +96,14 @@ template void do_oop_work(T* p) { assert(_containing_obj != NULL, "Precondition"); - assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), + assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), "Precondition"); T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); bool failed = false; if (!_g1h->is_in_closed_subset(obj) || - _g1h->is_obj_dead_cond(obj, _use_prev_marking)) { + _g1h->is_obj_dead_cond(obj, _vo)) { if (!_failures) { gclog_or_tty->print_cr(""); gclog_or_tty->print_cr("----------"); @@ -159,20 +160,16 @@ gclog_or_tty->print_cr("----------"); } gclog_or_tty->print_cr("Missing rem set entry:"); - gclog_or_tty->print_cr("Field "PTR_FORMAT - " of obj "PTR_FORMAT - ", in region %d ["PTR_FORMAT - ", "PTR_FORMAT"),", - p, (void*) _containing_obj, - from->hrs_index(), - from->bottom(), - from->end()); + gclog_or_tty->print_cr("Field "PTR_FORMAT" " + "of obj "PTR_FORMAT", " + "in region "HR_FORMAT, + p, (void*) _containing_obj, + HR_FORMAT_PARAMS(from)); _containing_obj->print_on(gclog_or_tty); - gclog_or_tty->print_cr("points to obj "PTR_FORMAT - " in region %d ["PTR_FORMAT - ", "PTR_FORMAT").", - (void*) obj, to->hrs_index(), - to->bottom(), to->end()); + gclog_or_tty->print_cr("points to obj "PTR_FORMAT" " + "in region "HR_FORMAT, + (void*) obj, + HR_FORMAT_PARAMS(to)); obj->print_on(gclog_or_tty); gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field); @@ -484,11 +481,10 @@ HeapRegion:: -HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, - MemRegion mr, bool is_zeroed) +HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray, + MemRegion mr, bool is_zeroed) : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), - _next_fk(HeapRegionDCTOC::NoFilterKind), - _hrs_index(-1), + _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index), _humongous_type(NotHumongous), _humongous_start_region(NULL), _in_collection_set(false), _is_gc_alloc_region(false), _next_in_special_set(NULL), _orig_end(NULL), @@ -740,20 +736,20 @@ void HeapRegion::verify(bool allow_dirty) const { bool dummy = false; - verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy); + verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy); } // This really ought to be commoned up into OffsetTableContigSpace somehow. // We would need a mechanism to make that code skip dead objects. 
void HeapRegion::verify(bool allow_dirty, - bool use_prev_marking, + VerifyOption vo, bool* failures) const { G1CollectedHeap* g1 = G1CollectedHeap::heap(); *failures = false; HeapWord* p = bottom(); HeapWord* prev_p = NULL; - VerifyLiveClosure vl_cl(g1, use_prev_marking); + VerifyLiveClosure vl_cl(g1, vo); bool is_humongous = isHumongous(); bool do_bot_verify = !is_young(); size_t object_num = 0; @@ -778,7 +774,7 @@ return; } - if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) { + if (!g1->is_obj_dead_cond(obj, this, vo)) { if (obj->is_oop()) { klassOop klass = obj->klass(); if (!klass->is_perm()) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegion.hpp --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -52,9 +52,11 @@ class HeapRegion; class HeapRegionSetBase; -#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" -#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \ - (_hr_)->top(), (_hr_)->end() +#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" +#define HR_FORMAT_PARAMS(_hr_) \ + (_hr_)->hrs_index(), \ + (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \ + (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end() // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap @@ -237,9 +239,8 @@ G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } protected: - // If this region is a member of a HeapRegionSeq, the index in that - // sequence, otherwise -1. - int _hrs_index; + // The index of this region in the heap region sequence. + size_t _hrs_index; HumongousType _humongous_type; // For a humongous region, region in which it starts. @@ -296,8 +297,7 @@ enum YoungType { NotYoung, // a region is not young Young, // a region is young - Survivor // a region is young and it contains - // survivor + Survivor // a region is young and it contains survivors }; volatile YoungType _young_type; @@ -351,7 +351,8 @@ public: // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. - HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, + HeapRegion(size_t hrs_index, + G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr, bool is_zeroed); static int LogOfHRGrainBytes; @@ -393,8 +394,7 @@ // If this region is a member of a HeapRegionSeq, the index in that // sequence, otherwise -1. - int hrs_index() const { return _hrs_index; } - void set_hrs_index(int index) { _hrs_index = index; } + size_t hrs_index() const { return _hrs_index; } // The number of bytes marked live in the region in the last marking phase. size_t marked_bytes() { return _prev_marked_bytes; } @@ -579,6 +579,8 @@ void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; } bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; } + HeapWord* orig_end() { return _orig_end; } + // Allows logical separation between objects allocated before and after. 
void save_marks(); @@ -853,14 +855,20 @@ void print() const; void print_on(outputStream* st) const; - // use_prev_marking == true -> use "prev" marking information, - // use_prev_marking == false -> use "next" marking information + // vo == UsePrevMarking -> use "prev" marking information, + // vo == UseNextMarking -> use "next" marking information + // vo == UseMarkWord -> use the mark word in the object header + // // NOTE: Only the "prev" marking information is guaranteed to be // consistent most of the time, so most calls to this should use - // use_prev_marking == true. Currently, there is only one case where - // this is called with use_prev_marking == false, which is to verify - // the "next" marking information at the end of remark. - void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const; + // vo == UsePrevMarking. + // Currently, there is only one case where this is called with + // vo == UseNextMarking, which is to verify the "next" marking + // information at the end of remark. + // Currently there is only one place where this is called with + // vo == UseMarkWord, which is to verify the marking during a + // full GC. + void verify(bool allow_dirty, VerifyOption vo, bool *failures) const; // Override; it uses the "prev" marking information virtual void verify(bool allow_dirty) const; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -834,7 +834,7 @@ #endif // Set the corresponding coarse bit. - int max_hrs_index = max->hr()->hrs_index(); + size_t max_hrs_index = max->hr()->hrs_index(); if (!_coarse_map.at(max_hrs_index)) { _coarse_map.at_put(max_hrs_index, true); _n_coarse_entries++; @@ -860,7 +860,8 @@ BitMap* region_bm, BitMap* card_bm) { // First eliminated garbage regions from the coarse map. if (G1RSScrubVerbose) - gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index()); + gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":", + hr()->hrs_index()); assert(_coarse_map.size() == region_bm->size(), "Precondition"); if (G1RSScrubVerbose) @@ -878,7 +879,8 @@ PosParPRT* nxt = cur->next(); // If the entire region is dead, eliminate. if (G1RSScrubVerbose) - gclog_or_tty->print_cr(" For other region %d:", cur->hr()->hrs_index()); + gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":", + cur->hr()->hrs_index()); if (!region_bm->at(cur->hr()->hrs_index())) { *prev = nxt; cur->set_next(NULL); @@ -994,7 +996,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); - size_t hrs_ind = (size_t)from_hr->hrs_index(); + size_t hrs_ind = from_hr->hrs_index(); size_t ind = hrs_ind & _mod_max_fine_entries_mask; if (del_single_region_table(ind, from_hr)) { assert(!_coarse_map.at(hrs_ind), "Inv"); @@ -1002,7 +1004,7 @@ _coarse_map.par_at_put(hrs_ind, 0); } // Check to see if any of the fcc entries come from here. 
- int hr_ind = hr()->hrs_index(); + size_t hr_ind = hr()->hrs_index(); for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) { int fcc_ent = _from_card_cache[tid][hr_ind]; if (fcc_ent != -1) { @@ -1083,8 +1085,9 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr) - : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { } - + : _bosa(bosa), _other_regions(hr) { + reset_for_par_iteration(); +} void HeapRegionRemSet::setup_remset_size() { // Setup sparse and fine-grain tables sizes. @@ -1099,10 +1102,6 @@ guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity"); } -void HeapRegionRemSet::init_for_par_iteration() { - _iter_state = Unclaimed; -} - bool HeapRegionRemSet::claim_iter() { if (_iter_state != Unclaimed) return false; jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed); @@ -1117,7 +1116,6 @@ return _iter_state == Complete; } - void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { iter->initialize(this); } @@ -1130,7 +1128,7 @@ while (iter.has_next(card_index)) { HeapWord* card_start = G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index); - gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start); + gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start); } // XXX if (iter.n_yielded() != occupied()) { @@ -1157,6 +1155,14 @@ void HeapRegionRemSet::clear() { _other_regions.clear(); assert(occupied() == 0, "Should be clear."); + reset_for_par_iteration(); +} + +void HeapRegionRemSet::reset_for_par_iteration() { + _iter_state = Unclaimed; + _iter_claimed = 0; + // It's good to check this to make sure that the two methods are in sync. + assert(verify_ready_for_par_iteration(), "post-condition"); } void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -262,8 +262,6 @@ virtual void cleanup() = 0; #endif - // Should be called from single-threaded code. - void init_for_par_iteration(); // Attempt to claim the region. Returns true iff this call caused an // atomic transition from Unclaimed to Claimed. bool claim_iter(); @@ -273,7 +271,6 @@ bool iter_is_complete(); // Support for claiming blocks of cards during iteration - void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; } size_t iter_claimed() const { return (size_t)_iter_claimed; } // Claim the next block of cards size_t iter_claimed_next(size_t step) { @@ -284,6 +281,11 @@ } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current); return current; } + void reset_for_par_iteration(); + + bool verify_ready_for_par_iteration() { + return (_iter_state == Unclaimed) && (_iter_claimed == 0); + } // Initialize the given iterator to iterate over this rem set. 
void init_iterator(HeapRegionRemSetIterator* iter) const; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSeq.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -23,259 +23,182 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" -#include "gc_implementation/g1/heapRegionSeq.hpp" #include "memory/allocation.hpp" -// Local to this file. - -static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) { - if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1; - else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1; - else if (*hr1p == *hr2p) return 0; - else { - assert(false, "We should never compare distinct overlapping regions."); - } - return 0; -} - -HeapRegionSeq::HeapRegionSeq(const size_t max_size) : - _alloc_search_start(0), - // The line below is the worst bit of C++ hackery I've ever written - // (Detlefs, 11/23). You should think of it as equivalent to - // "_regions(100, true)": initialize the growable array and inform it - // that it should allocate its elem array(s) on the C heap. - // - // The first argument, however, is actually a comma expression - // (set_allocation_type(this, C_HEAP), 100). The purpose of the - // set_allocation_type() call is to replace the default allocation - // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will - // allow to pass the assert in GenericGrowableArray() which checks - // that a growable array object must be on C heap if elements are. - // - // Note: containing object is allocated on C heap since it is CHeapObj. - // - _regions((ResourceObj::set_allocation_type((address)&_regions, - ResourceObj::C_HEAP), - (int)max_size), - true), - _next_rr_candidate(0), - _seq_bottom(NULL) -{} - -// Private methods. +// Private -void HeapRegionSeq::print_empty_runs() { - int empty_run = 0; - int n_empty = 0; - int empty_run_start; - for (int i = 0; i < _regions.length(); i++) { - HeapRegion* r = _regions.at(i); - if (r->continuesHumongous()) continue; - if (r->is_empty()) { - assert(!r->isHumongous(), "H regions should not be empty."); - if (empty_run == 0) empty_run_start = i; - empty_run++; - n_empty++; - } else { - if (empty_run > 0) { - gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); - empty_run = 0; - } - } - } - if (empty_run > 0) { - gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); - } - gclog_or_tty->print_cr(" [tot = %d]", n_empty); -} - -int HeapRegionSeq::find(HeapRegion* hr) { - // FIXME: optimized for adjacent regions of fixed size. - int ind = hr->hrs_index(); - if (ind != -1) { - assert(_regions.at(ind) == hr, "Mismatch"); - } - return ind; -} - - -// Public methods. 
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) { + size_t len = length(); + assert(num > 1, "use this only for sequences of length 2 or greater"); + assert(from <= len, + err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT, + from, len)); -void HeapRegionSeq::insert(HeapRegion* hr) { - assert(!_regions.is_full(), "Too many elements in HeapRegionSeq"); - if (_regions.length() == 0 - || _regions.top()->end() <= hr->bottom()) { - hr->set_hrs_index(_regions.length()); - _regions.append(hr); - } else { - _regions.append(hr); - _regions.sort(orderRegions); - for (int i = 0; i < _regions.length(); i++) { - _regions.at(i)->set_hrs_index(i); - } - } - char* bot = (char*)_regions.at(0)->bottom(); - if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot; -} - -size_t HeapRegionSeq::length() { - return _regions.length(); -} - -size_t HeapRegionSeq::free_suffix() { - size_t res = 0; - int first = _regions.length() - 1; - int cur = first; - while (cur >= 0 && - (_regions.at(cur)->is_empty() - && (first == cur - || (_regions.at(cur+1)->bottom() == - _regions.at(cur)->end())))) { - res++; - cur--; - } - return res; -} - -int HeapRegionSeq::find_contiguous_from(int from, size_t num) { - assert(num > 1, "pre-condition"); - assert(0 <= from && from <= _regions.length(), - err_msg("from: %d should be valid and <= than %d", - from, _regions.length())); - - int curr = from; - int first = -1; + size_t curr = from; + size_t first = G1_NULL_HRS_INDEX; size_t num_so_far = 0; - while (curr < _regions.length() && num_so_far < num) { - HeapRegion* curr_hr = _regions.at(curr); - if (curr_hr->is_empty()) { - if (first == -1) { + while (curr < len && num_so_far < num) { + if (at(curr)->is_empty()) { + if (first == G1_NULL_HRS_INDEX) { first = curr; num_so_far = 1; } else { num_so_far += 1; } } else { - first = -1; + first = G1_NULL_HRS_INDEX; num_so_far = 0; } curr += 1; } - assert(num_so_far <= num, "post-condition"); if (num_so_far == num) { // we found enough space for the humongous object - assert(from <= first && first < _regions.length(), "post-condition"); - assert(first < curr && (curr - first) == (int) num, "post-condition"); - for (int i = first; i < first + (int) num; ++i) { - assert(_regions.at(i)->is_empty(), "post-condition"); + assert(from <= first && first < len, "post-condition"); + assert(first < curr && (curr - first) == num, "post-condition"); + for (size_t i = first; i < first + num; ++i) { + assert(at(i)->is_empty(), "post-condition"); } return first; } else { // we failed to find enough space for the humongous object - return -1; + return G1_NULL_HRS_INDEX; } } -int HeapRegionSeq::find_contiguous(size_t num) { - assert(num > 1, "otherwise we should not be calling this"); - assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(), - err_msg("_alloc_search_start: %d should be valid and <= than %d", - _alloc_search_start, _regions.length())); +// Public + +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end, + size_t max_length) { + assert((size_t) bottom % HeapRegion::GrainBytes == 0, + "bottom should be heap region aligned"); + assert((size_t) end % HeapRegion::GrainBytes == 0, + "end should be heap region aligned"); + + _length = 0; + _heap_bottom = bottom; + _heap_end = end; + _region_shift = HeapRegion::LogOfHRGrainBytes; + _next_search_index = 0; + _allocated_length = 0; + _max_length = max_length; + + _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length); + memset(_regions, 0, max_length * sizeof(HeapRegion*)); + 
_regions_biased = _regions - ((size_t) bottom >> _region_shift); + + assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)], + "bottom should be included in the region with index 0"); +} + +MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, + HeapWord* new_end, + FreeRegionList* list) { + assert(old_end < new_end, "don't call it otherwise"); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + HeapWord* next_bottom = old_end; + assert(_heap_bottom <= next_bottom, "invariant"); + while (next_bottom < new_end) { + assert(next_bottom < _heap_end, "invariant"); + size_t index = length(); - int start = _alloc_search_start; - int res = find_contiguous_from(start, num); - if (res == -1 && start != 0) { - // Try starting from the beginning. If _alloc_search_start was 0, - // no point in doing this again. - res = find_contiguous_from(0, num); + assert(index < _max_length, "otherwise we cannot expand further"); + if (index == 0) { + // We have not allocated any regions so far + assert(next_bottom == _heap_bottom, "invariant"); + } else { + // next_bottom should match the end of the last/previous region + assert(next_bottom == at(index - 1)->end(), "invariant"); + } + + if (index == _allocated_length) { + // We have to allocate a new HeapRegion. + HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom); + if (new_hr == NULL) { + // allocation failed, we bail out and return what we have done so far + return MemRegion(old_end, next_bottom); + } + assert(_regions[index] == NULL, "invariant"); + _regions[index] = new_hr; + increment_length(&_allocated_length); + } + // Have to increment the length first, otherwise we will get an + // assert failure at(index) below. + increment_length(&_length); + HeapRegion* hr = at(index); + list->add_as_tail(hr); + + next_bottom = hr->end(); } - if (res != -1) { - assert(0 <= res && res < _regions.length(), - err_msg("res: %d should be valid", res)); - _alloc_search_start = res + (int) num; - assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(), - err_msg("_alloc_search_start: %d should be valid", - _alloc_search_start)); + assert(next_bottom == new_end, "post-condition"); + return MemRegion(old_end, next_bottom); +} + +size_t HeapRegionSeq::free_suffix() { + size_t res = 0; + size_t index = length(); + while (index > 0) { + index -= 1; + if (!at(index)->is_empty()) { + break; + } + res += 1; } return res; } -void HeapRegionSeq::iterate(HeapRegionClosure* blk) { - iterate_from((HeapRegion*)NULL, blk); +size_t HeapRegionSeq::find_contiguous(size_t num) { + assert(num > 1, "use this only for sequences of length 2 or greater"); + assert(_next_search_index <= length(), + err_msg("_next_search_indeex: "SIZE_FORMAT" " + "should be valid and <= than "SIZE_FORMAT, + _next_search_index, length())); + + size_t start = _next_search_index; + size_t res = find_contiguous_from(start, num); + if (res == G1_NULL_HRS_INDEX && start > 0) { + // Try starting from the beginning. If _next_search_index was 0, + // no point in doing this again. + res = find_contiguous_from(0, num); + } + if (res != G1_NULL_HRS_INDEX) { + assert(res < length(), + err_msg("res: "SIZE_FORMAT" should be valid", res)); + _next_search_index = res + num; + assert(_next_search_index <= length(), + err_msg("_next_search_indeex: "SIZE_FORMAT" " + "should be valid and <= than "SIZE_FORMAT, + _next_search_index, length())); + } + return res; } -// The first argument r is the heap region at which iteration begins. 
-// This operation runs fastest when r is NULL, or the heap region for -// which a HeapRegionClosure most recently returned true, or the -// heap region immediately to its right in the sequence. In all -// other cases a linear search is required to find the index of r. - -void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) { - - // :::: FIXME :::: - // Static cache value is bad, especially when we start doing parallel - // remembered set update. For now just don't cache anything (the - // code in the def'd out blocks). +void HeapRegionSeq::iterate(HeapRegionClosure* blk) const { + iterate_from((HeapRegion*) NULL, blk); +} -#if 0 - static int cached_j = 0; -#endif - int len = _regions.length(); - int j = 0; - // Find the index of r. - if (r != NULL) { -#if 0 - assert(cached_j >= 0, "Invariant."); - if ((cached_j < len) && (r == _regions.at(cached_j))) { - j = cached_j; - } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) { - j = cached_j + 1; - } else { - j = find(r); -#endif - if (j < 0) { - j = 0; - } -#if 0 - } -#endif +void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { + size_t hr_index = 0; + if (hr != NULL) { + hr_index = (size_t) hr->hrs_index(); } - int i; - for (i = j; i < len; i += 1) { - int res = blk->doHeapRegion(_regions.at(i)); + + size_t len = length(); + for (size_t i = hr_index; i < len; i += 1) { + bool res = blk->doHeapRegion(at(i)); if (res) { -#if 0 - cached_j = i; -#endif blk->incomplete(); return; } } - for (i = 0; i < j; i += 1) { - int res = blk->doHeapRegion(_regions.at(i)); + for (size_t i = 0; i < hr_index; i += 1) { + bool res = blk->doHeapRegion(at(i)); if (res) { -#if 0 - cached_j = i; -#endif - blk->incomplete(); - return; - } - } -} - -void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) { - int len = _regions.length(); - int i; - for (i = idx; i < len; i++) { - if (blk->doHeapRegion(_regions.at(i))) { - blk->incomplete(); - return; - } - } - for (i = 0; i < idx; i++) { - if (blk->doHeapRegion(_regions.at(i))) { blk->incomplete(); return; } @@ -283,54 +206,92 @@ } MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, - size_t& num_regions_deleted) { + size_t* num_regions_deleted) { // Reset this in case it's currently pointing into the regions that // we just removed. - _alloc_search_start = 0; + _next_search_index = 0; assert(shrink_bytes % os::vm_page_size() == 0, "unaligned"); assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned"); + assert(length() > 0, "the region sequence should not be empty"); + assert(length() <= _allocated_length, "invariant"); + assert(_allocated_length > 0, "we should have at least one region committed"); - if (_regions.length() == 0) { - num_regions_deleted = 0; - return MemRegion(); - } - int j = _regions.length() - 1; - HeapWord* end = _regions.at(j)->end(); + // around the loop, i will be the next region to be removed + size_t i = length() - 1; + assert(i > 0, "we should never remove all regions"); + // [last_start, end) is the MemRegion that covers the regions we will remove. + HeapWord* end = at(i)->end(); HeapWord* last_start = end; - while (j >= 0 && shrink_bytes > 0) { - HeapRegion* cur = _regions.at(j); - // We have to leave humongous regions where they are, - // and work around them. 
- if (cur->isHumongous()) { - return MemRegion(last_start, end); - } - assert(cur == _regions.top(), "Should be top"); + *num_regions_deleted = 0; + while (shrink_bytes > 0) { + HeapRegion* cur = at(i); + // We should leave the humongous regions where they are. + if (cur->isHumongous()) break; + // We should stop shrinking if we come across a non-empty region. if (!cur->is_empty()) break; + + i -= 1; + *num_regions_deleted += 1; shrink_bytes -= cur->capacity(); - num_regions_deleted++; - _regions.pop(); last_start = cur->bottom(); - // We need to delete these somehow, but can't currently do so here: if - // we do, the ZF thread may still access the deleted region. We'll - // leave this here as a reminder that we have to do something about - // this. - // delete cur; - j--; + decrement_length(&_length); + // We will reclaim the HeapRegion. _allocated_length should be + // covering this index. So, even though we removed the region from + // the active set by decreasing _length, we still have it + // available in the future if we need to re-use it. + assert(i > 0, "we should never remove all regions"); + assert(length() > 0, "we should never remove all regions"); } return MemRegion(last_start, end); } -class PrintHeapRegionClosure : public HeapRegionClosure { -public: - bool doHeapRegion(HeapRegion* r) { - gclog_or_tty->print(PTR_FORMAT ":", r); - r->print(); - return false; +#ifndef PRODUCT +void HeapRegionSeq::verify_optional() { + guarantee(_length <= _allocated_length, + err_msg("invariant: _length: "SIZE_FORMAT" " + "_allocated_length: "SIZE_FORMAT, + _length, _allocated_length)); + guarantee(_allocated_length <= _max_length, + err_msg("invariant: _allocated_length: "SIZE_FORMAT" " + "_max_length: "SIZE_FORMAT, + _allocated_length, _max_length)); + guarantee(_next_search_index <= _length, + err_msg("invariant: _next_search_index: "SIZE_FORMAT" " + "_length: "SIZE_FORMAT, + _next_search_index, _length)); + + HeapWord* prev_end = _heap_bottom; + for (size_t i = 0; i < _allocated_length; i += 1) { + HeapRegion* hr = _regions[i]; + guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i)); + guarantee(hr->bottom() == prev_end, + err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" " + "prev_end: "PTR_FORMAT, + i, HR_FORMAT_PARAMS(hr), prev_end)); + guarantee(hr->hrs_index() == i, + err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT, + i, hr->hrs_index())); + if (i < _length) { + // Asserts will fire if i is >= _length + HeapWord* addr = hr->bottom(); + guarantee(addr_to_region(addr) == hr, "sanity"); + guarantee(addr_to_region_unsafe(addr) == hr, "sanity"); + } else { + guarantee(hr->is_empty(), "sanity"); + guarantee(!hr->isHumongous(), "sanity"); + // using assert instead of guarantee here since containing_set() + // is only available in non-product builds. 
+ assert(hr->containing_set() == NULL, "sanity"); + } + if (hr->startsHumongous()) { + prev_end = hr->orig_end(); + } else { + prev_end = hr->end(); + } } -}; - -void HeapRegionSeq::print() { - PrintHeapRegionClosure cl; - iterate(&cl); + for (size_t i = _allocated_length; i < _max_length; i += 1) { + guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i)); + } } +#endif // PRODUCT diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSeq.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -25,92 +25,143 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP -#include "gc_implementation/g1/heapRegion.hpp" -#include "utilities/growableArray.hpp" - class HeapRegion; class HeapRegionClosure; +class FreeRegionList; + +#define G1_NULL_HRS_INDEX ((size_t) -1) + +// This class keeps track of the region metadata (i.e., HeapRegion +// instances). They are kept in the _regions array in address +// order. A region's index in the array corresponds to its index in +// the heap (i.e., 0 is the region at the bottom of the heap, 1 is +// the one after it, etc.). Two regions that are consecutive in the +// array should also be adjacent in the address space (i.e., +// region(i).end() == region(i+1).bottom(). +// +// We create a HeapRegion when we commit the region's address space +// for the first time. When we uncommit the address space of a +// region we retain the HeapRegion to be able to re-use it in the +// future (in case we recommit it). +// +// We keep track of three lengths: +// +// * _length (returned by length()) is the number of currently +// committed regions. +// * _allocated_length (not exposed outside this class) is the +// number of regions for which we have HeapRegions. +// * _max_length (returned by max_length()) is the maximum number of +// regions the heap can have. +// +// and maintain that: _length <= _allocated_length <= _max_length class HeapRegionSeq: public CHeapObj { - // _regions is kept sorted by start address order, and no two regions are - // overlapping. - GrowableArray _regions; + // The array that holds the HeapRegions. + HeapRegion** _regions; + + // Version of _regions biased to address 0 + HeapRegion** _regions_biased; + + // The number of regions committed in the heap. + size_t _length; - // The index in "_regions" at which to start the next allocation search. - // (For efficiency only; private to obj_allocate after initialization.) - int _alloc_search_start; + // The address of the first reserved word in the heap. + HeapWord* _heap_bottom; + + // The address of the last reserved word in the heap - 1. + HeapWord* _heap_end; + + // The log of the region byte size. + size_t _region_shift; + + // A hint for which index to start searching from for humongous + // allocations. + size_t _next_search_index; - // Finds a contiguous set of empty regions of length num, starting - // from a given index. - int find_contiguous_from(int from, size_t num); + // The number of regions for which we have allocated HeapRegions for. + size_t _allocated_length; + + // The maximum number of regions in the heap. + size_t _max_length; + + // Find a contiguous set of empty regions of length num, starting + // from the given index. 
+ size_t find_contiguous_from(size_t from, size_t num); - // Currently, we're choosing collection sets in a round-robin fashion, - // starting here. - int _next_rr_candidate; + // Map a heap address to a biased region index. Assume that the + // address is valid. + inline size_t addr_to_index_biased(HeapWord* addr) const; - // The bottom address of the bottom-most region, or else NULL if there - // are no regions in the sequence. - char* _seq_bottom; + void increment_length(size_t* length) { + assert(*length < _max_length, "pre-condition"); + *length += 1; + } + + void decrement_length(size_t* length) { + assert(*length > 0, "pre-condition"); + *length -= 1; + } public: - // Initializes "this" to the empty sequence of regions. - HeapRegionSeq(const size_t max_size); + // Empty contructor, we'll initialize it with the initialize() method. + HeapRegionSeq() { } + + void initialize(HeapWord* bottom, HeapWord* end, size_t max_length); - // Adds "hr" to "this" sequence. Requires "hr" not to overlap with - // any region already in "this". (Will perform better if regions are - // inserted in ascending address order.) - void insert(HeapRegion* hr); + // Return the HeapRegion at the given index. Assume that the index + // is valid. + inline HeapRegion* at(size_t index) const; + + // If addr is within the committed space return its corresponding + // HeapRegion, otherwise return NULL. + inline HeapRegion* addr_to_region(HeapWord* addr) const; + + // Return the HeapRegion that corresponds to the given + // address. Assume the address is valid. + inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const; - // Given a HeapRegion*, returns its index within _regions, - // or returns -1 if not found. - int find(HeapRegion* hr); + // Return the number of regions that have been committed in the heap. + size_t length() const { return _length; } + + // Return the maximum number of regions in the heap. + size_t max_length() const { return _max_length; } - // Requires the index to be valid, and return the region at the index. - HeapRegion* at(size_t i) { return _regions.at((int)i); } + // Expand the sequence to reflect that the heap has grown from + // old_end to new_end. Either create new HeapRegions, or re-use + // existing ones, and return them in the given list. Returns the + // memory region that covers the newly-created regions. If a + // HeapRegion allocation fails, the result memory region might be + // smaller than the desired one. + MemRegion expand_by(HeapWord* old_end, HeapWord* new_end, + FreeRegionList* list); - // Return the number of regions in the sequence. - size_t length(); - - // Returns the number of contiguous regions at the end of the sequence + // Return the number of contiguous regions at the end of the sequence // that are available for allocation. size_t free_suffix(); // Find a contiguous set of empty regions of length num and return - // the index of the first region or -1 if the search was unsuccessful. - int find_contiguous(size_t num); + // the index of the first region or G1_NULL_HRS_INDEX if the + // search was unsuccessful. + size_t find_contiguous(size_t num); - // Apply the "doHeapRegion" method of "blk" to all regions in "this", - // in address order, terminating the iteration early - // if the "doHeapRegion" method returns "true". 
- void iterate(HeapRegionClosure* blk); - - // Apply the "doHeapRegion" method of "blk" to all regions in "this", - // starting at "r" (or first region, if "r" is NULL), in a circular - // manner, terminating the iteration early if the "doHeapRegion" method - // returns "true". - void iterate_from(HeapRegion* r, HeapRegionClosure* blk); + // Apply blk->doHeapRegion() on all committed regions in address order, + // terminating the iteration early if doHeapRegion() returns true. + void iterate(HeapRegionClosure* blk) const; - // As above, but start from a given index in the sequence - // instead of a given heap region. - void iterate_from(int idx, HeapRegionClosure* blk); + // As above, but start the iteration from hr and loop around. If hr + // is NULL, we start from the first region in the heap. + void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const; - // Requires "shrink_bytes" to be a multiple of the page size and heap - // region granularity. Deletes as many "rightmost" completely free heap - // regions from the sequence as comprise shrink_bytes bytes. Returns the - // MemRegion indicating the region those regions comprised, and sets - // "num_regions_deleted" to the number of regions deleted. - MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted); + // Tag as uncommitted as many regions that are completely free as + // possible, up to shrink_bytes, from the suffix of the committed + // sequence. Return a MemRegion that corresponds to the address + // range of the uncommitted regions. Assume shrink_bytes is page and + // heap region aligned. + MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted); - // If "addr" falls within a region in the sequence, return that region, - // or else NULL. - inline HeapRegion* addr_to_region(const void* addr); - - void print(); - - // Prints out runs of empty regions. - void print_empty_runs(); - + // Do some sanity checking. + void verify_optional() PRODUCT_RETURN; }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,23 +25,42 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP +#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp" -inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) { - assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region"); - if ((char*) addr >= _seq_bottom) { - size_t diff = (size_t) pointer_delta((HeapWord*) addr, - (HeapWord*) _seq_bottom); - int index = (int) (diff >> HeapRegion::LogOfHRGrainWords); - assert(index >= 0, "invariant / paranoia"); - if (index < _regions.length()) { - HeapRegion* hr = _regions.at(index); - assert(hr->is_in_reserved(addr), - "addr_to_region is wrong..."); - return hr; - } +inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const { + assert(_heap_bottom <= addr && addr < _heap_end, + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, + addr, _heap_bottom, _heap_end)); + size_t index = (size_t) addr >> _region_shift; + return index; +} + +inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const { + assert(_heap_bottom <= addr && addr < _heap_end, + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, + addr, _heap_bottom, _heap_end)); + size_t index_biased = addr_to_index_biased(addr); + HeapRegion* hr = _regions_biased[index_biased]; + assert(hr != NULL, "invariant"); + return hr; +} + +inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const { + if (addr != NULL && addr < _heap_end) { + assert(addr >= _heap_bottom, + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom)); + return addr_to_region_unsafe(addr); } return NULL; } +inline HeapRegion* HeapRegionSeq::at(size_t index) const { + assert(index < length(), "pre-condition"); + HeapRegion* hr = _regions[index]; + assert(hr != NULL, "sanity"); + assert(hr->hrs_index() == index, "sanity"); + return hr; +} + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSets.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" //////////////////// FreeRegionList //////////////////// @@ -38,6 +39,16 @@ //////////////////// MasterFreeRegionList //////////////////// +const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) { + // We should reset the RSet for parallel iteration before we add it + // to the master free list so that it is ready when the region is + // re-allocated. 
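The biased lookup used by addr_to_index_biased()/addr_to_region_unsafe() above can be sketched in isolation as follows (illustrative only; the types and field names are invented stand-ins for the _regions_biased array in the real code). The table pointer is pre-offset ("biased") by heap_bottom >> shift, so the hot path is a single shift plus a load, with no subtraction of the heap base:

#include <cassert>
#include <cstdint>

struct Region { int id; };   // stand-in for HeapRegion

struct BiasedRegionMap {
  Region** biased_base;   // == real table base - (heap_bottom >> shift)
  uintptr_t heap_bottom;
  uintptr_t heap_end;
  unsigned  shift;        // log2(region size in bytes)

  Region* region_for(uintptr_t addr) const {
    assert(heap_bottom <= addr && addr < heap_end);
    return biased_base[addr >> shift];   // one shift, one load
  }
};

int main() {
  Region r2 = { 2 };
  Region* storage[4] = { 0, 0, &r2, 0 };
  // Pretend the heap spans [0x10000, 0x14000) with 0x1000-byte regions.
  // Biasing the pointer is the same trick the VM uses; the out-of-range
  // intermediate pointer is never dereferenced directly.
  BiasedRegionMap map = { storage - (0x10000u >> 12), 0x10000, 0x14000, 12 };
  return map.region_for(0x12345u) == &r2 ? 0 : 1;   // 0x12345 lies in region 2
}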
+ if (!hr->rem_set()->verify_ready_for_par_iteration()) { + return "the region's RSet should be ready for parallel iteration"; + } + return FreeRegionList::verify_region_extra(hr); +} + bool MasterFreeRegionList::check_mt_safety() { // Master Free List MT safety protocol: // (a) If we're at a safepoint, operations on the master free list diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/heapRegionSets.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ class MasterFreeRegionList : public FreeRegionList { protected: + virtual const char* verify_region_extra(HeapRegion* hr); virtual bool check_mt_safety(); public: diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/sparsePRT.cpp --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -481,8 +481,9 @@ bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) { #if SPARSE_PRT_VERBOSE - gclog_or_tty->print_cr(" Adding card %d from region %d to region %d sparse.", - card_index, region_id, _hr->hrs_index()); + gclog_or_tty->print_cr(" Adding card %d from region %d to region " + SIZE_FORMAT" sparse.", + card_index, region_id, _hr->hrs_index()); #endif if (_next->occupied_entries() * 2 > _next->capacity()) { expand(); @@ -533,8 +534,8 @@ _next = new RSHashTable(last->capacity() * 2); #if SPARSE_PRT_VERBOSE - gclog_or_tty->print_cr(" Expanded sparse table for %d to %d.", - _hr->hrs_index(), _next->capacity()); + gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.", + _hr->hrs_index(), _next->capacity()); #endif for (size_t i = 0; i < last->capacity(); i++) { SparsePRTEntry* e = last->entry((int)i); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/g1/vm_operations_g1.cpp --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -99,6 +99,18 @@ // At this point we are supposed to start a concurrent cycle. We // will do so if one is not already in progress. bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(); + + // The above routine returns true if we were able to force the + // next GC pause to be an initial mark; it returns false if a + // marking cycle is already in progress. + // + // If a marking cycle is already in progress just return and skip + // the pause - the requesting thread should block in doit_epilogue + // until the marking cycle is complete. + if (!res) { + assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating"); + return; + } } _pause_succeeded = diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp --- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -348,15 +348,31 @@ // cleared before we had a chance to examine it. 
In that case, the value // will have been logged in the LNC for that chunk. // We need to examine as many chunks to the right as this object - // covers. - const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) - - lowest_non_clean_base_chunk_index; - DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) - - lowest_non_clean_base_chunk_index;) - assert(last_chunk_index_to_check <= last_chunk_index, - err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT - " exceeds last_chunk_index " INTPTR_FORMAT, - last_chunk_index_to_check, last_chunk_index)); + // covers. However, we need to bound this checking to the largest + // entry in the LNC array: this is because the heap may expand + // after the LNC array has been created but before we reach this point, + // and the last block in our chunk may have been expanded to include + // the expansion delta (and possibly subsequently allocated from, so + // it wouldn't be sufficient to check whether that last block was + // or was not an object at this point). + uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) + - lowest_non_clean_base_chunk_index; + const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) + - lowest_non_clean_base_chunk_index; + if (last_chunk_index_to_check > last_chunk_index) { + assert(last_block + last_block_size > used.end(), + err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]" + " does not exceed used.end() = " PTR_FORMAT "," + " yet last_chunk_index_to_check " INTPTR_FORMAT + " exceeds last_chunk_index " INTPTR_FORMAT, + last_chunk_index_to_check, last_chunk_index)); + assert(sp->used_region().end() > used.end(), + err_msg("Expansion did not happen: " + "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")", + sp->used_region().start(), sp->used_region().end(), used.start(), used.end())); + NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");) + last_chunk_index_to_check = last_chunk_index; + } for (uintptr_t lnc_index = cur_chunk_index + 1; lnc_index <= last_chunk_index_to_check; lnc_index++) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -386,8 +386,6 @@ // we rely on the size_policy object to force a bail out. HeapWord* ParallelScavengeHeap::mem_allocate( size_t size, - bool is_noref, - bool is_tlab, bool* gc_overhead_limit_was_exceeded) { assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); @@ -398,7 +396,7 @@ // limit is being exceeded as checked below. 
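A worked example of the bounding logic added to the chunk-boundary processing above (standalone C++; the chunk size and addresses are made-up numbers): an index computed from the last block may run past the end of the recorded "used" range if the heap expanded after the LNC array was sized, so it is clamped to the last valid chunk index.

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  const uintptr_t chunk_size = 512;        // bytes per chunk (example value)
  uintptr_t used_last        = 8192 - 1;   // last byte of the recorded used region
  uintptr_t block_end        = 9000;       // block may spill past used_last after expansion

  uintptr_t last_index_to_check = (block_end - 1) / chunk_size;   // 17
  const uintptr_t last_index    = used_last / chunk_size;         // 15

  // Same effect as the "heap expanded; explicitly bounding last_chunk" branch.
  last_index_to_check = std::min(last_index_to_check, last_index);
  std::cout << "checking chunks up to index " << last_index_to_check << '\n';
  return 0;
}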
*gc_overhead_limit_was_exceeded = false; - HeapWord* result = young_gen()->allocate(size, is_tlab); + HeapWord* result = young_gen()->allocate(size); uint loop_count = 0; uint gc_count = 0; @@ -419,7 +417,7 @@ MutexLocker ml(Heap_lock); gc_count = Universe::heap()->total_collections(); - result = young_gen()->allocate(size, is_tlab); + result = young_gen()->allocate(size); // (1) If the requested object is too large to easily fit in the // young_gen, or @@ -433,21 +431,13 @@ if (result != NULL) { return result; } - if (!is_tlab && - size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) { - result = old_gen()->allocate(size, is_tlab); + if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) { + result = old_gen()->allocate(size); if (result != NULL) { return result; } } if (GC_locker::is_active_and_needs_gc()) { - // GC is locked out. If this is a TLAB allocation, - // return NULL; the requestor will retry allocation - // of an idividual object at a time. - if (is_tlab) { - return NULL; - } - // If this thread is not in a jni critical section, we stall // the requestor until the critical section has cleared and // GC allowed. When the critical section clears, a GC is @@ -472,7 +462,7 @@ if (result == NULL) { // Generate a VM operation - VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count); + VM_ParallelGCFailedAllocation op(size, gc_count); VMThread::execute(&op); // Did the VM operation execute? If so, return the result directly. @@ -526,7 +516,7 @@ if ((result == NULL) && (QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) { warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t" - " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : ""); + " size=%d", loop_count, size); } } @@ -539,7 +529,7 @@ // time over limit here, that is the responsibility of the heap specific // collection methods. This method decides where to attempt allocations, // and when to attempt collections, but no collection specific policy. -HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) { +HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(!Universe::heap()->is_gc_active(), "not reentrant"); @@ -553,7 +543,7 @@ // First level allocation failure, scavenge and allocate in young gen. GCCauseSetter gccs(this, GCCause::_allocation_failure); PSScavenge::invoke(); - HeapWord* result = young_gen()->allocate(size, is_tlab); + HeapWord* result = young_gen()->allocate(size); // Second level allocation failure. // Mark sweep and allocate in young generation. @@ -562,28 +552,28 @@ // Don't mark sweep twice if so. if (mark_sweep_invocation_count == total_invocations()) { invoke_full_gc(false); - result = young_gen()->allocate(size, is_tlab); + result = young_gen()->allocate(size); } } // Third level allocation failure. // After mark sweep and young generation allocation failure, // allocate in old generation. - if (result == NULL && !is_tlab) { - result = old_gen()->allocate(size, is_tlab); + if (result == NULL) { + result = old_gen()->allocate(size); } // Fourth level allocation failure. We're running out of memory. // More complete mark sweep and allocate in young generation. 
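The placement policy that mem_allocate() keeps after dropping the TLAB path can be summarized in a small sketch (the helper below and its names are invented; the real decision is spread across the retry loop above): young generation first, old generation only for objects at least half the size of eden, otherwise request a GC.

#include <cstddef>

enum class Placement { Young, Old, NeedsGC };

Placement choose_placement(size_t word_size, size_t eden_capacity_words,
                           bool young_alloc_failed, bool old_alloc_failed) {
  if (!young_alloc_failed) return Placement::Young;
  if (word_size >= eden_capacity_words / 2 && !old_alloc_failed)
    return Placement::Old;         // large object: go straight to the old gen
  return Placement::NeedsGC;       // schedule VM_ParallelGCFailedAllocation
}

int main() {
  // 4096 words is exactly half of an 8192-word eden, so it may go to old gen.
  return choose_placement(4096, 8192, true, false) == Placement::Old ? 0 : 1;
}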
if (result == NULL) { invoke_full_gc(true); - result = young_gen()->allocate(size, is_tlab); + result = young_gen()->allocate(size); } // Fifth level allocation failure. // After more complete mark sweep, allocate in old generation. - if (result == NULL && !is_tlab) { - result = old_gen()->allocate(size, is_tlab); + if (result == NULL) { + result = old_gen()->allocate(size); } return result; @@ -761,7 +751,7 @@ } HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) { - return young_gen()->allocate(size, true); + return young_gen()->allocate(size); } void ParallelScavengeHeap::accumulate_statistics_all_tlabs() { @@ -901,7 +891,7 @@ } -void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) { +void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) { // Why do we need the total_collections()-filter below? if (total_collections() > 0) { if (!silent) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -165,12 +165,13 @@ // an excessive amount of time is being spent doing collections // and caused a NULL to be returned. If a NULL is not returned, // "gc_time_limit_was_exceeded" has an undefined meaning. + HeapWord* mem_allocate(size_t size, + bool* gc_overhead_limit_was_exceeded); - HeapWord* mem_allocate(size_t size, - bool is_noref, - bool is_tlab, - bool* gc_overhead_limit_was_exceeded); - HeapWord* failed_mem_allocate(size_t size, bool is_tlab); + // Allocation attempt(s) during a safepoint. It should never be called + // to allocate a new TLAB as this allocation might be satisfied out + // of the old generation. + HeapWord* failed_mem_allocate(size_t size); HeapWord* permanent_mem_allocate(size_t size); HeapWord* failed_permanent_mem_allocate(size_t size); @@ -194,8 +195,6 @@ inline void invoke_scavenge(); inline void invoke_full_gc(bool maximum_compaction); - size_t large_typearray_limit() { return FastAllocateSizeLimit; } - bool supports_inline_contig_alloc() const { return !UseNUMA; } HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; } @@ -253,7 +252,7 @@ virtual void gc_threads_do(ThreadClosure* tc) const; virtual void print_tracing_info() const; - void verify(bool allow_dirty, bool silent, bool /* option */); + void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */); void print_heap_change(size_t prev_used); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -182,12 +182,12 @@ // Allocation. We report all successful allocations to the size policy // Note that the perm gen does not use this method, and should not! 
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) { +HeapWord* PSOldGen::allocate(size_t word_size) { assert_locked_or_safepoint(Heap_lock); - HeapWord* res = allocate_noexpand(word_size, is_tlab); + HeapWord* res = allocate_noexpand(word_size); if (res == NULL) { - res = expand_and_allocate(word_size, is_tlab); + res = expand_and_allocate(word_size); } // Allocations in the old generation need to be reported @@ -199,13 +199,12 @@ return res; } -HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) { - assert(!is_tlab, "TLAB's are not supported in PSOldGen"); +HeapWord* PSOldGen::expand_and_allocate(size_t word_size) { expand(word_size*HeapWordSize); if (GCExpandToAllocateDelayMillis > 0) { os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); } - return allocate_noexpand(word_size, is_tlab); + return allocate_noexpand(word_size); } HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -60,9 +60,8 @@ // Used when initializing the _name field. static inline const char* select_name(); - HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) { + HeapWord* allocate_noexpand(size_t word_size) { // We assume the heap lock is held here. - assert(!is_tlab, "Does not support TLAB allocation"); assert_locked_or_safepoint(Heap_lock); HeapWord* res = object_space()->allocate(word_size); if (res != NULL) { @@ -89,7 +88,7 @@ return (res == NULL) ? expand_and_cas_allocate(word_size) : res; } - HeapWord* expand_and_allocate(size_t word_size, bool is_tlab); + HeapWord* expand_and_allocate(size_t word_size); HeapWord* expand_and_cas_allocate(size_t word_size); void expand(size_t bytes); bool expand_by(size_t bytes); @@ -164,7 +163,7 @@ // Allocation. We report all successful allocations to the size policy // Note that the perm gen does not use this method, and should not! - HeapWord* allocate(size_t word_size, bool is_tlab); + HeapWord* allocate(size_t word_size); // Iteration. 
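The expand-then-retry pattern that PSOldGen::allocate() retains, now without the unused is_tlab flag, looks roughly like this when modelled with plain counters (illustrative sketch, not HotSpot code; offsets stand in for heap words):

#include <cstddef>

struct OldGenModel {
  size_t committed_words = 1024;
  size_t used_words      = 1024;   // start out full to force an expansion

  long allocate_noexpand(size_t words) {
    if (used_words + words > committed_words) return -1;   // would not fit
    long offset = (long)used_words;
    used_words += words;
    return offset;
  }
  void expand(size_t words) { committed_words += words; }

  long allocate(size_t words) {
    long res = allocate_noexpand(words);
    if (res < 0) {                  // corresponds to expand_and_allocate()
      expand(words);
      res = allocate_noexpand(words);
    }
    return res;
  }
};

int main() {
  OldGenModel gen;
  return gen.allocate(8) == 1024 ? 0 : 1;
}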
void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -46,10 +46,10 @@ HeapWord* PSPermGen::allocate_permanent(size_t size) { assert_locked_or_safepoint(Heap_lock); - HeapWord* obj = allocate_noexpand(size, false); + HeapWord* obj = allocate_noexpand(size); if (obj == NULL) { - obj = expand_and_allocate(size, false); + obj = expand_and_allocate(size); } return obj; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -157,7 +157,7 @@ } // Allocation - HeapWord* allocate(size_t word_size, bool is_tlab) { + HeapWord* allocate(size_t word_size) { HeapWord* result = eden_space()->cas_allocate(word_size); return result; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -33,10 +33,9 @@ // The following methods are used by the parallel scavenge collector VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size, - bool is_tlab, unsigned int gc_count) : + unsigned int gc_count) : VM_GC_Operation(gc_count, GCCause::_allocation_failure), _size(size), - _is_tlab(is_tlab), _result(NULL) { } @@ -48,7 +47,7 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap"); GCCauseSetter gccs(heap, _gc_cause); - _result = heap->failed_mem_allocate(_size, _is_tlab); + _result = heap->failed_mem_allocate(_size); if (_result == NULL && GC_locker::is_active_and_needs_gc()) { set_gc_locked(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,12 +32,10 @@ class VM_ParallelGCFailedAllocation: public VM_GC_Operation { private: size_t _size; - bool _is_tlab; HeapWord* _result; public: - VM_ParallelGCFailedAllocation(size_t size, bool is_tlab, - unsigned int gc_count); + VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count); virtual VMOp_Type type() const { return VMOp_ParallelGCFailedAllocation; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/shared/allocationStats.hpp --- a/src/share/vm/gc_implementation/shared/allocationStats.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -99,14 +99,16 @@ // vulnerable to noisy glitches. 
In such cases, we // ignore the current sample and use currently available // historical estimates. - // XXX NEEDS TO BE FIXED - // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle"); - // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - // "Total Stock" "Not used at this block size" + assert(prevSweep() + splitBirths() + coalBirths() // "Total Production Stock" + >= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion" + "Conservation Principle"); if (inter_sweep_current > _threshold) { - ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths(); - // XXX NEEDS TO BE FIXED - // assert(demand >= 0, "Demand should be non-negative"); + ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths() + - splitDeaths() - coalDeaths(); + assert(demand >= 0, + err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for " + PTR_FORMAT " (size=" SIZE_FORMAT ")", + demand, this, count)); // Defensive: adjust for imprecision in event counting if (demand < 0) { demand = 0; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/shared/concurrentGCThread.cpp --- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -43,17 +43,6 @@ _sts.initialize(); }; -void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) { - MutexLockerEx x(Heap_lock, - Mutex::_no_safepoint_check_flag); - // warning("CGC: about to try stopping world"); - SafepointSynchronize::begin(); - // warning("CGC: successfully stopped world"); - op->do_void(); - SafepointSynchronize::end(); - // warning("CGC: successfully restarted world"); -} - void ConcurrentGCThread::safepoint_synchronize() { _sts.suspend_all(); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_implementation/shared/concurrentGCThread.hpp --- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -95,8 +95,6 @@ static int set_CGC_flag(int b) { return _CGC_flag |= b; } static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; } - void stopWorldAndDo(VoidClosure* op); - // All instances share this one set. static SuspendibleThreadSet _sts; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,7 +93,7 @@ // pure virtual. void pre_initialize(); - // Create a new tlab + // Create a new tlab. All TLAB allocations must go through this. virtual HeapWord* allocate_new_tlab(size_t size); // Accumulate statistics on all tlabs. @@ -109,11 +109,11 @@ // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. 
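A small worked example of the bookkeeping behind the strengthened asserts in allocationStats.hpp above (plain C++ with made-up counts): total production since the previous sweep must cover the current stock plus depletion, and demand is what was actually consumed.

#include <cassert>

int main() {
  long prev_sweep   = 100;   // blocks of this size at the previous sweep
  long split_births = 30, coal_births = 10;
  long split_deaths = 20, coal_deaths = 5;
  long count        = 90;    // blocks currently on the free lists

  // Conservation principle: "Total Production Stock" >= "Current stock + depletion".
  assert(prev_sweep + split_births + coal_births >=
         split_deaths + coal_deaths + count);

  long demand = prev_sweep - count + split_births + coal_births
              - split_deaths - coal_deaths;   // == 25 blocks consumed
  assert(demand >= 0);
  return 0;
}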
- inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS); + inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS); // Like allocate_init, but the block returned by a successful allocation // is guaranteed initialized to zeros. - inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS); + inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS); // Same as common_mem version, except memory is allocated in the permanent area // If there is no permanent area, revert to common_mem_allocate_noinit @@ -322,7 +322,6 @@ // General obj/array allocation facilities. inline static oop obj_allocate(KlassHandle klass, int size, TRAPS); inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS); - inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS); // Special obj/array allocation facilities. // Some heaps may want to manage "permanent" data uniquely. These default @@ -345,16 +344,12 @@ // Raw memory allocation facilities // The obj and array allocate methods are covers for these methods. // The permanent allocation method should default to mem_allocate if - // permanent memory isn't supported. + // permanent memory isn't supported. mem_allocate() should never be + // called to allocate TLABs, only individual objects. virtual HeapWord* mem_allocate(size_t size, - bool is_noref, - bool is_tlab, bool* gc_overhead_limit_was_exceeded) = 0; virtual HeapWord* permanent_mem_allocate(size_t size) = 0; - // The boundary between a "large" and "small" array of primitives, in words. - virtual size_t large_typearray_limit() = 0; - // Utilities for turning raw memory into filler objects. // // min_fill_size() is the smallest region that can be filled. @@ -606,7 +601,7 @@ virtual void print_tracing_info() const = 0; // Heap verification - virtual void verify(bool allow_dirty, bool silent, bool option) = 0; + virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0; // Non product verification and debugging. #ifndef PRODUCT diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/gc_interface/collectedHeap.inline.hpp --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -122,7 +122,7 @@ post_allocation_notify(klass, (oop)obj); } -HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) { +HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) { // Clear unhandled oops for memory allocation. Memory allocation might // not take out a lock if from tlab, so clear here. @@ -133,7 +133,6 @@ return NULL; // caller does a CHECK_0 too } - // We may want to update this, is_noref objects might not be allocated in TLABs. 
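The order that common_mem_allocate_noinit() now follows for every ordinary object, the current thread's TLAB first and only then the heap-wide slow path (as the following hunk shows), can be sketched as below. The stub helpers and names are invented; after this change mem_allocate() itself is never used to allocate TLABs.

#include <cstddef>
#include <cstdlib>

typedef unsigned long HeapWord_;   // stand-in for HeapWord

// Stubbed helpers for the sketch; the real versions live on Thread and CollectedHeap.
static HeapWord_* tlab_allocate(size_t /*words*/) { return 0; }   // pretend the TLAB is full
static HeapWord_* heap_mem_allocate(size_t words, bool* limit_exceeded) {
  *limit_exceeded = false;
  return static_cast<HeapWord_*>(std::malloc(words * sizeof(HeapWord_)));
}

HeapWord_* allocate_words(size_t words) {
  if (HeapWord_* p = tlab_allocate(words)) return p;   // fast path: TLAB bump pointer
  bool limit_exceeded = false;
  return heap_mem_allocate(words, &limit_exceeded);    // slow path: heap-wide allocate
}

int main() { return allocate_words(4) != 0 ? 0 : 1; }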
HeapWord* result = NULL; if (UseTLAB) { result = CollectedHeap::allocate_from_tlab(THREAD, size); @@ -145,8 +144,6 @@ } bool gc_overhead_limit_was_exceeded = false; result = Universe::heap()->mem_allocate(size, - is_noref, - false, &gc_overhead_limit_was_exceeded); if (result != NULL) { NOT_PRODUCT(Universe::heap()-> @@ -183,8 +180,8 @@ } } -HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) { - HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL); +HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) { + HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); init_obj(obj, size); return obj; } @@ -255,7 +252,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); post_allocation_setup_obj(klass, obj, size); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; @@ -268,20 +265,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL); - post_allocation_setup_array(klass, obj, size, length); - NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); - return (oop)obj; -} - -oop CollectedHeap::large_typearray_allocate(KlassHandle klass, - int size, - int length, - TRAPS) { - debug_only(check_for_valid_allocation_state()); - assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); - assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); post_allocation_setup_array(klass, obj, size, length); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/interpreter/interpreterRuntime.cpp --- a/src/share/vm/interpreter/interpreterRuntime.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -362,25 +362,6 @@ THROW_MSG(vmSymbols::java_lang_ClassCastException(), message); IRT_END -// required can be either a MethodType, or a Class (for a single argument) -// actual (if not null) can be either a MethodHandle, or an arbitrary value (for a single argument) -IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* thread, - oopDesc* required, - oopDesc* actual)) { - ResourceMark rm(thread); - char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual); - - if (ProfileTraps) { - note_trap(thread, Deoptimization::Reason_constraint, CHECK); - } - - // create exception - THROW_MSG(vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); -} -IRT_END - - - // exception_handler_for_exception(...) returns the continuation address, // the exception oop (via TLS) and sets the bci/bcp for the continuation. // The exception oop is returned to make sure it is preserved over GC (it @@ -863,7 +844,7 @@ const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci; const int bci = branch_bcp != NULL ? 
method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci; - nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread); + nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread); if (osr_nm != NULL) { // We may need to do on-stack replacement which requires that no diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/interpreter/interpreterRuntime.hpp --- a/src/share/vm/interpreter/interpreterRuntime.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -98,7 +98,6 @@ static void throw_StackOverflowError(JavaThread* thread); static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index); static void throw_ClassCastException(JavaThread* thread, oopDesc* obj); - static void throw_WrongMethodTypeException(JavaThread* thread, oopDesc* mtype = NULL, oopDesc* mhandle = NULL); static void create_exception(JavaThread* thread, char* name, char* message); static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj); static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/interpreter/linkResolver.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -294,6 +294,16 @@ Symbol* method_signature = pool->signature_ref_at(index); KlassHandle current_klass(THREAD, pool->pool_holder()); + if (pool->has_preresolution() + || (resolved_klass() == SystemDictionary::MethodHandle_klass() && + methodOopDesc::is_method_handle_invoke_name(method_name))) { + methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index); + if (result_oop != NULL) { + resolved_method = methodHandle(THREAD, result_oop); + return; + } + } + resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -171,7 +171,6 @@ address TemplateInterpreter::_throw_ArrayStoreException_entry = NULL; address TemplateInterpreter::_throw_ArithmeticException_entry = NULL; address TemplateInterpreter::_throw_ClassCastException_entry = NULL; -address TemplateInterpreter::_throw_WrongMethodType_entry = NULL; address TemplateInterpreter::_throw_NullPointerException_entry = NULL; address TemplateInterpreter::_throw_StackOverflowError_entry = NULL; address TemplateInterpreter::_throw_exception_entry = NULL; @@ -346,7 +345,6 @@ Interpreter::_throw_ArrayStoreException_entry = generate_klass_exception_handler("java/lang/ArrayStoreException" ); Interpreter::_throw_ArithmeticException_entry = generate_exception_handler("java/lang/ArithmeticException" , "/ by zero"); Interpreter::_throw_ClassCastException_entry = generate_ClassCastException_handler(); - Interpreter::_throw_WrongMethodType_entry = generate_WrongMethodType_handler(); Interpreter::_throw_NullPointerException_entry = generate_exception_handler("java/lang/NullPointerException" , NULL ); Interpreter::_throw_StackOverflowError_entry = generate_StackOverflowError_handler(); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/interpreter/templateInterpreterGenerator.hpp 
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -51,7 +51,6 @@ } address generate_exception_handler_common(const char* name, const char* message, bool pass_oop); address generate_ClassCastException_handler(); - address generate_WrongMethodType_handler(); address generate_ArrayIndexOutOfBounds_handler(const char* name); address generate_continuation_for(TosState state); address generate_return_entry_for(TosState state, int step); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -750,10 +750,6 @@ return NULL; } -size_t GenCollectorPolicy::large_typearray_limit() { - return FastAllocateSizeLimit; -} - // Return true if any of the following is true: // . the allocation won't fit into the current young gen heap // . gc locker is occupied (jni critical section) diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/collectorPolicy.hpp --- a/src/share/vm/memory/collectorPolicy.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/collectorPolicy.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -280,9 +280,6 @@ HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab); - // The size that defines a "large array". - virtual size_t large_typearray_limit(); - // Adaptive size policy virtual void initialize_size_policy(size_t init_eden_size, size_t init_promo_size, diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/genCollectedHeap.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -434,11 +434,9 @@ } HeapWord* GenCollectedHeap::mem_allocate(size_t size, - bool is_large_noref, - bool is_tlab, bool* gc_overhead_limit_was_exceeded) { return collector_policy()->mem_allocate_work(size, - is_tlab, + false /* is_tlab */, gc_overhead_limit_was_exceeded); } @@ -1120,11 +1118,9 @@ HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { bool gc_overhead_limit_was_exceeded; - HeapWord* result = mem_allocate(size /* size */, - false /* is_large_noref */, - true /* is_tlab */, - &gc_overhead_limit_was_exceeded); - return result; + return collector_policy()->mem_allocate_work(size /* size */, + true /* is_tlab */, + &gc_overhead_limit_was_exceeded); } // Requires "*prev_ptr" to be non-NULL. 
Deletes and a block of minimal size @@ -1179,10 +1175,6 @@ } } -size_t GenCollectedHeap::large_typearray_limit() { - return gen_policy()->large_typearray_limit(); -} - class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { void do_generation(Generation* gen) { gen->prepare_for_verify(); @@ -1260,7 +1252,7 @@ return _gens[level]->gc_stats(); } -void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) { +void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) { if (!silent) { gclog_or_tty->print("permgen "); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/genCollectedHeap.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -161,8 +161,6 @@ size_t max_capacity() const; HeapWord* mem_allocate(size_t size, - bool is_large_noref, - bool is_tlab, bool* gc_overhead_limit_was_exceeded); // We may support a shared contiguous allocation area, if the youngest @@ -315,8 +313,6 @@ // contributed as it needs. void release_scratch(); - size_t large_typearray_limit(); - // Ensure parsability: override virtual void ensure_parsability(bool retire_tlabs); @@ -361,7 +357,7 @@ void prepare_for_verify(); // Override. - void verify(bool allow_dirty, bool silent, bool /* option */); + void verify(bool allow_dirty, bool silent, VerifyOption option); // Override. void print() const; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/universe.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -1278,7 +1278,7 @@ st->print_cr("}"); } -void Universe::verify(bool allow_dirty, bool silent, bool option) { +void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) { if (SharedSkipVerify) { return; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/memory/universe.hpp --- a/src/share/vm/memory/universe.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/memory/universe.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -109,6 +109,14 @@ bool _use_implicit_null_checks; }; +enum VerifyOption { + VerifyOption_Default = 0, + + // G1 + VerifyOption_G1UsePrevMarking = VerifyOption_Default, + VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1, + VerifyOption_G1UseMarkWord = VerifyOption_G1UseNextMarking + 1 +}; class Universe: AllStatic { // Ugh. Universe is much too friendly. 
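As a small illustration of why the bool verify option is replaced by the VerifyOption enum above (standalone sketch; the enumerator and function names are simplified stand-ins, not the real Universe::verify signature): call sites can state which marking information G1 should verify against instead of passing an opaque true/false.

#include <iostream>

enum VerifyOption_ { Default_, UsePrevMarking_, UseNextMarking_, UseMarkWord_ };

void verify_heap(bool silent, VerifyOption_ option = Default_) {
  if (silent) return;
  switch (option) {
    case UsePrevMarking_: std::cout << "verify against previous marking\n"; break;
    case UseNextMarking_: std::cout << "verify against next marking\n";     break;
    case UseMarkWord_:    std::cout << "verify against mark words\n";       break;
    default:              std::cout << "default verification\n";            break;
  }
}

int main() {
  verify_heap(false);                   // old-style call still reads naturally
  verify_heap(false, UseNextMarking_);  // self-documenting, unlike verify(false, true)
  return 0;
}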
@@ -404,7 +412,8 @@ // Debugging static bool verify_in_progress() { return _verify_in_progress; } - static void verify(bool allow_dirty = true, bool silent = false, bool option = true); + static void verify(bool allow_dirty = true, bool silent = false, + VerifyOption option = VerifyOption_Default ); static int verify_count() { return _verify_count; } static void print(); static void print_on(outputStream* st); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/constantPoolKlass.cpp --- a/src/share/vm/oops/constantPoolKlass.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/constantPoolKlass.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -310,10 +310,14 @@ st->print(" - flags: 0x%x", cp->flags()); if (cp->has_pseudo_string()) st->print(" has_pseudo_string"); if (cp->has_invokedynamic()) st->print(" has_invokedynamic"); + if (cp->has_preresolution()) st->print(" has_preresolution"); st->cr(); } + if (cp->pool_holder() != NULL) { + bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp); + st->print_cr(" - holder: " INTPTR_FORMAT "%s", cp->pool_holder(), (extra? " (extra)" : "")); + } st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache()); - for (int index = 1; index < cp->length(); index++) { // Index 0 is unused st->print(" - %3d : ", index); cp->tag_at(index).print_on(st); @@ -414,10 +418,15 @@ st->print("constant pool [%d]", cp->length()); if (cp->has_pseudo_string()) st->print("/pseudo_string"); if (cp->has_invokedynamic()) st->print("/invokedynamic"); + if (cp->has_preresolution()) st->print("/preresolution"); if (cp->operands() != NULL) st->print("/operands[%d]", cp->operands()->length()); cp->print_address_on(st); st->print(" for "); cp->pool_holder()->print_value_on(st); + if (cp->pool_holder() != NULL) { + bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp); + if (extra) st->print(" (extra)"); + } if (cp->cache() != NULL) { st->print(" cache=" PTR_FORMAT, cp->cache()); } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/constantPoolOop.cpp --- a/src/share/vm/oops/constantPoolOop.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/constantPoolOop.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -266,6 +266,29 @@ } +methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool, + int which, Bytecodes::Code invoke_code) { + assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here"); + if (cpool->cache() == NULL) return false; // nothing to load yet + int cache_index = which - CPCACHE_INDEX_TAG; + if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { + if (PrintMiscellaneous && (Verbose||WizardMode)) { + tty->print_cr("bad operand %d for %d in:", which, invoke_code); cpool->print(); + } + return NULL; + } + ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); + if (invoke_code != Bytecodes::_illegal) + return e->get_method_if_resolved(invoke_code, cpool); + Bytecodes::Code bc; + if ((bc = e->bytecode_1()) != (Bytecodes::Code)0) + return e->get_method_if_resolved(bc, cpool); + if ((bc = e->bytecode_2()) != (Bytecodes::Code)0) + return e->get_method_if_resolved(bc, cpool); + return NULL; +} + + Symbol* constantPoolOopDesc::impl_name_ref_at(int which, bool uncached) { int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached)); return symbol_at(name_index); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/constantPoolOop.hpp --- a/src/share/vm/oops/constantPoolOop.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/constantPoolOop.hpp Thu 
Jul 07 22:34:34 2011 -0400 @@ -103,7 +103,8 @@ enum FlagBit { FB_has_invokedynamic = 1, - FB_has_pseudo_string = 2 + FB_has_pseudo_string = 2, + FB_has_preresolution = 3 }; int flags() const { return _flags; } @@ -179,8 +180,10 @@ bool has_pseudo_string() const { return flag_at(FB_has_pseudo_string); } bool has_invokedynamic() const { return flag_at(FB_has_invokedynamic); } + bool has_preresolution() const { return flag_at(FB_has_preresolution); } void set_pseudo_string() { set_flag_at(FB_has_pseudo_string); } void set_invokedynamic() { set_flag_at(FB_has_invokedynamic); } + void set_preresolution() { set_flag_at(FB_has_preresolution); } // Klass holding pool klassOop pool_holder() const { return _pool_holder; } @@ -663,6 +666,8 @@ friend class SystemDictionary; // Used by compiler to prevent classloading. + static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which, + Bytecodes::Code bc = Bytecodes::_illegal); static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); // Same as above - but does LinkResolving. diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/cpCacheOop.cpp --- a/src/share/vm/oops/cpCacheOop.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/cpCacheOop.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -295,6 +295,50 @@ } +methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) { + assert(invoke_code > (Bytecodes::Code)0, "bad query"); + if (is_secondary_entry()) { + return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool); + } + // Decode the action of set_method and set_interface_call + if (bytecode_1() == invoke_code) { + oop f1 = _f1; + if (f1 != NULL) { + switch (invoke_code) { + case Bytecodes::_invokeinterface: + assert(f1->is_klass(), ""); + return klassItable::method_for_itable_index(klassOop(f1), (int) f2()); + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: + assert(f1->is_method(), ""); + return methodOop(f1); + } + } + } + if (bytecode_2() == invoke_code) { + switch (invoke_code) { + case Bytecodes::_invokevirtual: + if (is_vfinal()) { + // invokevirtual + methodOop m = methodOop((intptr_t) f2()); + assert(m->is_method(), ""); + return m; + } else { + int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index()); + if (cpool->tag_at(holder_index).is_klass()) { + klassOop klass = cpool->resolved_klass_at(holder_index); + if (!Klass::cast(klass)->oop_is_instance()) + klass = SystemDictionary::Object_klass(); + return instanceKlass::cast(klass)->method_at_vtable((int) f2()); + } + } + } + } + return NULL; +} + + + class LocalOopClosure: public OopClosure { private: void (*_f)(oop*); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/cpCacheOop.hpp --- a/src/share/vm/oops/cpCacheOop.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/cpCacheOop.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -194,6 +194,8 @@ methodHandle signature_invoker // determines signature information ); + methodOop get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool); + // For JVM_CONSTANT_InvokeDynamic cache entries: void initialize_bootstrap_method_index_in_cache(int bsm_cache_index); int bootstrap_method_index_in_cache(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/methodOop.cpp --- a/src/share/vm/oops/methodOop.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/oops/methodOop.cpp Thu Jul 07 22:34:34 2011 
-0400
@@ -49,6 +49,7 @@
 #include "runtime/relocator.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "utilities/quickSort.hpp"
 #include "utilities/xmlstream.hpp"
@@ -928,14 +929,40 @@
   name->increment_refcount();
   signature->increment_refcount();

+  // record non-BCP method types in the constant pool
+  GrowableArray<KlassHandle>* extra_klasses = NULL;
+  for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) {
+    oop ptype = (i == -1
+                 ? java_lang_invoke_MethodType::rtype(method_type())
+                 : java_lang_invoke_MethodType::ptype(method_type(), i));
+    klassOop klass = check_non_bcp_klass(java_lang_Class::as_klassOop(ptype));
+    if (klass != NULL) {
+      if (extra_klasses == NULL)
+        extra_klasses = new GrowableArray<KlassHandle>(len+1);
+      bool dup = false;
+      for (int j = 0; j < extra_klasses->length(); j++) {
+        if (extra_klasses->at(j) == klass) { dup = true; break; }
+      }
+      if (!dup)
+        extra_klasses->append(KlassHandle(THREAD, klass));
+    }
+  }
+
+  int extra_klass_count = (extra_klasses == NULL ? 0 : extra_klasses->length());
+  int cp_length = _imcp_limit + extra_klass_count;
   constantPoolHandle cp;
   {
-    constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty));
+    constantPoolOop cp_oop = oopFactory::new_constantPool(cp_length, IsSafeConc, CHECK_(empty));
     cp = constantPoolHandle(THREAD, cp_oop);
   }
   cp->symbol_at_put(_imcp_invoke_name, name);
   cp->symbol_at_put(_imcp_invoke_signature, signature);
   cp->string_at_put(_imcp_method_type_value, Universe::the_null_string());
+  for (int j = 0; j < extra_klass_count; j++) {
+    KlassHandle klass = extra_klasses->at(j);
+    cp->klass_at_put(_imcp_limit + j, klass());
+  }
+  cp->set_preresolution();
   cp->set_pool_holder(holder());

   // set up the fancy stuff:
@@ -984,6 +1011,14 @@
   return m;
 }

+klassOop methodOopDesc::check_non_bcp_klass(klassOop klass) {
+  if (klass != NULL && Klass::cast(klass)->class_loader() != NULL) {
+    if (Klass::cast(klass)->oop_is_objArray())
+      klass = objArrayKlass::cast(klass)->bottom_klass();
+    return klass;
+  }
+  return NULL;
+}

 methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
@@ -1207,41 +1242,6 @@
   if (WizardMode) signature()->print_symbol_on(st);
 }

-
-extern "C" {
-  static int method_compare(methodOop* a, methodOop* b) {
-    return (*a)->name()->fast_compare((*b)->name());
-  }
-
-  // Prevent qsort from reordering a previous valid sort by
-  // considering the address of the methodOops if two methods
-  // would otherwise compare as equal. Required to preserve
-  // optimal access order in the shared archive. Slower than
-  // method_compare, only used for shared archive creation.
-  static int method_compare_idempotent(methodOop* a, methodOop* b) {
-    int i = method_compare(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  // We implement special compare versions for narrow oops to avoid
-  // testing for UseCompressedOops on every comparison.
-  static int method_compare_narrow(narrowOop* a, narrowOop* b) {
-    methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
-    methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
-    return m->name()->fast_compare(n->name());
-  }
-
-  static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
-    int i = method_compare_narrow(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  typedef int (*compareFn)(const void*, const void*);
-}
-

 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 static void reorder_based_on_method_index(objArrayOop methods,
                                           objArrayOop annotations,
@@ -1265,6 +1265,14 @@
   }
 }

+// Comparer for sorting an object array containing
+// methodOops.
+template <class T>
+static int method_comparator(T a, T b) {
+  methodOop m = (methodOop)oopDesc::decode_heap_oop_not_null(a);
+  methodOop n = (methodOop)oopDesc::decode_heap_oop_not_null(b);
+  return m->name()->fast_compare(n->name());
+}

 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 void methodOopDesc::sort_methods(objArrayOop methods,
@@ -1287,30 +1295,19 @@
       m->set_method_idnum(i);
     }
   }
-
-  // Use a simple bubble sort for small number of methods since
-  // qsort requires a functional pointer call for each comparison.
-  if (length < 8) {
-    bool sorted = true;
-    for (int i=length-1; i>0; i--) {
-      for (int j=0; j<i; j++) {
-        methodOop m1 = (methodOop)methods->obj_at(j);
-        methodOop m2 = (methodOop)methods->obj_at(j+1);
-        if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
-          methods->obj_at_put(j, m2);
-          methods->obj_at_put(j+1, m1);
-          sorted = false;
-        }
-      }
-      if (sorted) break;
-      sorted = true;
+  {
+    No_Safepoint_Verifier nsv;
+    if (UseCompressedOops) {
+      QuickSort::sort<narrowOop>((narrowOop*)(methods->base()), length, method_comparator<narrowOop>, idempotent);
+    } else {
+      QuickSort::sort<oop>((oop*)(methods->base()), length, method_comparator<oop>, idempotent);
     }
-  } else {
-    compareFn compare =
-      (UseCompressedOops ?
-       (compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
-       (compareFn) (idempotent ? method_compare_idempotent : method_compare));
-    qsort(methods->base(), length, heapOopSize, compare);
+    if (UseConcMarkSweepGC) {
+      // For CMS we need to dirty the cards for the array
+      BarrierSet* bs = Universe::heap()->barrier_set();
+      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+      bs->write_ref_array(methods->base(), length);
+    }
   }

   // Sort annotations if necessary
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/methodOop.hpp
--- a/src/share/vm/oops/methodOop.hpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/oops/methodOop.hpp Thu Jul 07 22:34:34 2011 -0400
@@ -600,6 +600,7 @@
                                        Symbol* signature, //anything at all
                                        Handle method_type, TRAPS);
+  static klassOop check_non_bcp_klass(klassOop klass);
   // these operate only on invoke methods:
   oop method_handle_type() const;
   static jint* method_type_offsets_chain();  // series of pointer-offsets, terminated by -1
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/oops/typeArrayKlass.cpp
--- a/src/share/vm/oops/typeArrayKlass.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/oops/typeArrayKlass.cpp Thu Jul 07 22:34:34 2011 -0400
@@ -84,11 +84,7 @@
     KlassHandle h_k(THREAD, as_klassOop());
     typeArrayOop t;
     CollectedHeap* ch = Universe::heap();
-    if (size < ch->large_typearray_limit()) {
-      t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
-    } else {
-      t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
-    }
+    t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
     assert(t->is_parsable(), "Don't publish unless parsable");
     return t;
   } else {
diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/bytecodeInfo.cpp
--- a/src/share/vm/opto/bytecodeInfo.cpp Sat Jul 02 04:17:12 2011 -0400
+++ b/src/share/vm/opto/bytecodeInfo.cpp Thu Jul 07 22:34:34
2011 -0400 @@ -35,14 +35,16 @@ //============================================================================= //------------------------------InlineTree------------------------------------- -InlineTree::InlineTree( Compile* c, - const InlineTree *caller_tree, ciMethod* callee, - JVMState* caller_jvms, int caller_bci, - float site_invoke_ratio, int site_depth_adjust) -: C(c), _caller_jvms(caller_jvms), - _caller_tree((InlineTree*)caller_tree), - _method(callee), _site_invoke_ratio(site_invoke_ratio), - _site_depth_adjust(site_depth_adjust), +InlineTree::InlineTree(Compile* c, + const InlineTree *caller_tree, ciMethod* callee, + JVMState* caller_jvms, int caller_bci, + float site_invoke_ratio, int max_inline_level) : + C(c), + _caller_jvms(caller_jvms), + _caller_tree((InlineTree*) caller_tree), + _method(callee), + _site_invoke_ratio(site_invoke_ratio), + _max_inline_level(max_inline_level), _count_inline_bcs(method()->code_size()) { NOT_PRODUCT(_count_inlines = 0;) @@ -66,10 +68,13 @@ } InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, - float site_invoke_ratio, int site_depth_adjust) -: C(c), _caller_jvms(caller_jvms), _caller_tree(NULL), - _method(callee_method), _site_invoke_ratio(site_invoke_ratio), - _site_depth_adjust(site_depth_adjust), + float site_invoke_ratio, int max_inline_level) : + C(c), + _caller_jvms(caller_jvms), + _caller_tree(NULL), + _method(callee_method), + _site_invoke_ratio(site_invoke_ratio), + _max_inline_level(max_inline_level), _count_inline_bcs(method()->code_size()) { NOT_PRODUCT(_count_inlines = 0;) @@ -94,7 +99,7 @@ if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); if (PrintInlining && Verbose) { - CompileTask::print_inline_indent(inline_depth()); + CompileTask::print_inline_indent(inline_level()); tty->print_cr("Inlined method is hot: "); } return NULL; @@ -109,7 +114,7 @@ size < InlineThrowMaxSize ) { wci_result->set_profit(wci_result->profit() * 100); if (PrintInlining && Verbose) { - CompileTask::print_inline_indent(inline_depth()); + CompileTask::print_inline_indent(inline_level()); tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count()); } return NULL; @@ -149,9 +154,9 @@ max_inline_size = C->freq_inline_size(); if (size <= max_inline_size && TraceFrequencyInlining) { - CompileTask::print_inline_indent(inline_depth()); + CompileTask::print_inline_indent(inline_level()); tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count); - CompileTask::print_inline_indent(inline_depth()); + CompileTask::print_inline_indent(inline_level()); callee_method->print(); tty->cr(); } @@ -322,7 +327,7 @@ if (!C->do_inlining() && InlineAccessors) { return "not an accessor"; } - if( inline_depth() > MaxInlineLevel ) { + if (inline_level() > _max_inline_level) { return "inlining too deep"; } @@ -392,7 +397,7 @@ //------------------------------print_inlining--------------------------------- // Really, the failure_msg can be a success message also. void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const { - CompileTask::print_inlining(callee_method, inline_depth(), caller_bci, failure_msg ? failure_msg : "inline"); + CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? 
failure_msg : "inline"); if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); if (Verbose && callee_method) { const InlineTree *top = this; @@ -500,25 +505,25 @@ if (old_ilt != NULL) { return old_ilt; } - int new_depth_adjust = 0; + int max_inline_level_adjust = 0; if (caller_jvms->method() != NULL) { if (caller_jvms->method()->is_method_handle_adapter()) - new_depth_adjust -= 1; // don't count actions in MH or indy adapter frames + max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames else if (callee_method->is_method_handle_invoke()) { - new_depth_adjust -= 1; // don't count method handle calls from java.lang.invoke implem + max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem } - if (new_depth_adjust != 0 && PrintInlining) { - CompileTask::print_inline_indent(inline_depth()); + if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) { + CompileTask::print_inline_indent(inline_level()); tty->print_cr(" \\-> discounting inline depth"); } - if (new_depth_adjust != 0 && C->log()) { + if (max_inline_level_adjust != 0 && C->log()) { int id1 = C->log()->identify(caller_jvms->method()); int id2 = C->log()->identify(callee_method); - C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2); + C->log()->elem("inline_level_discount caller='%d' callee='%d'", id1, id2); } } - InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust); - _subtrees.append( ilt ); + InlineTree* ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust); + _subtrees.append(ilt); NOT_PRODUCT( _count_inlines += 1; ) @@ -543,7 +548,7 @@ Compile* C = Compile::current(); // Root of inline tree - InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0); + InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, MaxInlineLevel); return ilt; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/compile.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -1206,11 +1206,7 @@ // Make sure the Bottom and NotNull variants alias the same. // Also, make sure exact and non-exact variants alias the same. if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) { - if (ta->const_oop()) { - tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset); - } else { - tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); - } + tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); } } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/doCall.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -183,7 +183,7 @@ // TO DO: When UseOldInlining is removed, copy the ILT code elsewhere. float site_invoke_ratio = prof_factor; // Note: ilt is for the root of this parse, not the present call site. 
- ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0); + ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel); } WarmCallInfo scratch_ci; if (!UseOldInlining) diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/loopTransform.cpp --- a/src/share/vm/opto/loopTransform.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/loopTransform.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -83,7 +83,7 @@ #ifdef ASSERT BoolTest::mask bt = cl->loopexit()->test_trip(); assert(bt == BoolTest::lt || bt == BoolTest::gt || - (bt == BoolTest::ne && !LoopLimitCheck), "canonical test is expected"); + bt == BoolTest::ne, "canonical test is expected"); #endif Node* init_n = cl->init_trip(); @@ -824,13 +824,23 @@ //------------------------------clone_up_backedge_goo-------------------------- // If Node n lives in the back_ctrl block and cannot float, we clone a private // version of n in preheader_ctrl block and return that, otherwise return n. -Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) { +Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) { if( get_ctrl(n) != back_ctrl ) return n; + // Only visit once + if (visited.test_set(n->_idx)) { + Node *x = clones.find(n->_idx); + if (x != NULL) + return x; + return n; + } + Node *x = NULL; // If required, a clone of 'n' // Check for 'n' being pinned in the backedge. if( n->in(0) && n->in(0) == back_ctrl ) { + assert(clones.find(n->_idx) == NULL, "dead loop"); x = n->clone(); // Clone a copy of 'n' to preheader + clones.push(x, n->_idx); x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader } @@ -838,10 +848,13 @@ // If there are no changes we can just return 'n', otherwise // we need to clone a private copy and change it. for( uint i = 1; i < n->req(); i++ ) { - Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) ); + Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones ); if( g != n->in(i) ) { - if( !x ) + if( !x ) { + assert(clones.find(n->_idx) == NULL, "dead loop"); x = n->clone(); + clones.push(x, n->_idx); + } x->set_req(i, g); } } @@ -960,6 +973,9 @@ post_head->set_req(LoopNode::EntryControl, zer_taken); set_idom(post_head, zer_taken, dd_main_exit); + Arena *a = Thread::current()->resource_area(); + VectorSet visited(a); + Node_Stack clones(a, main_head->back_control()->outcnt()); // Step A3: Make the fall-in values to the post-loop come from the // fall-out values of the main-loop. for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) { @@ -968,7 +984,8 @@ Node *post_phi = old_new[main_phi->_idx]; Node *fallmain = clone_up_backedge_goo(main_head->back_control(), post_head->init_control(), - main_phi->in(LoopNode::LoopBackControl)); + main_phi->in(LoopNode::LoopBackControl), + visited, clones); _igvn.hash_delete(post_phi); post_phi->set_req( LoopNode::EntryControl, fallmain ); } @@ -1032,6 +1049,8 @@ main_head->set_req(LoopNode::EntryControl, min_taken); set_idom(main_head, min_taken, dd_main_head); + visited.Clear(); + clones.clear(); // Step B3: Make the fall-in values to the main-loop come from the // fall-out values of the pre-loop. 
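
The loopTransform.cpp changes above thread a visited set and an index-to-clone map (a VectorSet plus the new Node_Stack::find) through clone_up_backedge_goo, so that while wiring up the pre/main/post loops each backedge node is cloned at most once and shared subgraphs are not re-walked. A standalone sketch of the memoization pattern, with STL containers as stand-ins and the control-pinned special case of the real code omitted:

    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    struct Node { unsigned idx; std::vector<Node*> in; };

    // Clone 'n' (and any inputs that force it) at most once, remembering results
    // so repeated visits reuse the existing clone instead of rebuilding it.
    Node* clone_once(Node* n, std::unordered_set<unsigned>& visited,
                     std::unordered_map<unsigned, Node*>& clones) {
      if (!visited.insert(n->idx).second) {            // seen before:
        auto it = clones.find(n->idx);                 // reuse its clone if one exists,
        return it != clones.end() ? it->second : n;    // otherwise the original node
      }
      Node* x = nullptr;                               // lazily created clone of 'n'
      for (size_t i = 0; i < n->in.size(); i++) {
        Node* g = clone_once(n->in[i], visited, clones);
        if (g != n->in[i]) {
          if (x == nullptr) { x = new Node(*n); clones[n->idx] = x; }
          x->in[i] = g;
        }
      }
      return x != nullptr ? x : n;
    }
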
for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) { @@ -1040,7 +1059,8 @@ Node *pre_phi = old_new[main_phi->_idx]; Node *fallpre = clone_up_backedge_goo(pre_head->back_control(), main_head->init_control(), - pre_phi->in(LoopNode::LoopBackControl)); + pre_phi->in(LoopNode::LoopBackControl), + visited, clones); _igvn.hash_delete(main_phi); main_phi->set_req( LoopNode::EntryControl, fallpre ); } @@ -1070,9 +1090,11 @@ // direction: // positive stride use < // negative stride use > + // + // not-equal test is kept for post loop to handle case + // when init > limit when stride > 0 (and reverse). if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) { - assert(!LoopLimitCheck, "only canonical tests (lt or gt) are expected"); BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt; // Modify pre loop end condition diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/loopnode.cpp --- a/src/share/vm/opto/loopnode.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/loopnode.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -453,7 +453,12 @@ // Now we need to canonicalize loop condition. if (bt == BoolTest::ne) { assert(stride_con == 1 || stride_con == -1, "simple increment only"); - bt = (stride_con > 0) ? BoolTest::lt : BoolTest::gt; + // 'ne' can be replaced with 'lt' only when init < limit. + if (stride_con > 0 && init_t->_hi < limit_t->_lo) + bt = BoolTest::lt; + // 'ne' can be replaced with 'gt' only when init > limit. + if (stride_con < 0 && init_t->_lo > limit_t->_hi) + bt = BoolTest::gt; } if (incl_limit) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/loopnode.hpp --- a/src/share/vm/opto/loopnode.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/loopnode.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -843,7 +843,7 @@ void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ); // If Node n lives in the back_ctrl block, we clone a private version of n // in preheader_ctrl block and return that, otherwise return n. - Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ); + Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ); // Take steps to maximally unroll the loop. Peel any odd iterations, then // unroll to do double iterations. The next round of major loop transforms diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/macro.cpp --- a/src/share/vm/opto/macro.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/macro.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -391,13 +391,9 @@ } } // Check if an appropriate new value phi already exists. - Node* new_phi = NULL; - uint size = value_phis->size(); - for (uint i=0; i < size; i++) { - if ( mem->_idx == value_phis->index_at(i) ) { - return value_phis->node_at(i); - } - } + Node* new_phi = value_phis->find(mem->_idx); + if (new_phi != NULL) + return new_phi; if (level <= 0) { return NULL; // Give up: phi tree too deep diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/node.cpp --- a/src/share/vm/opto/node.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/node.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -2012,6 +2012,16 @@ _inode_top = _inodes + old_top; // restore _top } +// Node_Stack is used to map nodes. 
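
The loopnode.cpp hunk above stops unconditionally rewriting a counted loop's != exit test into < or >: the rewrite now happens only when the value ranges prove the induction variable starts on the correct side of the limit (init below the limit for a positive stride, above it for a negative stride), and the loopTransform.cpp change keeps the != test on the post loop for the same reason. A small self-contained demonstration of why the two tests are not interchangeable when that condition is not known:

    #include <cstdio>

    // Stride +1 with init > limit: "i != limit" only exits after the counter wraps
    // around, while "i < limit" never enters the body at all.
    int main() {
      int trips_ne = 0, trips_lt = 0;
      for (unsigned char i = 5; i != 3; i++) trips_ne++;   // wraps 255 -> 0 before reaching 3
      for (unsigned char i = 5; i <  3; i++) trips_lt++;   // condition is false on entry
      std::printf("ne=%d lt=%d\n", trips_ne, trips_lt);    // prints ne=254 lt=0
    }
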
+Node* Node_Stack::find(uint idx) const { + uint sz = size(); + for (uint i=0; i < sz; i++) { + if (idx == index_at(i) ) + return node_at(i); + } + return NULL; +} + //============================================================================= uint TypeNode::size_of() const { return sizeof(*this); } #ifndef PRODUCT diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/node.hpp --- a/src/share/vm/opto/node.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/node.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1463,6 +1463,9 @@ bool is_nonempty() const { return (_inode_top >= _inodes); } bool is_empty() const { return (_inode_top < _inodes); } void clear() { _inode_top = _inodes - 1; } // retain storage + + // Node_Stack is used to map nodes. + Node* find(uint idx) const; }; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/opto/parse.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -50,7 +50,7 @@ // Always between 0.0 and 1.0. Represents the percentage of the method's // total execution time used at this call site. const float _site_invoke_ratio; - const int _site_depth_adjust; + const int _max_inline_level; // the maximum inline level for this sub-tree (may be adjusted) float compute_callee_frequency( int caller_bci ) const; GrowableArray _subtrees; @@ -63,7 +63,7 @@ JVMState* caller_jvms, int caller_bci, float site_invoke_ratio, - int site_depth_adjust); + int max_inline_level); InlineTree *build_inline_tree_for_callee(ciMethod* callee_method, JVMState* caller_jvms, int caller_bci); @@ -74,7 +74,7 @@ InlineTree *caller_tree() const { return _caller_tree; } InlineTree* callee_at(int bci, ciMethod* m) const; - int inline_depth() const { return stack_depth() + _site_depth_adjust; } + int inline_level() const { return stack_depth(); } int stack_depth() const { return _caller_jvms ? 
_caller_jvms->depth() : 0; } public: @@ -82,7 +82,7 @@ static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false); // For temporary (stack-allocated, stateless) ilts: - InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust); + InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level); // InlineTree enum enum InlineStyle { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/prims/jni.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -3296,6 +3296,19 @@ return ret; } +#ifndef PRODUCT + +#include "utilities/quickSort.hpp" + +void execute_internal_vm_tests() { + if (ExecuteInternalVMTests) { + assert(QuickSort::test_quick_sort(), "test_quick_sort failed"); + tty->print_cr("All tests passed"); + } +} + +#endif + HS_DTRACE_PROBE_DECL3(hotspot_jni, CreateJavaVM__entry, vm, penv, args); DT_RETURN_MARK_DECL(CreateJavaVM, jint); @@ -3386,6 +3399,7 @@ } NOT_PRODUCT(test_error_handler(ErrorHandlerTest)); + NOT_PRODUCT(execute_internal_vm_tests()); return result; } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/prims/methodHandleWalk.cpp --- a/src/share/vm/prims/methodHandleWalk.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/prims/methodHandleWalk.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -425,6 +425,8 @@ ArgToken arg = _outgoing.at(arg_slot); assert(dest == arg.basic_type(), ""); arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty)); + // replace the object by the result of the cast, to make the compiler happy: + change_argument(T_OBJECT, arg_slot, T_OBJECT, arg); debug_only(dest_klass = (klassOop)badOop); break; } @@ -467,7 +469,7 @@ ArgToken arglist[2]; arglist[0] = arg; // outgoing 'this' arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); + arg = make_invoke(methodHandle(), unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); change_argument(T_OBJECT, arg_slot, dest, arg); break; } @@ -483,7 +485,7 @@ ArgToken arglist[2]; arglist[0] = arg; // outgoing value arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); + arg = make_invoke(methodHandle(), boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); change_argument(src, arg_slot, T_OBJECT, arg); break; } @@ -599,8 +601,9 @@ lose("bad vmlayout slot", CHECK_(empty)); } // FIXME: consider inlining the invokee at the bytecode level - ArgToken ret = make_invoke(methodOop(invoker), vmIntrinsics::_none, + ArgToken ret = make_invoke(methodHandle(THREAD, methodOop(invoker)), vmIntrinsics::_invokeGeneric, Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty)); + // The iid = _invokeGeneric really means to adjust reference types as needed. 
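
Node_Stack::find(), added in the node.cpp/node.hpp hunks above and used in macro.cpp to replace an open-coded scan over value_phis, is a plain linear search of the (index, node) pairs currently on the stack; that is adequate for the small, short-lived maps it serves, such as phi trees and the loop-transform clone map. A standalone sketch of the same operation, with STL types as stand-ins:

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Node;   // opaque for the sketch; only pointers are stored

    // Return the node recorded for 'idx', or nullptr when it is absent.
    Node* find(const std::vector<std::pair<uint32_t, Node*> >& stack, uint32_t idx) {
      for (size_t i = 0; i < stack.size(); i++) {
        if (stack[i].first == idx) return stack[i].second;
      }
      return nullptr;
    }
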
DEBUG_ONLY(invoker = NULL); if (rtype == T_OBJECT) { klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) ); @@ -657,7 +660,7 @@ arglist[0] = array_arg; // value to check arglist[1] = length_arg; // length to check arglist[2] = ArgToken(); // sentinel - make_invoke(NULL, vmIntrinsics::_checkSpreadArgument, + make_invoke(methodHandle(), vmIntrinsics::_checkSpreadArgument, Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty)); // Spread out the array elements. @@ -680,7 +683,7 @@ ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty)); ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty)); change_argument(T_VOID, ap, element_type, element_arg); - ap += type2size[element_type]; + //ap += type2size[element_type]; // don't do this; insert next arg to *right* of previous } break; } @@ -731,7 +734,7 @@ } assert(ap == _outgoing_argc, ""); arglist[ap] = ArgToken(); // add a sentinel, for the sake of asserts - return make_invoke(chain().last_method_oop(), + return make_invoke(chain().last_method(), vmIntrinsics::_none, chain().last_invoke_code(), true, ap, arglist, THREAD); @@ -853,7 +856,6 @@ if (src != dst) { if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) { if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) { - if (for_return) Untested("MHW return raw conversion"); // still untested vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst); if (iid == vmIntrinsics::_none) { lose("no raw conversion method", CHECK); @@ -865,18 +867,24 @@ assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity"); arglist[0] = arg; // outgoing 'this' arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + arg = make_invoke(methodHandle(), iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); change_argument(src, slot, dst, arg); } else { // return type conversion - klassOop arg_klass = NULL; - arglist[0] = make_parameter(src, arg_klass, -1, CHECK); // return value - arglist[1] = ArgToken(); // sentinel - (void) make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + if (_return_conv == vmIntrinsics::_none) { + _return_conv = iid; + } else if (_return_conv == vmIntrinsics::for_raw_conversion(dst, src)) { + _return_conv = vmIntrinsics::_none; + } else if (_return_conv != zero_return_conv()) { + lose(err_msg("requested raw return conversion not allowed: %s -> %s (before %s)", type2name(src), type2name(dst), vmIntrinsics::name_at(_return_conv)), CHECK); + } } } else { // Nothing to do. } + } else if (for_return && (!is_subword_type(src) || !is_subword_type(dst))) { + // This can occur in exception-throwing MHs, which have a fictitious return value encoded as Void or Empty. + _return_conv = zero_return_conv(); } else if (src == T_OBJECT && is_java_primitive(dst)) { // ref-to-prim: discard ref, push zero lose("requested ref-to-prim conversion not expected", CHECK); @@ -896,6 +904,7 @@ _thread(THREAD), _bytecode(THREAD, 50), _constants(THREAD, 10), + _non_bcp_klasses(THREAD, 5), _cur_stack(0), _max_stack(0), _rtype(T_ILLEGAL) @@ -908,6 +917,15 @@ _name_index = cpool_symbol_put(name); _signature_index = cpool_symbol_put(signature); + // To make the resulting methods more recognizable by + // stack walkers and compiler heuristics, + // we put them in holder class MethodHandle. 
+ // See klass_is_method_handle_adapter_holder + // and methodOopDesc::is_method_handle_adapter. + _target_klass = SystemDictionaryHandles::MethodHandle_klass(); + + check_non_bcp_klasses(java_lang_invoke_MethodHandle::type(root()), CHECK); + // Get return type klass. Handle first_mtype(THREAD, chain().method_type_oop()); // _rklass is NULL for primitives. @@ -929,6 +947,7 @@ assert(_thread == THREAD, "must be same thread"); methodHandle nullHandle; (void) walk(CHECK_(nullHandle)); + record_non_bcp_klasses(); return get_method_oop(CHECK_(nullHandle)); } @@ -1197,10 +1216,18 @@ } case T_OBJECT: { Handle value = arg.object(); - if (value.is_null()) + if (value.is_null()) { emit_bc(Bytecodes::_aconst_null); - else - emit_bc(Bytecodes::_ldc, cpool_object_put(value)); + break; + } + if (java_lang_Class::is_instance(value())) { + klassOop k = java_lang_Class::as_klassOop(value()); + if (k != NULL) { + emit_bc(Bytecodes::_ldc, cpool_klass_put(k)); + break; + } + } + emit_bc(Bytecodes::_ldc, cpool_object_put(value)); break; } default: @@ -1260,6 +1287,7 @@ index = src.index(); } emit_bc(op, cpool_klass_put(tk)); + check_non_bcp_klass(tk, CHECK_(src)); // Allocate a new local for the type so that we don't hide the // previous type from the verifier. index = new_local_index(type); @@ -1292,15 +1320,15 @@ // Emit bytecodes for the given invoke instruction. MethodHandleWalker::ArgToken -MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid, +MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, MethodHandleWalker::ArgToken* argv, TRAPS) { ArgToken zero; - if (m == NULL) { + if (m.is_null()) { // Get the intrinsic methodOop. - m = vmIntrinsics::method_for(iid); - if (m == NULL) { + m = methodHandle(THREAD, vmIntrinsics::method_for(iid)); + if (m.is_null()) { lose(vmIntrinsics::name_at(iid), CHECK_(zero)); } } @@ -1309,18 +1337,46 @@ Symbol* name = m->name(); Symbol* signature = m->signature(); + if (iid == vmIntrinsics::_invokeGeneric && + argc >= 1 && argv[0].token_type() == tt_constant) { + assert(m->intrinsic_id() == vmIntrinsics::_invokeExact, ""); + Handle receiver = argv[0].object(); + Handle rtype(THREAD, java_lang_invoke_MethodHandle::type(receiver())); + Handle mtype(THREAD, m->method_handle_type()); + if (rtype() != mtype()) { + assert(java_lang_invoke_MethodType::form(rtype()) == + java_lang_invoke_MethodType::form(mtype()), + "must be the same shape"); + // customize m to the exact required rtype + bool has_non_bcp_klass = check_non_bcp_klasses(rtype(), CHECK_(zero)); + TempNewSymbol sig2 = java_lang_invoke_MethodType::as_signature(rtype(), true, CHECK_(zero)); + methodHandle m2; + if (!has_non_bcp_klass) { + methodOop m2_oop = SystemDictionary::find_method_handle_invoke(m->name(), sig2, + KlassHandle(), CHECK_(zero)); + m2 = methodHandle(THREAD, m2_oop); + } + if (m2.is_null()) { + // just build it fresh + m2 = methodOopDesc::make_invoke_method(klass, m->name(), sig2, rtype, CHECK_(zero)); + if (m2.is_null()) + lose(err_msg("no customized invoker %s", sig2->as_utf8()), CHECK_(zero)); + } + m = m2; + signature = m->signature(); + } + } + + check_non_bcp_klass(klass, CHECK_(zero)); + if (m->is_method_handle_invoke()) { + check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero)); + } + // Count the number of arguments, not the size ArgumentCount asc(signature); assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 
0 : 1), "argc mismatch"); - if (tailcall) { - // Actually, in order to make these methods more recognizable, - // let's put them in holder class MethodHandle. That way stack - // walkers and compiler heuristics can recognize them. - _target_klass = SystemDictionary::MethodHandle_klass(); - } - // Inline the method. InvocationCounter* ic = m->invocation_counter(); ic->set_carry_flag(); @@ -1353,7 +1409,7 @@ int signature_index = cpool_symbol_put(signature); int name_and_type_index = cpool_name_and_type_put(name_index, signature_index); int klass_index = cpool_klass_put(klass); - int methodref_index = cpool_methodref_put(klass_index, name_and_type_index); + int methodref_index = cpool_methodref_put(op, klass_index, name_and_type_index, m); // Generate invoke. switch (op) { @@ -1380,6 +1436,20 @@ stack_push(rbt); // The return value is already pushed onto the stack. ArgToken ret; if (tailcall) { + if (return_conv() == zero_return_conv()) { + rbt = T_VOID; // discard value + } else if (return_conv() != vmIntrinsics::_none) { + // return value conversion + int index = new_local_index(rbt); + emit_store(rbt, index); + ArgToken arglist[2]; + arglist[0] = ArgToken(tt_temporary, rbt, index); + arglist[1] = ArgToken(); // sentinel + ret = make_invoke(methodHandle(), return_conv(), Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(zero)); + set_return_conv(vmIntrinsics::_none); + rbt = ret.basic_type(); + emit_load(rbt, ret.index()); + } if (rbt != _rtype) { if (rbt == T_VOID) { // push a zero of the right sort @@ -1425,6 +1495,7 @@ case T_OBJECT: if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass() && !Klass::cast(_rklass())->is_interface()) { emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass())); + check_non_bcp_klass(_rklass(), CHECK_(zero)); } emit_bc(Bytecodes::_areturn); break; @@ -1525,6 +1596,52 @@ return index; } +bool MethodHandleCompiler::check_non_bcp_klasses(Handle method_type, TRAPS) { + bool res = false; + for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) { + oop ptype = (i == -1 + ? java_lang_invoke_MethodType::rtype(method_type()) + : java_lang_invoke_MethodType::ptype(method_type(), i)); + res |= check_non_bcp_klass(java_lang_Class::as_klassOop(ptype), CHECK_(false)); + } + return res; +} + +bool MethodHandleCompiler::check_non_bcp_klass(klassOop klass, TRAPS) { + klass = methodOopDesc::check_non_bcp_klass(klass); + if (klass != NULL) { + Symbol* name = Klass::cast(klass)->name(); + for (int i = _non_bcp_klasses.length() - 1; i >= 0; i--) { + klassOop k2 = _non_bcp_klasses.at(i)(); + if (Klass::cast(k2)->name() == name) { + if (k2 != klass) { + lose(err_msg("unsupported klass name alias %s", name->as_utf8()), THREAD); + } + return true; + } + } + _non_bcp_klasses.append(KlassHandle(THREAD, klass)); + return true; + } + return false; +} + +void MethodHandleCompiler::record_non_bcp_klasses() { + // Append extra klasses to constant pool, to guide klass lookup. 
+ for (int k = 0; k < _non_bcp_klasses.length(); k++) { + klassOop non_bcp_klass = _non_bcp_klasses.at(k)(); + bool add_to_cp = true; + for (int j = 1; j < _constants.length(); j++) { + ConstantValue* cv = _constants.at(j); + if (cv != NULL && cv->tag() == JVM_CONSTANT_Class + && cv->klass_oop() == non_bcp_klass) { + add_to_cp = false; + break; + } + } + if (add_to_cp) cpool_klass_put(non_bcp_klass); + } +} constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const { constantPoolHandle nullHandle; @@ -1544,6 +1661,8 @@ case JVM_CONSTANT_Double: cpool->double_at_put( i, cv->get_jdouble() ); break; case JVM_CONSTANT_Class: cpool->klass_at_put( i, cv->klass_oop() ); break; case JVM_CONSTANT_Methodref: cpool->method_at_put( i, cv->first_index(), cv->second_index()); break; + case JVM_CONSTANT_InterfaceMethodref: + cpool->interface_method_at_put(i, cv->first_index(), cv->second_index()); break; case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break; case JVM_CONSTANT_Object: cpool->object_at_put( i, cv->object_oop() ); break; default: ShouldNotReachHere(); @@ -1558,6 +1677,8 @@ } } + cpool->set_preresolution(); + // Set the constant pool holder to the target method's class. cpool->set_pool_holder(_target_klass()); @@ -1606,6 +1727,33 @@ Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty)); // Use fake class. Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty)); // Use fake class. + // Pre-resolve selected CP cache entries, to avoid problems with class loader scoping. + constantPoolCacheHandle cpc(THREAD, cpool->cache()); + for (int i = 0; i < cpc->length(); i++) { + ConstantPoolCacheEntry* e = cpc->entry_at(i); + assert(!e->is_secondary_entry(), "no indy instructions in here, yet"); + int constant_pool_index = e->constant_pool_index(); + ConstantValue* cv = _constants.at(constant_pool_index); + if (!cv->has_linkage()) continue; + methodHandle m = cv->linkage(); + int index; + switch (cv->tag()) { + case JVM_CONSTANT_Methodref: + index = m->vtable_index(); + if (m->is_static()) { + e->set_method(Bytecodes::_invokestatic, m, index); + } else { + e->set_method(Bytecodes::_invokespecial, m, index); + e->set_method(Bytecodes::_invokevirtual, m, index); + } + break; + case JVM_CONSTANT_InterfaceMethodref: + index = klassItable::compute_itable_index(m()); + e->set_interface_call(m, index); + break; + } + } + // Set the invocation counter's count to the invoke count of the // original call site. 
InvocationCounter* ic = m->invocation_counter(); @@ -1696,6 +1844,9 @@ _param_state(0), _temp_num(0) { + out->print("MethodHandle:"); + java_lang_invoke_MethodType::print_signature(java_lang_invoke_MethodHandle::type(root()), out); + out->print(" : #"); start_params(); } virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { @@ -1759,12 +1910,12 @@ _strbuf.print(")"); return maybe_make_temp("fetch", type, "x"); } - virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, + virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) { Symbol* name; Symbol* sig; - if (m != NULL) { + if (m.not_null()) { name = m->name(); sig = m->signature(); } else { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/prims/methodHandleWalk.hpp --- a/src/share/vm/prims/methodHandleWalk.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/prims/methodHandleWalk.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -98,6 +98,7 @@ int bound_arg_slot() { assert(is_bound(), ""); return _arg_slot; } oop bound_arg_oop() { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); } + methodHandle last_method() { assert(is_last(), ""); return _last_method; } methodOop last_method_oop() { assert(is_last(), ""); return _last_method(); } Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; } @@ -181,6 +182,8 @@ GrowableArray _outgoing; // current outgoing parameter slots int _outgoing_argc; // # non-empty outgoing slots + vmIntrinsics::ID _return_conv; // Return conversion required by raw retypes. + // Replace a value of type old_type at slot (and maybe slot+1) with the new value. // If old_type != T_VOID, remove the old argument at that point. // If new_type != T_VOID, insert the new argument at that point. @@ -219,7 +222,8 @@ : _chain(root, THREAD), _for_invokedynamic(for_invokedynamic), _outgoing(THREAD, 10), - _outgoing_argc(0) + _outgoing_argc(0), + _return_conv(vmIntrinsics::_none) { _local_index = for_invokedynamic ? 0 : 1; } @@ -228,6 +232,10 @@ bool for_invokedynamic() const { return _for_invokedynamic; } + vmIntrinsics::ID return_conv() const { return _return_conv; } + void set_return_conv(vmIntrinsics::ID c) { _return_conv = c; } + static vmIntrinsics::ID zero_return_conv() { return vmIntrinsics::_min; } + int new_local_index(BasicType bt) { //int index = _for_invokedynamic ? _local_index : _local_index - 1; int index = _local_index; @@ -243,9 +251,9 @@ virtual ArgToken make_oop_constant(oop con, TRAPS) = 0; virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0; virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0; - virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; + virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; - // For make_invoke, the methodOop can be NULL if the intrinsic ID + // For make_invoke, the methodHandle can be NULL if the intrinsic ID // is something other than vmIntrinsics::_none. // and in case anyone cares to related the previous actions to the chain: @@ -280,6 +288,7 @@ JavaValue _value; Handle _handle; Symbol* _sym; + methodHandle _method; // pre-linkage public: // Constructor for oop types. 
@@ -328,11 +337,21 @@ jlong get_jlong() const { return _value.get_jlong(); } jfloat get_jfloat() const { return _value.get_jfloat(); } jdouble get_jdouble() const { return _value.get_jdouble(); } + + void set_linkage(methodHandle method) { + assert(_method.is_null(), ""); + _method = method; + } + bool has_linkage() const { return _method.not_null(); } + methodHandle linkage() const { return _method; } }; // Fake constant pool. GrowableArray _constants; + // Non-BCP classes that appear in associated MethodTypes (require special handling). + GrowableArray _non_bcp_klasses; + // Accumulated compiler state: GrowableArray _bytecode; @@ -368,15 +387,20 @@ return _constants.append(cv); } - int cpool_oop_reference_put(int tag, int first_index, int second_index) { + int cpool_oop_reference_put(int tag, int first_index, int second_index, methodHandle method) { if (first_index == 0 && second_index == 0) return 0; assert(first_index != 0 && second_index != 0, "no zero indexes"); ConstantValue* cv = new ConstantValue(tag, first_index, second_index); + if (method.not_null()) cv->set_linkage(method); return _constants.append(cv); } int cpool_primitive_put(BasicType type, jvalue* con); + bool check_non_bcp_klasses(Handle method_type, TRAPS); + bool check_non_bcp_klass(klassOop klass, TRAPS); + void record_non_bcp_klasses(); + int cpool_int_put(jint value) { jvalue con; con.i = value; return cpool_primitive_put(T_INT, &con); @@ -403,11 +427,12 @@ int cpool_klass_put(klassOop klass) { return cpool_oop_put(JVM_CONSTANT_Class, klass); } - int cpool_methodref_put(int class_index, int name_and_type_index) { - return cpool_oop_reference_put(JVM_CONSTANT_Methodref, class_index, name_and_type_index); + int cpool_methodref_put(Bytecodes::Code op, int class_index, int name_and_type_index, methodHandle method) { + int tag = (op == Bytecodes::_invokeinterface ? JVM_CONSTANT_InterfaceMethodref : JVM_CONSTANT_Methodref); + return cpool_oop_reference_put(tag, class_index, name_and_type_index, method); } int cpool_name_and_type_put(int name_index, int signature_index) { - return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index); + return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index, methodHandle()); } void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1); @@ -428,7 +453,7 @@ virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS); virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS); - virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS); + virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS); // Get a real constant pool. 
constantPoolHandle get_constant_pool(TRAPS) const; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/prims/methodHandles.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -24,12 +24,14 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" +#include "compiler/compileBroker.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/oopMapCache.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" #include "prims/methodHandles.hpp" #include "prims/methodHandleWalk.hpp" +#include "runtime/compilationPolicy.hpp" #include "runtime/javaCalls.hpp" #include "runtime/reflection.hpp" #include "runtime/signature.hpp" @@ -629,6 +631,8 @@ // convert the external string name to an internal symbol TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str()); if (name == NULL) return; // no such name + if (name == vmSymbols::class_initializer_name()) + return; // illegal name Handle polymorphic_method_type; bool polymorphic_signature = false; @@ -765,7 +769,9 @@ m = NULL; // try again with a different class loader... } - if (m != NULL) { + if (m != NULL && + m->is_method_handle_invoke() && + java_lang_invoke_MethodType::equals(polymorphic_method_type(), m->method_handle_type())) { int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); java_lang_invoke_MemberName::set_vmtarget(mname(), m); java_lang_invoke_MemberName::set_vmindex(mname(), m->vtable_index()); @@ -984,6 +990,48 @@ // This is for debugging and reflection. oop MethodHandles::encode_target(Handle mh, int format, TRAPS) { assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH"); + if (format == ETF_FORCE_DIRECT_HANDLE || + format == ETF_COMPILE_DIRECT_HANDLE) { + // Internal function for stress testing. + Handle mt = java_lang_invoke_MethodHandle::type(mh()); + int invocation_count = 10000; + TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK_NULL); + bool omit_receiver_argument = true; + MethodHandleCompiler mhc(mh, vmSymbols::invoke_name(), signature, invocation_count, omit_receiver_argument, CHECK_NULL); + methodHandle m = mhc.compile(CHECK_NULL); + if (StressMethodHandleWalk && Verbose || PrintMiscellaneous) { + tty->print_cr("MethodHandleNatives.getTarget(%s)", + format == ETF_FORCE_DIRECT_HANDLE ? "FORCE_DIRECT" : "COMPILE_DIRECT"); + if (Verbose) { + m->print_codes(); + } + } + if (StressMethodHandleWalk) { + InterpreterOopMap mask; + OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); + } + if ((format == ETF_COMPILE_DIRECT_HANDLE || + CompilationPolicy::must_be_compiled(m)) + && !instanceKlass::cast(m->method_holder())->is_not_initialized() + && CompilationPolicy::can_be_compiled(m)) { + // Force compilation + CompileBroker::compile_method(m, InvocationEntryBci, + CompLevel_initial_compile, + methodHandle(), 0, "MethodHandleNatives.getTarget", + CHECK_NULL); + } + // Now wrap m in a DirectMethodHandle. 
+ instanceKlassHandle dmh_klass(THREAD, SystemDictionary::DirectMethodHandle_klass()); + Handle dmh = dmh_klass->allocate_instance_handle(CHECK_NULL); + JavaValue ignore_result(T_VOID); + Symbol* init_name = vmSymbols::object_initializer_name(); + Symbol* init_sig = vmSymbols::notifyGenericMethodType_signature(); + JavaCalls::call_special(&ignore_result, dmh, + SystemDictionaryHandles::MethodHandle_klass(), init_name, init_sig, + java_lang_invoke_MethodHandle::type(mh()), CHECK_NULL); + MethodHandles::init_DirectMethodHandle(dmh, m, false, CHECK_NULL); + return dmh(); + } if (format == ETF_HANDLE_OR_METHOD_NAME) { oop target = java_lang_invoke_MethodHandle::vmtarget(mh()); if (target == NULL) { @@ -1219,6 +1267,12 @@ klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); if (aklass_oop != NULL) aklass = KlassHandle(THREAD, aklass_oop); + if (aklass.is_null() && + pklass.not_null() && + loader.is_null() && + pklass->name() == name) + // accept name equivalence here, since that's the best we can do + aklass = pklass; } } else { // for method handle invokers we don't look at the name in the signature @@ -2652,6 +2706,17 @@ } InterpreterOopMap mask; OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); + // compile to object code if -Xcomp or WizardMode + if ((WizardMode || + CompilationPolicy::must_be_compiled(m)) + && !instanceKlass::cast(m->method_holder())->is_not_initialized() + && CompilationPolicy::can_be_compiled(m)) { + // Force compilation + CompileBroker::compile_method(m, InvocationEntryBci, + CompLevel_initial_compile, + methodHandle(), 0, "StressMethodHandleWalk", + CHECK); + } } } @@ -2771,7 +2836,12 @@ // Build a BMH on top of a DMH or another BMH: MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); } - stress_method_handle_walk(mh, CHECK); + + if (StressMethodHandleWalk) { + if (mh->klass() == SystemDictionary::BoundMethodHandle_klass()) + stress_method_handle_walk(mh, CHECK); + // else don't, since the subclass has not yet initialized its own fields + } } JVM_END diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/prims/methodHandles.hpp --- a/src/share/vm/prims/methodHandles.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/prims/methodHandles.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -588,6 +588,8 @@ ETF_DIRECT_HANDLE = 1, // ultimate method handle (will be a DMH, may be self) ETF_METHOD_NAME = 2, // ultimate method as MemberName ETF_REFLECT_METHOD = 3, // ultimate method as java.lang.reflect object (sans refClass) + ETF_FORCE_DIRECT_HANDLE = 64, + ETF_COMPILE_DIRECT_HANDLE = 65, // ad hoc constants OP_ROT_ARGS_DOWN_LIMIT_BIAS = -1 diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/advancedThresholdPolicy.cpp --- a/src/share/vm/runtime/advancedThresholdPolicy.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -171,7 +171,7 @@ // If a method has been stale for some time, remove it from the queue. 
if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) { if (PrintTieredEvents) { - print_event(KILL, method, method, task->osr_bci(), (CompLevel)task->comp_level()); + print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level()); } CompileTaskWrapper ctw(task); // Frees the task compile_queue->remove(task); @@ -192,7 +192,7 @@ if (max_task->comp_level() == CompLevel_full_profile && is_method_profiled(max_method)) { max_task->set_comp_level(CompLevel_limited_profile); if (PrintTieredEvents) { - print_event(UPDATE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); + print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); } } @@ -259,6 +259,17 @@ return false; } +// Inlining control: if we're compiling a profiled method with C1 and the callee +// is known to have OSRed in a C2 version, don't inline it. +bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { + CompLevel comp_level = (CompLevel)env->comp_level(); + if (comp_level == CompLevel_full_profile || + comp_level == CompLevel_limited_profile) { + return callee->highest_osr_comp_level() == CompLevel_full_optimization; + } + return false; +} + // Create MDO if necessary. void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) { if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return; @@ -378,8 +389,9 @@ } // Determine if a method should be compiled with a normal entry point at a different level. -CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) { - CompLevel osr_level = (CompLevel) method->highest_osr_comp_level(); +CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) { + CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), + common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level)); CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level); // If OSR method level is greater than the regular method level, the levels should be @@ -400,15 +412,16 @@ // Determine if we should do an OSR compilation of a given method. CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) { + CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level); if (cur_level == CompLevel_none) { // If there is a live OSR method that means that we deopted to the interpreter // for the transition. - CompLevel osr_level = (CompLevel)method->highest_osr_comp_level(); + CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); if (osr_level > CompLevel_none) { return osr_level; } } - return common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level); + return next_level; } // Update the rate and submit compile @@ -418,10 +431,9 @@ CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD); } - // Handle the invocation event. void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh, - CompLevel level, TRAPS) { + CompLevel level, nmethod* nm, TRAPS) { if (should_create_mdo(mh(), level)) { create_mdo(mh, THREAD); } @@ -436,32 +448,81 @@ // Handle the back branch event. Notice that we can compile the method // with a regular entry from here. 
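
should_not_inline(), declared in advancedThresholdPolicy.hpp below and defined above, adds a policy-level inlining veto: while producing a C1 profiling compile (full or limited profile), a callee that is already known to have an OSR compilation at the full-optimization level is not inlined. The rule stated in isolation (free-function form with the CompLevel constants spelled out for the sketch, not the ciEnv/ciMethod signature used in the patch):

    enum CompLevel {
      CompLevel_none = 0, CompLevel_simple = 1, CompLevel_limited_profile = 2,
      CompLevel_full_profile = 3, CompLevel_full_optimization = 4
    };

    // Veto inlining of a callee that has already OSRed at C2 while the caller is
    // only being compiled at a profiling (C1) level.
    bool should_not_inline(CompLevel current_comp_level, CompLevel callee_highest_osr_level) {
      if (current_comp_level == CompLevel_full_profile ||
          current_comp_level == CompLevel_limited_profile) {
        return callee_highest_osr_level == CompLevel_full_optimization;
      }
      return false;
    }
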
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh, - int bci, CompLevel level, TRAPS) { + int bci, CompLevel level, nmethod* nm, TRAPS) { if (should_create_mdo(mh(), level)) { create_mdo(mh, THREAD); } + // Check if MDO should be created for the inlined method + if (should_create_mdo(imh(), level)) { + create_mdo(imh, THREAD); + } - // If the method is already compiling, quickly bail out. - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { - // Use loop event as an opportinity to also check there's been - // enough calls. - CompLevel cur_level = comp_level(mh()); - CompLevel next_level = call_event(mh(), cur_level); - CompLevel next_osr_level = loop_event(mh(), level); + if (is_compilation_enabled()) { + CompLevel next_osr_level = loop_event(imh(), level); + CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); if (next_osr_level == CompLevel_limited_profile) { next_osr_level = CompLevel_full_profile; // OSRs are supposed to be for very hot methods. } - next_level = MAX2(next_level, - next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level); - bool is_compiling = false; - if (next_level != cur_level) { - compile(mh, InvocationEntryBci, next_level, THREAD); - is_compiling = true; + + // At the very least compile the OSR version + if (!CompileBroker::compilation_is_in_queue(imh, bci)) { + // Check if there's a method like that already + nmethod* osr_nm = NULL; + if (max_osr_level >= next_osr_level) { + // There is an osr method already with the same + // or greater level, check if it has the bci we need + osr_nm = imh->lookup_osr_nmethod_for(bci, next_osr_level, false); + } + if (osr_nm == NULL) { + compile(imh, bci, next_osr_level, THREAD); + } } - // Do the OSR version - if (!is_compiling && next_osr_level != level) { - compile(mh, bci, next_osr_level, THREAD); + // Use loop event as an opportunity to also check if there's been + // enough calls. + CompLevel cur_level, next_level; + if (mh() != imh()) { // If there is an enclosing method + guarantee(nm != NULL, "Should have nmethod here"); + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level); + + if (max_osr_level == CompLevel_full_optimization) { + // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts + bool make_not_entrant = false; + if (nm->is_osr_method()) { + // This is an osr method, just make it not entrant and recompile later if needed + make_not_entrant = true; + } else { + if (next_level != CompLevel_full_optimization) { + // next_level is not full opt, so we need to recompile the + // enclosing method without the inlinee + cur_level = CompLevel_none; + make_not_entrant = true; + } + } + if (make_not_entrant) { + if (PrintTieredEvents) { + int osr_bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci; + print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); + } + nm->make_not_entrant(); + } + } + if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + // Fix up next_level if necessary to avoid deopts + if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { + next_level = CompLevel_full_profile; + } + if (cur_level != next_level) { + compile(mh, InvocationEntryBci, next_level, THREAD); + } + } + } else { + cur_level = comp_level(imh()); + next_level = call_event(imh(), cur_level); + if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) { + compile(imh, InvocationEntryBci, next_level, THREAD); + } } } } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/advancedThresholdPolicy.hpp --- a/src/share/vm/runtime/advancedThresholdPolicy.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -211,14 +211,16 @@ virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS); // event() from SimpleThresholdPolicy would call these. virtual void method_invocation_event(methodHandle method, methodHandle inlinee, - CompLevel level, TRAPS); + CompLevel level, nmethod* nm, TRAPS); virtual void method_back_branch_event(methodHandle method, methodHandle inlinee, - int bci, CompLevel level, TRAPS); + int bci, CompLevel level, nmethod* nm, TRAPS); public: AdvancedThresholdPolicy() : _start_time(0) { } // Select task is called by CompileBroker. We should return a task or NULL. virtual CompileTask* select_task(CompileQueue* compile_queue); virtual void initialize(); + virtual bool should_not_inline(ciEnv* env, ciMethod* callee); + }; #endif // TIERED diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/arguments.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -1680,8 +1680,33 @@ UseParallelGC || UseParallelOldGC)); } +// check if do gclog rotation +// +UseGCLogFileRotation is a must, +// no gc log rotation when log file not supplied or +// NumberOfGCLogFiles is 0, or GCLogFileSize is 0 +void check_gclog_consistency() { + if (UseGCLogFileRotation) { + if ((Arguments::gc_log_filename() == NULL) || + (NumberOfGCLogFiles == 0) || + (GCLogFileSize == 0)) { + jio_fprintf(defaultStream::output_stream(), + "To enable GC log rotation, use -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles= -XX:GCLogFileSize=\n" + "where num_of_file > 0 and num_of_size > 0\n" + "GC log rotation is turned off\n"); + UseGCLogFileRotation = false; + } + } + + if (UseGCLogFileRotation && GCLogFileSize < 8*K) { + FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K); + jio_fprintf(defaultStream::output_stream(), + "GCLogFileSize changed to minimum 8K\n"); + } +} + // Check consistency of GC selection bool Arguments::check_gc_consistency() { + check_gclog_consistency(); bool status = true; // Ensure that the user has not selected conflicting sets // of collectors. [Note: this check is merely a user convenience; @@ -2672,6 +2697,7 @@ return JNI_ERR; } } + // Change the default value for flags which have different default values // when working with older JDKs. 
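
check_gclog_consistency() above only leaves -XX:+UseGCLogFileRotation in effect when a log file (-Xloggc:), a non-zero -XX:NumberOfGCLogFiles and a non-zero -XX:GCLogFileSize are all supplied, and it raises GCLogFileSize to the 8K minimum when a smaller value is given; the three flags themselves are added in the globals.hpp hunk below, and the actual rotation is triggered from safepoint cleanup. A command line that satisfies those checks, with the log file name, file count, size and main class as example values:

    java -Xloggc:gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=8192 MyApp
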
if (JDK_Version::current().compare_major(6) <= 0 && diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/atomic.cpp --- a/src/share/vm/runtime/atomic.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/atomic.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -83,3 +83,13 @@ return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); } + +jlong Atomic::add(jlong add_value, volatile jlong* dest) { + jlong old = load(dest); + jlong new_value = old + add_value; + while (old != cmpxchg(new_value, dest, old)) { + old = load(dest); + new_value = old + add_value; + } + return old; +} diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/atomic.hpp --- a/src/share/vm/runtime/atomic.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/atomic.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -51,6 +51,8 @@ static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest); static void* add_ptr(intptr_t add_value, volatile void* dest); + static jlong add (jlong add_value, volatile jlong* dest); + // Atomically increment location static void inc (volatile jint* dest); static void inc_ptr(volatile intptr_t* dest); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/compilationPolicy.cpp --- a/src/share/vm/runtime/compilationPolicy.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/compilationPolicy.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -306,7 +306,7 @@ return (current >= initial + target); } -nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) { +nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) { assert(comp_level == CompLevel_none, "This should be only called from the interpreter"); NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci)); if (JvmtiExport::can_post_interpreter_events()) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/compilationPolicy.hpp --- a/src/share/vm/runtime/compilationPolicy.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/compilationPolicy.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -62,7 +62,7 @@ virtual int compiler_count(CompLevel comp_level) = 0; // main notification entry, return a pointer to an nmethod if the OSR is required, // returns NULL otherwise. - virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0; + virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0; // safepoint() is called at the end of the safepoint virtual void do_safepoint_work() = 0; // reprofile request @@ -80,6 +80,7 @@ virtual bool is_mature(methodOop method) = 0; // Do policy initialization virtual void initialize() = 0; + virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; } }; // A base class for baseline policies. 
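
The new Atomic::add(jlong, volatile jlong*) above is the standard compare-and-swap retry loop: load, compute, and retry the cmpxchg until no other thread has changed the destination in between, then hand back the value the winning exchange observed. A standalone equivalent using std::atomic, for illustration only; HotSpot keeps this in its own Atomic layer because 64-bit loads and exchanges need platform-specific handling on 32-bit targets:

    #include <atomic>
    #include <cstdint>

    // Atomically add 'add_value' to 'dest'; return the value the successful
    // exchange observed (the pre-add value), mirroring the hunk above.
    int64_t atomic_add64(std::atomic<int64_t>& dest, int64_t add_value) {
      int64_t old = dest.load();
      while (!dest.compare_exchange_weak(old, old + add_value)) {
        // on failure, compare_exchange_weak reloads 'old' with the current value
      }
      return old;
    }
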
@@ -101,7 +102,7 @@ virtual bool is_mature(methodOop method); virtual void initialize(); virtual CompileTask* select_task(CompileQueue* compile_queue); - virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS); + virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS); virtual void method_invocation_event(methodHandle m, TRAPS) = 0; virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0; }; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/globals.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1944,6 +1944,9 @@ "Number of ObjArray elements to push onto the marking stack" \ "before pushing a continuation entry") \ \ + notproduct(bool, ExecuteInternalVMTests, false, \ + "Enable execution of internal VM tests.") \ + \ product_pd(bool, UseTLAB, "Use thread-local object allocation") \ \ product_pd(bool, ResizeTLAB, \ @@ -2332,6 +2335,20 @@ "Print diagnostic message when GC is stalled" \ "by JNI critical section") \ \ + /* GC log rotation setting */ \ + \ + product(bool, UseGCLogFileRotation, false, \ + "Prevent large gclog file for long running app. " \ + "Requires -Xloggc:") \ + \ + product(uintx, NumberOfGCLogFiles, 0, \ + "Number of gclog files in rotation, " \ + "Default: 0, no rotation") \ + \ + product(uintx, GCLogFileSize, 0, \ + "GC log file size, Default: 0 bytes, no rotation " \ + "Only valid with UseGCLogFileRotation") \ + \ /* JVMTI heap profiling */ \ \ diagnostic(bool, TraceJVMTIObjectTagging, false, \ diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/java.cpp --- a/src/share/vm/runtime/java.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/java.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -468,12 +468,10 @@ StatSampler::disengage(); StatSampler::destroy(); -#ifndef SERIALGC - // stop CMS threads - if (UseConcMarkSweepGC) { - ConcurrentMarkSweepThread::stop(); - } -#endif // SERIALGC + // We do not need to explicitly stop concurrent GC threads because the + // JVM will be taken down at a safepoint when such threads are inactive -- + // except for some concurrent G1 threads, see (comment in) + // Threads::destroy_vm(). // Print GC/heap related information. if (PrintGCDetails) { diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/safepoint.cpp --- a/src/share/vm/runtime/safepoint.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/safepoint.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -511,6 +511,11 @@ TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime); NMethodSweeper::scan_stacks(); + + // rotate log files? 
+ if (UseGCLogFileRotation) { + gclog_or_tty->rotate_log(); + } } diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/sharedRuntime.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -763,6 +763,13 @@ throw_and_post_jvmti_exception(thread, exception); JRT_END +JRT_ENTRY(void, SharedRuntime::throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual)) + assert(thread == JavaThread::current() && required->is_oop() && actual->is_oop(), "bad args"); + ResourceMark rm; + char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual); + throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); +JRT_END + address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread, address pc, SharedRuntime::ImplicitExceptionKind exception_kind) diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/sharedRuntime.hpp --- a/src/share/vm/runtime/sharedRuntime.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/sharedRuntime.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -185,6 +185,7 @@ static void throw_NullPointerException(JavaThread* thread); static void throw_NullPointerException_at_call(JavaThread* thread); static void throw_StackOverflowError(JavaThread* thread); + static void throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual); static address continuation_for_implicit_exception(JavaThread* thread, address faulting_pc, ImplicitExceptionKind exception_kind); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/simpleThresholdPolicy.cpp --- a/src/share/vm/runtime/simpleThresholdPolicy.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -50,15 +50,18 @@ case COMPILE: tty->print("compile"); break; - case KILL: - tty->print("kill"); + case REMOVE_FROM_QUEUE: + tty->print("remove-from-queue"); break; - case UPDATE: - tty->print("update"); + case UPDATE_IN_QUEUE: + tty->print("update-in-queue"); break; case REPROFILE: tty->print("reprofile"); break; + case MAKE_NOT_ENTRANT: + tty->print("make-not-entrant"); + break; default: tty->print("unknown"); } @@ -68,7 +71,6 @@ ResourceMark rm; char *method_name = mh->name_and_sig_as_C_string(); tty->print("[%s", method_name); - // We can have an inlinee, although currently we don't generate any notifications for the inlined methods. 
if (inlinee_event) { char *inlinee_name = imh->name_and_sig_as_C_string(); tty->print(" [%s]] ", inlinee_name); @@ -170,7 +172,7 @@ } nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee, - int branch_bci, int bci, CompLevel comp_level, TRAPS) { + int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) { if (comp_level == CompLevel_none && JvmtiExport::can_post_interpreter_events()) { assert(THREAD->is_Java_thread(), "Should be java thread"); @@ -190,12 +192,13 @@ } if (bci == InvocationEntryBci) { - method_invocation_event(method, inlinee, comp_level, THREAD); + method_invocation_event(method, inlinee, comp_level, nm, THREAD); } else { - method_back_branch_event(method, inlinee, bci, comp_level, THREAD); - int highest_level = method->highest_osr_comp_level(); + method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD); + // method == inlinee if the event originated in the main method + int highest_level = inlinee->highest_osr_comp_level(); if (highest_level > comp_level) { - osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false); + osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false); } } return osr_nm; @@ -323,7 +326,8 @@ // Determine if a method should be compiled with a normal entry point at a different level. CompLevel SimpleThresholdPolicy::call_event(methodOop method, CompLevel cur_level) { - CompLevel osr_level = (CompLevel) method->highest_osr_comp_level(); + CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), + common(&SimpleThresholdPolicy::loop_predicate, method, cur_level)); CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level); // If OSR method level is greater than the regular method level, the levels should be @@ -344,21 +348,22 @@ // Determine if we should do an OSR compilation of a given method. CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) { + CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level); if (cur_level == CompLevel_none) { // If there is a live OSR method that means that we deopted to the interpreter // for the transition. - CompLevel osr_level = (CompLevel)method->highest_osr_comp_level(); + CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); if (osr_level > CompLevel_none) { return osr_level; } } - return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level); + return next_level; } // Handle the invocation event. void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh, - CompLevel level, TRAPS) { + CompLevel level, nmethod* nm, TRAPS) { if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { CompLevel next_level = call_event(mh(), level); if (next_level != level) { @@ -370,7 +375,7 @@ // Handle the back branch event. Notice that we can compile the method // with a regular entry from here. void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh, - int bci, CompLevel level, TRAPS) { + int bci, CompLevel level, nmethod* nm, TRAPS) { // If the method is already compiling, quickly bail out. 
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { // Use loop event as an opportinity to also check there's been diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/simpleThresholdPolicy.hpp --- a/src/share/vm/runtime/simpleThresholdPolicy.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -62,7 +62,7 @@ void set_c1_count(int x) { _c1_count = x; } void set_c2_count(int x) { _c2_count = x; } - enum EventType { CALL, LOOP, COMPILE, KILL, UPDATE, REPROFILE }; + enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level); // Print policy-specific information if necessary virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { } @@ -88,9 +88,9 @@ return CompLevel_none; } virtual void method_invocation_event(methodHandle method, methodHandle inlinee, - CompLevel level, TRAPS); + CompLevel level, nmethod* nm, TRAPS); virtual void method_back_branch_event(methodHandle method, methodHandle inlinee, - int bci, CompLevel level, TRAPS); + int bci, CompLevel level, nmethod* nm, TRAPS); public: SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { } virtual int compiler_count(CompLevel comp_level) { @@ -101,17 +101,20 @@ virtual void do_safepoint_work() { } virtual void delay_compilation(methodOop method) { } virtual void disable_compilation(methodOop method) { } - // TODO: we should honour reprofiling requests in the future. Currently reprofiling - // would happen but not to the extent we would ideally like. virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); virtual nmethod* event(methodHandle method, methodHandle inlinee, - int branch_bci, int bci, CompLevel comp_level, TRAPS); + int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS); // Select task is called by CompileBroker. We should return a task or NULL. virtual CompileTask* select_task(CompileQueue* compile_queue); // Tell the runtime if we think a given method is adequately profiled. 
virtual bool is_mature(methodOop method); // Initialize: set compiler thread count virtual void initialize(); + virtual bool should_not_inline(ciEnv* env, ciMethod* callee) { + return (env->comp_level() == CompLevel_limited_profile || + env->comp_level() == CompLevel_full_profile) && + callee->has_loops(); + } }; #endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/stubRoutines.cpp --- a/src/share/vm/runtime/stubRoutines.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/stubRoutines.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -55,6 +55,7 @@ address StubRoutines::_throw_NullPointerException_entry = NULL; address StubRoutines::_throw_NullPointerException_at_call_entry = NULL; address StubRoutines::_throw_StackOverflowError_entry = NULL; +address StubRoutines::_throw_WrongMethodTypeException_entry = NULL; address StubRoutines::_handler_for_unsafe_access_entry = NULL; jint StubRoutines::_verify_oop_count = 0; address StubRoutines::_verify_oop_subroutine_entry = NULL; diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/stubRoutines.hpp --- a/src/share/vm/runtime/stubRoutines.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/stubRoutines.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -132,6 +132,7 @@ static address _throw_NullPointerException_entry; static address _throw_NullPointerException_at_call_entry; static address _throw_StackOverflowError_entry; + static address _throw_WrongMethodTypeException_entry; static address _handler_for_unsafe_access_entry; static address _atomic_xchg_entry; @@ -254,6 +255,7 @@ static address throw_NullPointerException_entry() { return _throw_NullPointerException_entry; } static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; } static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } + static address throw_WrongMethodTypeException_entry() { return _throw_WrongMethodTypeException_entry; } // Exceptions during unsafe access - should throw Java exception rather // than crash. diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/runtime/thread.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -3698,6 +3698,14 @@ // heap is unparseable if they are caught. Grab the Heap_lock // to prevent this. The GC vm_operations will not be able to // queue until after the vm thread is dead. + // After this point, we'll never emerge out of the safepoint before + // the VM exits, so concurrent GC threads do not need to be explicitly + // stopped; they remain inactive until the process exits. + // Note: some concurrent G1 threads may be running during a safepoint, + // but these will not be accessing the heap, just some G1-specific side + // data structures that are not accessed by any other threads but them + // after this point in a terminal safepoint. + MutexLocker ml(Heap_lock); VMThread::wait_for_vm_thread_exit(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/utilities/bitMap.hpp --- a/src/share/vm/utilities/bitMap.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/utilities/bitMap.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -161,11 +161,11 @@ // Set or clear the specified bit. inline void set_bit(idx_t bit); - void clear_bit(idx_t bit); + inline void clear_bit(idx_t bit); // Atomically set or clear the specified bit. - bool par_set_bit(idx_t bit); - bool par_clear_bit(idx_t bit); + inline bool par_set_bit(idx_t bit); + inline bool par_clear_bit(idx_t bit); // Put the given value at the given offset. The parallel version // will CAS the value into the bitmap and is quite a bit slower. diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/utilities/ostream.cpp --- a/src/share/vm/utilities/ostream.cpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/utilities/ostream.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -349,7 +349,7 @@ fileStream::~fileStream() { if (_file != NULL) { if (_need_close) fclose(_file); - _file = NULL; + _file = NULL; } } @@ -377,6 +377,86 @@ update_position(s, len); } +rotatingFileStream::~rotatingFileStream() { + if (_file != NULL) { + if (_need_close) fclose(_file); + _file = NULL; + FREE_C_HEAP_ARRAY(char, _file_name); + _file_name = NULL; + } +} + +rotatingFileStream::rotatingFileStream(const char* file_name) { + _cur_file_num = 0; + _bytes_writen = 0L; + _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10); + jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num); + _file = fopen(_file_name, "w"); + _need_close = true; +} + +rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) { + _cur_file_num = 0; + _bytes_writen = 0L; + _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10); + jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num); + _file = fopen(_file_name, opentype); + _need_close = true; +} + +void rotatingFileStream::write(const char* s, size_t len) { + if (_file != NULL) { + // Make an unused local variable to avoid warning from gcc 4.x compiler. + size_t count = fwrite(s, 1, len, _file); + Atomic::add((jlong)count, &_bytes_writen); + } + update_position(s, len); +} + +// rotate_log must be called from VMThread at safepoint. In case need change parameters +// for gc log rotation from thread other than VMThread, a sub type of VM_Operation +// should be created and be submitted to VMThread's operation queue. DO NOT call this +// function directly. Currently, it is safe to rotate log at safepoint through VMThread. +// That is, no mutator threads and concurrent GC threads run parallel with VMThread to +// write to gc log file at safepoint. If in future, changes made for mutator threads or +// concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log +// must be synchronized. +void rotatingFileStream::rotate_log() { + if (_bytes_writen < (jlong)GCLogFileSize) return; +#ifdef ASSERT + Thread *thread = Thread::current(); + assert(thread == NULL || + (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()), + "Must be VMThread at safepoint"); +#endif + if (NumberOfGCLogFiles == 1) { + // rotate in same file + rewind(); + _bytes_writen = 0L; + return; + } + + // rotate file in names file.0, file.1, file.2, ..., file. 
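+  // (for example, with NumberOfGCLogFiles=3 the stream cycles through file.0 -> file.1 -> file.2
+  //  and then wraps back to file.0)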
+ // close current file, rotate to next file + if (_file != NULL) { + _cur_file_num ++; + if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0; + jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d", + Arguments::gc_log_filename(), _cur_file_num); + fclose(_file); + _file = NULL; + } + _file = fopen(_file_name, "w"); + if (_file != NULL) { + _bytes_writen = 0L; + _need_close = true; + } else { + tty->print_cr("failed to open rotation log file %s due to %s\n", + _file_name, strerror(errno)); + _need_close = false; + } +} + defaultStream* defaultStream::instance = NULL; int defaultStream::_output_fd = 1; int defaultStream::_error_fd = 2; @@ -749,14 +829,17 @@ gclog_or_tty = tty; // default to tty if (Arguments::gc_log_filename() != NULL) { - fileStream * gclog = new(ResourceObj::C_HEAP) - fileStream(Arguments::gc_log_filename()); + fileStream * gclog = UseGCLogFileRotation ? + new(ResourceObj::C_HEAP) + rotatingFileStream(Arguments::gc_log_filename()) : + new(ResourceObj::C_HEAP) + fileStream(Arguments::gc_log_filename()); if (gclog->is_open()) { // now we update the time stamp of the GC log to be synced up // with tty. gclog->time_stamp().update_to(tty->time_stamp().ticks()); - gclog_or_tty = gclog; } + gclog_or_tty = gclog; } // If we haven't lazily initialized the logfile yet, do it now, diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/utilities/ostream.hpp --- a/src/share/vm/utilities/ostream.hpp Sat Jul 02 04:17:12 2011 -0400 +++ b/src/share/vm/utilities/ostream.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -110,14 +110,15 @@ // flushing virtual void flush() {} virtual void write(const char* str, size_t len) = 0; - virtual ~outputStream() {} // close properly on deletion + virtual void rotate_log() {} // GC log rotation + virtual ~outputStream() {} // close properly on deletion void dec_cr() { dec(); cr(); } void inc_cr() { inc(); cr(); } }; // standard output - // ANSI C++ name collision +// ANSI C++ name collision extern outputStream* tty; // tty output extern outputStream* gclog_or_tty; // stream for gc log if -Xloggc:, or tty @@ -176,6 +177,7 @@ FILE* _file; bool _need_close; public: + fileStream() { _file = NULL; _need_close = false; } fileStream(const char* file_name); fileStream(const char* file_name, const char* opentype); fileStream(FILE* file) { _file = file; _need_close = false; } @@ -210,6 +212,20 @@ void flush() {}; }; +class rotatingFileStream : public fileStream { + protected: + char* _file_name; + jlong _bytes_writen; + uintx _cur_file_num; // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1 + public: + rotatingFileStream(const char* file_name); + rotatingFileStream(const char* file_name, const char* opentype); + rotatingFileStream(FILE* file) : fileStream(file) {} + ~rotatingFileStream(); + virtual void write(const char* c, size_t len); + virtual void rotate_log(); +}; + void ostream_init(); void ostream_init_log(); void ostream_exit(); diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/utilities/quickSort.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/utilities/quickSort.cpp Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "utilities/quickSort.hpp" + +#ifndef PRODUCT + +// Unit tests + +#include "runtime/os.hpp" +#include + +static int test_comparator(int a, int b) { + if (a == b) { + return 0; + } + if (a < b) { + return -1; + } + return 1; +} + +static int test_even_odd_comparator(int a, int b) { + bool a_is_odd = (a % 2) == 1; + bool b_is_odd = (b % 2) == 1; + if (a_is_odd == b_is_odd) { + return 0; + } + if (a_is_odd) { + return -1; + } + return 1; +} + +static int test_stdlib_comparator(const void* a, const void* b) { + int ai = *(int*)a; + int bi = *(int*)b; + if (ai == bi) { + return 0; + } + if (ai < bi) { + return -1; + } + return 1; +} + +void QuickSort::print_array(const char* prefix, int* array, int length) { + tty->print("%s:", prefix); + for (int i = 0; i < length; i++) { + tty->print(" %d", array[i]); + } + tty->print_cr(""); +} + +bool QuickSort::compare_arrays(int* actual, int* expected, int length) { + for (int i = 0; i < length; i++) { + if (actual[i] != expected[i]) { + print_array("Sorted array ", actual, length); + print_array("Expected array", expected, length); + return false; + } + } + return true; +} + +template +bool QuickSort::sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent) { + sort(arrayToSort, length, comparator, idempotent); + return compare_arrays(arrayToSort, expectedResult, length); +} + +bool QuickSort::test_quick_sort() { + tty->print_cr("test_quick_sort\n"); + { + int* test_array = NULL; + int* expected_array = NULL; + assert(sort_and_compare(test_array, expected_array, 0, test_comparator), "Empty array not handled"); + } + { + int test_array[] = {3}; + int expected_array[] = {3}; + assert(sort_and_compare(test_array, expected_array, 1, test_comparator), "Single value array not handled"); + } + { + int test_array[] = {3,2}; + int expected_array[] = {2,3}; + assert(sort_and_compare(test_array, expected_array, 2, test_comparator), "Array with 2 values not correctly sorted"); + } + { + int test_array[] = {3,2,1}; + int expected_array[] = {1,2,3}; + assert(sort_and_compare(test_array, expected_array, 3, test_comparator), "Array with 3 values not correctly sorted"); + } + { + int test_array[] = {4,3,2,1}; + int expected_array[] = {1,2,3,4}; + assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "Array with 4 values not correctly sorted"); + } + { + int test_array[] = {7,1,5,3,6,9,8,2,4,0}; + int expected_array[] = {0,1,2,3,4,5,6,7,8,9}; + assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Array with 10 values not correctly sorted"); + } + { + int test_array[] = {4,4,1,4}; + int expected_array[] = {1,4,4,4}; + assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "3 duplicates not sorted correctly"); + } + 
{ + int test_array[] = {0,1,2,3,4,5,6,7,8,9}; + int expected_array[] = {0,1,2,3,4,5,6,7,8,9}; + assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Already sorted array not correctly sorted"); + } + { + // one of the random arrays that found an issue in the partion method. + int test_array[] = {76,46,81,8,64,56,75,11,51,55,11,71,59,27,9,64,69,75,21,25,39,40,44,32,7,8,40,41,24,78,24,74,9,65,28,6,40,31,22,13,27,82}; + int expected_array[] = {6,7,8,8,9,9,11,11,13,21,22,24,24,25,27,27,28,31,32,39,40,40,40,41,44,46,51,55,56,59,64,64,65,69,71,74,75,75,76,78,81,82}; + assert(sort_and_compare(test_array, expected_array, 42, test_comparator), "Not correctly sorted"); + } + { + int test_array[] = {2,8,1,4}; + int expected_array[] = {1,4,2,8}; + assert(sort_and_compare(test_array, expected_array, 4, test_even_odd_comparator), "Even/odd not sorted correctly"); + } + { // Some idempotent tests + { + // An array of lenght 3 is only sorted by find_pivot. Make sure that it is idempotent. + int test_array[] = {1,4,8}; + int expected_array[] = {1,4,8}; + assert(sort_and_compare(test_array, expected_array, 3, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {1,7,9,4,8,2}; + int expected_array[] = {1,7,9,4,8,2}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {1,9,7,4,2,8}; + int expected_array[] = {1,9,7,4,2,8}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {7,9,1,2,8,4}; + int expected_array[] = {7,9,1,2,8,4}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {7,1,9,2,4,8}; + int expected_array[] = {7,1,9,2,4,8}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {9,1,7,4,8,2}; + int expected_array[] = {9,1,7,4,8,2}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + { + int test_array[] = {9,7,1,4,2,8}; + int expected_array[] = {9,7,1,4,2,8}; + assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent"); + } + } + + // test sorting random arrays + for (int i = 0; i < 1000; i++) { + int length = os::random() % 100; + int* test_array = new int[length]; + int* expected_array = new int[length]; + for (int j = 0; j < length; j++) { + // Choose random values, but get a chance of getting duplicates + test_array[j] = os::random() % (length * 2); + expected_array[j] = test_array[j]; + } + + // Compare sorting to stdlib::qsort() + qsort(expected_array, length, sizeof(int), test_stdlib_comparator); + assert(sort_and_compare(test_array, expected_array, length, test_comparator), "Random array not correctly sorted"); + + // Make sure sorting is idempotent. + // Both test_array and expected_array are sorted by the test_comparator. + // Now sort them once with the test_even_odd_comparator. Then sort the + // test_array one more time with test_even_odd_comparator and verify that + // it is idempotent. 
+ sort(expected_array, length, test_even_odd_comparator, true); + sort(test_array, length, test_even_odd_comparator, true); + assert(compare_arrays(test_array, expected_array, length), "Sorting identical arrays rendered different results"); + sort(test_array, length, test_even_odd_comparator, true); + assert(compare_arrays(test_array, expected_array, length), "Sorting already sorted array changed order of elements - not idempotent"); + + delete[] test_array; + delete[] expected_array; + } + return true; +} + +#endif diff -r 109d1d265924 -r 5447b2c582ad src/share/vm/utilities/quickSort.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/utilities/quickSort.hpp Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_UTILITIES_QUICKSORT_HPP +#define SHARE_VM_UTILITIES_QUICKSORT_HPP + +#include "memory/allocation.hpp" +#include "runtime/globals.hpp" +#include "utilities/debug.hpp" + +class QuickSort : AllStatic { + + private: + template + static void swap(T* array, int x, int y) { + T tmp = array[x]; + array[x] = array[y]; + array[y] = tmp; + } + + // As pivot we use the median of the first, last and middle elements. + // We swap in these three values at the right place in the array. This + // means that this method not only returns the index of the pivot + // element. It also alters the array so that: + // array[first] <= array[middle] <= array[last] + // A side effect of this is that arrays of length <= 3 are sorted. + template + static int find_pivot(T* array, int length, C comparator) { + assert(length > 1, "length of array must be > 0"); + + int middle_index = length / 2; + int last_index = length - 1; + + if (comparator(array[0], array[middle_index]) == 1) { + swap(array, 0, middle_index); + } + if (comparator(array[0], array[last_index]) == 1) { + swap(array, 0, last_index); + } + if (comparator(array[middle_index], array[last_index]) == 1) { + swap(array, middle_index, last_index); + } + // Now the value in the middle of the array is the median + // of the fist, last and middle values. Use this as pivot. 
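+    // (for example {9, 3, 7} becomes {3, 7, 9} after the three compares above,
+    //  and middle_index points at the median, 7)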
+ return middle_index; + } + + template + static int partition(T* array, int pivot, int length, C comparator) { + int left_index = -1; + int right_index = length; + T pivot_val = array[pivot]; + + while (true) { + do { + left_index++; + } while (comparator(array[left_index], pivot_val) == -1); + do { + right_index--; + } while (comparator(array[right_index], pivot_val) == 1); + + if (left_index < right_index) { + if (!idempotent || comparator(array[left_index], array[right_index]) != 0) { + swap(array, left_index, right_index); + } + } else { + return right_index; + } + } + + ShouldNotReachHere(); + return 0; + } + + template + static void inner_sort(T* array, int length, C comparator) { + if (length < 2) { + return; + } + int pivot = find_pivot(array, length, comparator); + if (length < 4) { + // arrays up to length 3 will be sorted after finding the pivot + return; + } + int split = partition(array, pivot, length, comparator); + int first_part_length = split + 1; + inner_sort(array, first_part_length, comparator); + inner_sort(&array[first_part_length], length - first_part_length, comparator); + } + + public: + // The idempotent parameter prevents the sort from + // reordering a previous valid sort by not swapping + // fields that compare as equal. This requires extra + // calls to the comparator, so the performance + // impact depends on the comparator. + template + static void sort(T* array, int length, C comparator, bool idempotent) { + // Switch "idempotent" from function paramter to template parameter + if (idempotent) { + inner_sort(array, length, comparator); + } else { + inner_sort(array, length, comparator); + } + } + + // for unit testing +#ifndef PRODUCT + static void print_array(const char* prefix, int* array, int length); + static bool compare_arrays(int* actual, int* expected, int length); + template static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false); + static bool test_quick_sort(); +#endif +}; + + +#endif //SHARE_VM_UTILITIES_QUICKSORT_HPP diff -r 109d1d265924 -r 5447b2c582ad test/compiler/6478991/NullCheckTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6478991/NullCheckTest.java Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/** + * @test + * @bug 6478991 + * @summary C1 NullCheckEliminator yields incorrect exceptions + * + * @run main/othervm -XX:CompileOnly=NullCheckTest.test,NullCheckTest.inlined -Xcomp NullCheckTest + */ + +public class NullCheckTest { + static class A { + int f; + + public final void inlined(A a) { + // This cast is intended to fail. + B b = ((B) a); + } + } + + static class B extends A { + } + + + private static void test(A a1, A a2) { + // Inlined call must do a null check on a1. + // However, the exlipcit NullCheck instruction is eliminated and + // the null check is folded into the field load below, so the + // exception in the inlined method is thrown before the null check + // and the NullPointerException is not thrown. + a1.inlined(a2); + + int x = a1.f; + } + + public static void main(String[] args) { + // load classes + new B(); + try { + test(null, new A()); + + throw new InternalError("FAILURE: no exception"); + } catch (NullPointerException ex) { + System.out.println("CORRECT: NullPointerException"); + } catch (ClassCastException ex) { + System.out.println("FAILURE: ClassCastException"); + throw ex; + } + } +} diff -r 109d1d265924 -r 5447b2c582ad test/compiler/7044738/Test7044738.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7044738/Test7044738.java Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/** + * @test + * @bug 7044738 + * @summary Loop unroll optimization causes incorrect result + * + * @run main/othervm -Xbatch Test7044738 + */ + +public class Test7044738 { + + private static final int INITSIZE = 10000; + public int d[] = { 1, 2, 3, 4 }; + public int i, size; + + private static int iter = 5; + + boolean done() { return (--iter > 0); } + + public static void main(String args[]) { + Test7044738 t = new Test7044738(); + t.test(); + } + + int test() { + + while (done()) { + size = INITSIZE; + + for (i = 0; i < size; i++) { + d[0] = d[1]; // 2 + d[1] = d[2]; // 3 + d[2] = d[3]; // 4 + d[3] = d[0]; // 2 + + d[0] = d[1]; // 3 + d[1] = d[2]; // 4 + d[2] = d[3]; // 2 + d[3] = d[0]; // 3 + + d[0] = d[1]; // 4 + d[1] = d[2]; // 2 + d[2] = d[3]; // 3 + d[3] = d[0]; // 4 + + d[0] = d[1]; // 2 + d[1] = d[2]; // 3 + d[2] = d[3]; // 4 + d[3] = d[0]; // 2 + + d[0] = d[1]; // 3 + d[1] = d[2]; // 4 + d[2] = d[3]; // 2 + d[3] = d[0]; // 3 + + d[0] = d[1]; // 4 + d[1] = d[2]; // 2 + d[2] = d[3]; // 3 + d[3] = d[0]; // 4 + + d[0] = d[1]; // 2 + d[1] = d[2]; // 3 + d[2] = d[3]; // 4 + d[3] = d[0]; // 2 + + d[0] = d[1]; // 3 + d[1] = d[2]; // 4 + d[2] = d[3]; // 2 + d[3] = d[0]; // 3 + } + + // try to defeat dead code elimination + if (d[0] == d[1]) { + System.out.println("test failed: iter=" + iter + " i=" + i + " d[] = { " + d[0] + ", " + d[1] + ", " + d[2] + ", " + d[3] + " } "); + System.exit(97); + } + } + return d[3]; + } + +} diff -r 109d1d265924 -r 5447b2c582ad test/compiler/7046096/Test7046096.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7046096/Test7046096.java Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/** + * @test + * @bug 7046096 + * @summary SEGV IN C2 WITH 6U25 + * + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat Test7046096 + */ + + +public class Test7046096 { + + static int first = 1; + + String add(String str) { + if (first != 0) { + return str + "789"; + } else { + return "null"; + } + } + + String test(String str) { + for (int i=0; i < first; i++) { + if (i > 1) + return "bad"; + } + return add(str+"456"); + } + + public static void main(String [] args) { + Test7046096 t = new Test7046096(); + for (int i = 0; i < 11000; i++) { + String str = t.test("123"); + if (!str.equals("123456789")) { + System.out.println("FAILED: " + str + " != \"123456789\""); + System.exit(97); + } + } + } +} + diff -r 109d1d265924 -r 5447b2c582ad test/compiler/7052494/Test7052494.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7052494/Test7052494.java Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/** + * @test + * @bug 7052494 + * @summary Eclipse test fails on JDK 7 b142 + * + * @run main/othervm -Xbatch Test7052494 + */ + + +public class Test7052494 { + + static int test1(int i, int limit) { + int result = 0; + while (i++ != 0) { + if (result >= limit) + break; + result = i*2; + } + return result; + } + + static int test2(int i, int limit) { + int result = 0; + while (i-- != 0) { + if (result <= limit) + break; + result = i*2; + } + return result; + } + + static void test3(int i, int limit, int arr[]) { + while (i++ != 0) { + if (arr[i-1] >= limit) + break; + arr[i] = i*2; + } + } + + static void test4(int i, int limit, int arr[]) { + while (i-- != 0) { + if (arr[arr.length + i + 1] <= limit) + break; + arr[arr.length + i] = i*2; + } + } + + // Empty loop rolls through MAXINT if i > 0 + static int test5(int i) { + int result = 0; + while (i++ != 0) { + result = i*2; + } + return result; + } + + // Empty loop rolls through MININT if i < 0 + static int test6(int i) { + int result = 0; + while (i-- != 0) { + result = i*2; + } + return result; + } + + public static void main(String [] args) { + boolean failed = false; + int[] arr = new int[8]; + int[] ar3 = { 0, 0, 4, 6, 8, 10, 0, 0 }; + int[] ar4 = { 0, 0, 0, -10, -8, -6, -4, 0 }; + for (int i = 0; i < 11000; i++) { + int k = test1(1, 10); + if (k != 10) { + System.out.println("FAILED: " + k + " != 10"); + failed = true; + break; + } + } + for (int i = 0; i < 11000; i++) { + int k = test2(-1, -10); + if (k != -10) { + System.out.println("FAILED: " + k + " != -10"); + failed = true; + break; + } + } + for (int i = 0; i < 11000; i++) { + java.util.Arrays.fill(arr, 0); + test3(1, 10, arr); + if (!java.util.Arrays.equals(arr,ar3)) { + System.out.println("FAILED: arr = { " + arr[0] + ", " + + arr[1] + ", " + + arr[2] + ", " + + arr[3] + ", " + + arr[4] + ", " + + arr[5] + ", " + + arr[6] + ", " + + arr[7] + " }"); + failed = true; + break; + } + } + for (int i = 0; i < 11000; i++) { + java.util.Arrays.fill(arr, 0); + test4(-1, -10, arr); + if (!java.util.Arrays.equals(arr,ar4)) { + System.out.println("FAILED: arr = { " + arr[0] + ", " + + arr[1] + ", " + + arr[2] + ", " + + arr[3] + ", " + + arr[4] + ", " + + arr[5] + ", " + + arr[6] + ", " + + arr[7] + " }"); + failed = true; + break; + } + } + for (int i = 0; i < 11000; i++) { + int k = test5(1); + if (k != 0) { + System.out.println("FAILED: " + k + " != 0"); + failed = true; + break; + } + } + for (int i = 0; i < 11000; i++) { + int k = test6(-1); + if (k != 0) { + System.out.println("FAILED: " + k + " != 0"); + failed = true; + break; + } + } + if (failed) + System.exit(97); + } +} diff -r 109d1d265924 -r 5447b2c582ad test/gc/6941923/test6941923.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/6941923/test6941923.sh Thu Jul 07 22:34:34 2011 -0400 @@ -0,0 +1,179 @@ +## +## @test @(#)test6941923.sh +## @bug 6941923 +## @summary test new added flags for gc log rotation +## @author yqi +## @run shell test6941923.sh +## + +## skip on windows +OS=`uname -s` +case "$OS" in + SunOS | Linux ) + NULL=/dev/null + PS=":" + FS="/" + ;; + Windows_* ) + echo "Test skipped for Windows" + exit 0 + ;; + * ) + echo "Unrecognized system!" + exit 1; + ;; +esac + +if [ "${JAVA_HOME}" = "" ] +then + echo "JAVA_HOME not set" + exit 0 +fi + +$JAVA_HOME/bin/java -version > $NULL 2>&1 + +if [ $? != 0 ]; then + echo "Wrong JAVA_HOME? JAVA_HOME: $JAVA_HOME" + exit $? 
+fi + +# create a small test case +testname="Test" +if [ -e ${testname}.java ]; then + rm -rf ${testname}.* +fi + +cat >> ${testname}.java << __EOF__ +import java.util.Vector; + +public class Test implements Runnable +{ + private boolean _should_stop = false; + + public static void main(String[] args) throws Exception { + + long limit = Long.parseLong(args[0]) * 60L * 1000L; // minutes + Test t = new Test(); + t.set_stop(false); + Thread thr = new Thread(t); + thr.start(); + + long time1 = System.currentTimeMillis(); + long time2 = System.currentTimeMillis(); + while (time2 - time1 < limit) { + try { + Thread.sleep(2000); // 2 seconds + } + catch(Exception e) {} + time2 = System.currentTimeMillis(); + System.out.print("\r... " + (time2 - time1)/1000 + " seconds"); + } + System.out.println(); + t.set_stop(true); + } + public void set_stop(boolean value) { _should_stop = value; } + public void run() { + int cap = 20000; + int fix_size = 2048; + int loop = 0; + Vector< byte[] > v = new Vector< byte[] >(cap); + while(!_should_stop) { + byte[] g = new byte[fix_size]; + v.add(g); + loop++; + if (loop > cap) { + v = null; + cap *= 2; + if (cap > 80000) cap = 80000; + v = new Vector< byte[] >(cap); + } + } + } +} +__EOF__ + +msgsuccess="succeeded" +msgfail="failed" +gclogsize="16K" +filesize=$((16*1024)) +$JAVA_HOME/bin/javac ${testname}.java > $NULL 2>&1 + +if [ $? != 0 ]; then + echo "$JAVA_HOME/bin/javac ${testname}.java $fail" + exit -1 +fi + +# test for 2 minutes, it will complete circulation of gc log rotation +tts=2 +logfile="test.log" +hotspotlog="hotspot.log" + +if [ -e $logfile ]; then + rm -rf $logfile +fi + +#also delete $hotspotlog if it exists +if [ -f $hotspotlog ]; then + rm -rf $hotspotlog +fi + +options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=$gclogsize" +echo "Test gc log rotation in same file, wait for $tts minutes ...." +$JAVA_HOME/bin/java $options $testname $tts +if [ $? != 0 ]; then + echo "$msgfail" + exit -1 +fi + +# rotation file will be $logfile.0 +if [ -f $logfile.0 ]; then + outfilesize=`ls -l $logfile.0 | awk '{print $5 }'` + if [ $((outfilesize)) -ge $((filesize)) ]; then + echo $msgsuccess + else + echo $msgfail + fi +else + echo $msgfail + exit -1 +fi + +# delete log file +rm -rf $logfile.0 +if [ -f $hotspotlog ]; then + rm -rf $hotspotlog +fi + +#multiple log files +numoffiles=3 +options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=$numoffiles -XX:GCLogFileSize=$gclogsize" +echo "Test gc log rotation in $numoffiles files, wait for $tts minutes ...." +$JAVA_HOME/bin/java $options $testname $tts +if [ $? != 0 ]; then + echo "$msgfail" + exit -1 +fi + +atleast=0 # at least size of numoffile-1 files >= $gclogsize +tk=0 +while [ $(($tk)) -lt $(($numoffiles)) ] +do + if [ -f $logfile.$tk ]; then + outfilesize=`ls -l $logfile.$tk | awk '{ print $5 }'` + if [ $(($outfilesize)) -ge $(($filesize)) ]; then + atleast=$((atleast+1)) + fi + fi + tk=$((tk+1)) +done + +rm -rf $logfile.* +rm -rf $testname.* +rm -rf $hotspotlog + +if [ $(($atleast)) -ge $(($numoffiles-1)) ]; then + echo $msgsuccess +else + echo $msgfail + exit -1 +fi
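
For reference, a minimal usage sketch of the QuickSort utility added in this changeset. It is not part of the patch; the names compare_ints and sort_example are illustrative only, and the call shape follows the unit tests in quickSort.cpp above.

    #include "utilities/quickSort.hpp"

    // Three-way comparator in the style of the unit tests: returns -1, 0 or 1.
    static int compare_ints(int a, int b) {
      if (a < b) return -1;
      if (a > b) return 1;
      return 0;
    }

    static void sort_example() {
      int data[] = { 5, 2, 9, 2, 7 };
      // idempotent == false: elements that compare equal may still be swapped.
      QuickSort::sort(data, 5, compare_ints, false);
      // data is now { 2, 2, 5, 7, 9 }.
    }

Passing true for the idempotent argument suppresses swaps of elements that compare as equal, so repeating a sort with the same comparator leaves an already sorted array untouched; that is what the test_even_odd_comparator idempotency tests above verify.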