src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 4914:23c0eb012d6f

6330863: vm/gc/InfiniteList.java fails intermittently due to timeout
Summary: in some cases, allocate from the old gen before doing a full gc
Reviewed-by: stefank, jmasa
author jcoomes
date Thu, 16 Feb 2012 13:13:53 -0800
parents 53074c2c4600
children b632e80fc9dc
comparing 4913:ab4422d0ed59 to 4914:23c0eb012d6f

--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -416,29 +416,21 @@
     {
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();
 
       result = young_gen()->allocate(size);
-
-      // (1) If the requested object is too large to easily fit in the
-      //     young_gen, or
-      // (2) If GC is locked out via GCLocker, young gen is full and
-      //     the need for a GC already signalled to GCLocker (done
-      //     at a safepoint),
-      // ... then, rather than force a safepoint and (a potentially futile)
-      // collection (attempt) for each allocation, try allocation directly
-      // in old_gen. For case (2) above, we may in the future allow
-      // TLAB allocation directly in the old gen.
       if (result != NULL) {
         return result;
       }
-      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size);
-        if (result != NULL) {
-          return result;
-        }
-      }
+
+      // If certain conditions hold, try allocating from the old gen.
+      result = mem_allocate_old_gen(size);
+      if (result != NULL) {
+        return result;
+      }
+
+      // Failed to allocate without a gc.
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
         // initiated by the last thread exiting the critical section; so
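
For orientation, the refactoring in this hunk can be modeled outside HotSpot as follows: take the heap lock, try the young gen first, and only consult an old-gen fallback helper when that fails. This is a minimal sketch, assuming a bump-pointer toy heap; ToyGen, ToyHeap, and the half-of-eden threshold (taken from the deleted inline test) are hypothetical stand-ins, not HotSpot types, and the GC-locker and death-march conditions handled by the real mem_allocate_old_gen() are omitted here.

// Hypothetical stand-ins; only the control flow mirrors the patch.
#include <cstddef>
#include <mutex>

struct ToyGen {
  size_t capacity_words;
  size_t used_words = 0;
  // Bump-pointer allocation; nullptr means the generation is full.
  void* allocate(size_t words) {
    if (used_words + words > capacity_words) return nullptr;
    used_words += words;
    return &used_words;   // placeholder; a real heap returns the chunk address
  }
};

struct ToyHeap {
  std::mutex heap_lock;   // stands in for Heap_lock
  ToyGen young{1024};
  ToyGen old{8192};

  // Plays the role of should_alloc_in_eden(): the deleted inline test treated
  // anything at least half of eden's capacity as too big for the young gen.
  bool should_alloc_in_eden(size_t words) const {
    return words < young.capacity_words / 2;
  }

  // Plays the role of mem_allocate_old_gen(): oversized requests go straight
  // to the old gen; ordinary ones return nullptr and wait for a scavenge.
  void* mem_allocate_old_gen(size_t words) {
    return should_alloc_in_eden(words) ? nullptr : old.allocate(words);
  }

  void* mem_allocate(size_t words) {
    std::lock_guard<std::mutex> ml(heap_lock);
    void* result = young.allocate(words);
    if (result != nullptr) return result;

    // If certain conditions hold, try allocating from the old gen.
    return mem_allocate_old_gen(words);
  }
};

Factoring the fallback into mem_allocate_old_gen() keeps the locked fast path short and gives the death-march logic, added further down in this changeset, a single place to hook in.
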
@@ -458,11 +450,10 @@
         }
       }
     }
 
     if (result == NULL) {
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -521,10 +512,46 @@
   }
 
   return result;
 }
 
+// A "death march" is a series of ultra-slow allocations in which a full gc is
+// done before each allocation, and after the full gc the allocation still
+// cannot be satisfied from the young gen. This routine detects that condition;
+// it should be called after a full gc has been done and the allocation
+// attempted from the young gen. The parameter 'addr' should be the result of
+// that young gen allocation attempt.
+void
+ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
+  if (addr != NULL) {
+    _death_march_count = 0; // death march has ended
+  } else if (_death_march_count == 0) {
+    if (should_alloc_in_eden(size)) {
+      _death_march_count = 1; // death march has started
+    }
+  }
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
+  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
+    // Size is too big for eden, or gc is locked out.
+    return old_gen()->allocate(size);
+  }
+
+  // If a "death march" is in progress, allocate from the old gen a limited
+  // number of times before doing a GC.
+  if (_death_march_count > 0) {
+    if (_death_march_count < 64) {
+      ++_death_march_count;
+      return old_gen()->allocate(size);
+    } else {
+      _death_march_count = 0;
+    }
+  }
+  return NULL;
+}
+
 // Failed allocation policy. Must be called from the VM thread, and
 // only at a safepoint! Note that this method has policy for allocation
 // flow, and NOT collection policy. So we do not check for gc collection
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
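
To make the counter's behavior concrete, here is a hedged, self-contained model of the death-march state machine added above. Only the threshold of 64 and the reset rules come from the patch; the free functions and the would_fit_in_eden flag (standing in for should_alloc_in_eden(size)) are illustrative.

#include <cstdio>

static unsigned death_march_count = 0;

// Called after a full gc and a subsequent young-gen allocation attempt.
void death_march_check(void* addr, bool would_fit_in_eden) {
  if (addr != nullptr) {
    death_march_count = 0;   // allocation succeeded: the march has ended
  } else if (death_march_count == 0 && would_fit_in_eden) {
    death_march_count = 1;   // an eden-sized allocation failed: a march begins
  }
}

// True if the caller should take the cheap old-gen path instead of a gc.
bool allow_old_gen_allocation() {
  if (death_march_count > 0) {
    if (death_march_count < 64) {
      ++death_march_count;
      return true;
    }
    death_march_count = 0;   // budget exhausted: force a collection again
  }
  return false;
}

int main() {
  death_march_check(nullptr, true);      // a slow allocation starts the march
  int old_gen_allocs = 0;
  while (allow_old_gen_allocation()) ++old_gen_allocs;
  std::printf("old-gen allocations before the next gc: %d\n", old_gen_allocs);
  // Prints 63: counts 1 through 63 pass the < 64 test, then the counter resets.
}

The budget therefore allows a bounded run of cheap old-gen allocations between collections, which is what keeps the InfiniteList test from timing out on back-to-back full gcs.
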
@@ -533,30 +560,25 @@
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  size_t mark_sweep_invocation_count = total_invocations();
-
-  // We assume (and assert!) that an allocation at this point will fail
-  // unless we collect.
+  // We assume that allocation in eden will fail unless we collect.
 
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  PSScavenge::invoke();
+  const bool invoked_full_gc = PSScavenge::invoke();
   HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
-  if (result == NULL) {
-    // There is some chance the scavenge method decided to invoke mark_sweep.
-    // Don't mark sweep twice if so.
-    if (mark_sweep_invocation_count == total_invocations()) {
-      invoke_full_gc(false);
-      result = young_gen()->allocate(size);
-    }
-  }
+  if (result == NULL && !invoked_full_gc) {
+    invoke_full_gc(false);
+    result = young_gen()->allocate(size);
+  }
+
+  death_march_check(result, size);
 
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
   if (result == NULL) {
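
The hunk above is cut off inside failed_mem_allocate(); as a reading aid, this sketch models the whole retry ladder under stated assumptions. scavenge(), full_gc(), young_alloc(), and old_alloc() are toy stubs rather than HotSpot APIs, and the old-gen fallback at the third level follows the comment visible in the diff.

#include <cstddef>
#include <cstdio>

static bool young_full = true;    // pretend collections cannot free the young gen

static bool scavenge() {          // true if the scavenge escalated to a full gc
  std::puts("scavenge");
  return false;                   // assume no escalation in this toy run
}

static void full_gc() { std::puts("full gc"); }

static void* young_alloc(size_t) { return young_full ? nullptr : &young_full; }

static void* old_alloc(size_t) { static char block[64]; return block; }

static void* failed_mem_allocate(size_t size) {
  // First level: scavenge and retry in the young gen.
  const bool invoked_full_gc = scavenge();
  void* result = young_alloc(size);

  // Second level: full gc, unless the scavenge already did one.
  if (result == nullptr && !invoked_full_gc) {
    full_gc();
    result = young_alloc(size);
  }

  // Third level: after both collections fail, allocate in the old gen.
  if (result == nullptr) {
    result = old_alloc(size);
  }
  return result;
}

int main() {
  void* p = failed_mem_allocate(8);
  std::printf("allocated: %p\n", p);  // falls through to the old gen
}

The invoked_full_gc flag is the point of the rewrite: PSScavenge::invoke() now reports whether it already escalated to a full collection, so the second level no longer compares total_invocations() counts to avoid running mark-sweep twice.
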