comparison src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 3774:c9ca3f51cf41
6994322: Remove the is_tlab and is_noref / is_large_noref parameters from the CollectedHeap
Summary: Remove two unused parameters from the mem_allocate() method and update its uses accordingly.
Reviewed-by: stefank, johnc
author:   tonyp
date:     Thu, 16 Jun 2011 15:51:57 -0400
parents:  6747fd0512e0
children: c2bf0120ee5d
comparing 3773:5130fa1b24f1 with 3774:c9ca3f51cf41
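At the interface level, the change reduces to the before/after signatures below. This is a sketch distilled from the hunks that follow, not a verbatim header excerpt. The commit summary calls both parameters unused: within this file, is_noref was never read, and is_tlab only guarded TLAB-specific branches that were unreachable because TLAB refills enter the heap through allocate_new_tlab() rather than mem_allocate().

    // Before (3773:5130fa1b24f1):
    HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                                 bool is_noref,
                                                 bool is_tlab,
                                                 bool* gc_overhead_limit_was_exceeded);
    HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab);

    // After (3774:c9ca3f51cf41):
    HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                                 bool* gc_overhead_limit_was_exceeded);
    HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size);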
@@ -384,23 +384,21 @@
 // and the rest will not be executed. For that reason, this method loops
 // during failed allocation attempts. If the java heap becomes exhausted,
 // we rely on the size_policy object to force a bail out.
 HeapWord* ParallelScavengeHeap::mem_allocate(
                                      size_t size,
-                                     bool is_noref,
-                                     bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

   // In general gc_overhead_limit_was_exceeded should be false so
   // set it so here and reset it to true only if the gc time
   // limit is being exceeded as checked below.
   *gc_overhead_limit_was_exceeded = false;

-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);

   uint loop_count = 0;
   uint gc_count = 0;

   while (result == NULL) {
@@ -417,11 +415,11 @@
     // total_collections() value!
     {
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();

-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);

       // (1) If the requested object is too large to easily fit in the
       //     young_gen, or
       // (2) If GC is locked out via GCLocker, young gen is full and
       //     the need for a GC already signalled to GCLocker (done
@@ -431,25 +429,17 @@
     //     in old_gen. For case (2) above, we may in the future allow
     //     TLAB allocation directly in the old gen.
       if (result != NULL) {
         return result;
       }
-      if (!is_tlab &&
-          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size, is_tlab);
+      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
+        result = old_gen()->allocate(size);
         if (result != NULL) {
           return result;
         }
       }
       if (GC_locker::is_active_and_needs_gc()) {
-        // GC is locked out. If this is a TLAB allocation,
-        // return NULL; the requestor will retry allocation
-        // of an individual object at a time.
-        if (is_tlab) {
-          return NULL;
-        }
-
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
         // initiated by the last thread exiting the critical section; so
         // we retry the allocation sequence from the beginning of the loop,
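The surviving test in the hunk above is the policy behind comment (1): a request at least half the size of eden is considered too large to easily fit in the young generation and is tried directly in the old generation. A worked example of the arithmetic, with hypothetical numbers not taken from the source:

    // Illustration only; the sizes are hypothetical.
    // eden capacity = 64 MB, HeapWord = 8 bytes
    //   capacity_in_words(...) = 64 MB / 8 B   = 8,388,608 words
    //   threshold              = 8,388,608 / 2 = 4,194,304 words (32 MB)
    // A 40 MB request is 5,242,880 words >= threshold, so it bypasses
    // young_gen() and is attempted via old_gen()->allocate(size).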
@@ -470,11 +460,11 @@
     }

     if (result == NULL) {

       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
+      VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);

       // Did the VM operation execute? If so, return the result directly.
       // This prevents us from looping until time out on requests that can
       // not be satisfied.
@@ -524,11 +514,11 @@
     // time spent in gc crosses a threshold, we will bail out.
     loop_count++;
     if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
-              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+              " size=%d", loop_count, size);
     }
   }

   return result;
 }
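Stripped of the asserts, the GC_locker stall path, and the overhead-limit bookkeeping, the post-change allocation loop reduces to the outline below. This is a condensed sketch of the hunks above, not a verbatim excerpt:

    // Condensed control flow of mem_allocate() after the change (sketch).
    HeapWord* result = young_gen()->allocate(size);
    uint loop_count = 0;
    uint gc_count = 0;
    while (result == NULL) {
      {
        MutexLocker ml(Heap_lock);             // serialize with collections
        gc_count = Universe::heap()->total_collections();
        result = young_gen()->allocate(size);  // retry under the lock
        if (result != NULL) return result;
        if (size >= young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2) {
          result = old_gen()->allocate(size);  // half-of-eden heuristic
          if (result != NULL) return result;
        }
        // ... GC_locker::is_active_and_needs_gc() stall handling elided ...
      }
      if (result == NULL) {
        VM_ParallelGCFailedAllocation op(size, gc_count);  // collect at a safepoint
        VMThread::execute(&op);
        // ... op result and overhead-limit handling elided (region between hunks) ...
      }
      loop_count++;  // periodic QueuedAllocationWarningCount warning elided
    }
    return result;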
@@ -537,11 +527,11 @@
 // only at a safepoint! Note that this method has policy for allocation
 // flow, and NOT collection policy. So we do not check for gc collection
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
 // and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

@@ -551,41 +541,41 @@
   // unless we collect.

   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
   PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);

   // Second level allocation failure.
   // Mark sweep and allocate in young generation.
   if (result == NULL) {
     // There is some chance the scavenge method decided to invoke mark_sweep.
     // Don't mark sweep twice if so.
     if (mark_sweep_invocation_count == total_invocations()) {
       invoke_full_gc(false);
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
     }
   }

   // Third level allocation failure.
   // After mark sweep and young generation allocation failure,
   // allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }

   // Fourth level allocation failure. We're running out of memory.
   // More complete mark sweep and allocate in young generation.
   if (result == NULL) {
     invoke_full_gc(true);
-    result = young_gen()->allocate(size, is_tlab);
+    result = young_gen()->allocate(size);
   }

   // Fifth level allocation failure.
   // After more complete mark sweep, allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }

   return result;
 }

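With the !is_tlab guards gone, failed_mem_allocate() applies the same five escalation levels to every request; only the guards in front of the old-gen attempts (levels three and five) were removed. In outline, condensed from the hunk above:

    // The five levels, condensed (sketch, not a verbatim excerpt).
    PSScavenge::invoke();                                    // 1. scavenge ...
    HeapWord* result = young_gen()->allocate(size);          //    ... then young gen
    if (result == NULL &&
        mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);                                 // 2. mark-sweep ...
      result = young_gen()->allocate(size);                  //    ... then young gen
    }
    if (result == NULL) {
      result = old_gen()->allocate(size);                    // 3. old gen
    }
    if (result == NULL) {
      invoke_full_gc(true);                                  // 4. maximally compacting
      result = young_gen()->allocate(size);                  //    full gc, young gen
    }
    if (result == NULL) {
      result = old_gen()->allocate(size);                    // 5. old gen again
    }
    return result;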
@@ -759,11 +749,11 @@
 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
   return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
 }

 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
-  return young_gen()->allocate(size, true);
+  return young_gen()->allocate(size);
 }

 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
   CollectedHeap::accumulate_statistics_all_tlabs();
 }
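After this change a TLAB refill is just an ordinary eden allocation; the only TLAB-specific logic left in this file is the size bound reported by unsafe_max_tlab_alloc(). A hedged sketch of how a caller could combine the two hooks follows; the driver shown is hypothetical, since the real caller (CollectedHeap's TLAB slow path) is not part of this file:

    // Hypothetical caller of the two TLAB hooks above (illustration only).
    size_t desired = thread->tlab().desired_size();        // assumed accessor
    size_t limit   = heap->unsafe_max_tlab_alloc(thread);  // largest refill eden allows
    HeapWord* tlab = heap->allocate_new_tlab(MIN2(desired, limit));
    if (tlab == NULL) {
      // Eden cannot fit even the capped request; the caller would fall back
      // to allocating the single object through mem_allocate() instead.
    }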