comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 1973:631f79e71e90

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number +1.
g) Lots of code tidy up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
author tonyp
date Tue, 24 Aug 2010 17:24:33 -0400
parents f95d63e2154a
children 016a3628c885
comparison of 1972:f95d63e2154a with 1973:631f79e71e90
@@ -288,10 +288,67 @@
   // Keeps track of how many "full collections" (i.e., Full GCs or
   // concurrent cycles) we have completed. The number of them we have
   // started is maintained in _total_full_collections in CollectedHeap.
   volatile unsigned int _full_collections_completed;

+  // These are macros so that, if the assert fires, we get the correct
+  // line number, file, etc.
+
+#define heap_locking_asserts_err_msg(__extra_message) \
+  err_msg("%s : Heap_lock %slocked, %sat a safepoint", \
+          (__extra_message), \
+          (!Heap_lock->owned_by_self()) ? "NOT " : "", \
+          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
+
+#define assert_heap_locked() \
+  do { \
+    assert(Heap_lock->owned_by_self(), \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_locked_or_at_safepoint() \
+  do { \
+    assert(Heap_lock->owned_by_self() || \
+           SafepointSynchronize::is_at_safepoint(), \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
+                                        "should be at a safepoint")); \
+  } while (0)
+
+#define assert_heap_locked_and_not_at_safepoint() \
+  do { \
+    assert(Heap_lock->owned_by_self() && \
+           !SafepointSynchronize::is_at_safepoint(), \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
+                                        "should not be at a safepoint")); \
+  } while (0)
+
+#define assert_heap_not_locked() \
+  do { \
+    assert(!Heap_lock->owned_by_self(), \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_not_locked_and_not_at_safepoint() \
+  do { \
+    assert(!Heap_lock->owned_by_self() && \
+           !SafepointSynchronize::is_at_safepoint(), \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
+                                        "should not be at a safepoint")); \
+  } while (0)
+
+#define assert_at_safepoint() \
+  do { \
+    assert(SafepointSynchronize::is_at_safepoint(), \
+           heap_locking_asserts_err_msg("should be at a safepoint")); \
+  } while (0)
+
+#define assert_not_at_safepoint() \
+  do { \
+    assert(!SafepointSynchronize::is_at_safepoint(), \
+           heap_locking_asserts_err_msg("should not be at a safepoint")); \
+  } while (0)
+
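A minimal usage sketch of the macros above (illustrative only, not part of the changeset; the method name and body are hypothetical). Because these are macros rather than helper functions, a failing assert reports the caller's file and line:

    // Hypothetical allocation routine, for illustration only.
    HeapWord* G1CollectedHeap::example_slow_allocate(size_t word_size) {
      // States the locking precondition; if it is violated, the assert
      // message includes "should be holding the Heap_lock" plus the actual
      // Heap_lock / safepoint state, reported at this file and line.
      assert_heap_locked_and_not_at_safepoint();
      // ... allocation work would go here ...
      return NULL;
    }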
 protected:

   // Returns "true" iff none of the gc alloc regions have any allocations
   // since the last call to "save_marks".
   bool all_alloc_regions_no_allocs_since_save_marks();
@@ -327,35 +384,166 @@
                    size_t word_size,
                    bool zero_filled = true);

   // Attempt to allocate an object of the given (very large) "word_size".
   // Returns "NULL" on failure.
-  virtual HeapWord* humongousObjAllocate(size_t word_size);
+  virtual HeapWord* humongous_obj_allocate(size_t word_size);

-  // If possible, allocate a block of the given word_size, else return "NULL".
-  // Returning NULL will trigger GC or heap expansion.
-  // These two methods have rather awkward pre- and
-  // post-conditions. If they are called outside a safepoint, then
-  // they assume that the caller is holding the heap lock. Upon return
-  // they release the heap lock, if they are returning a non-NULL
-  // value. attempt_allocation_slow() also dirties the cards of a
-  // newly-allocated young region after it releases the heap
-  // lock. This change in interface was the neatest way to achieve
-  // this card dirtying without affecting mem_allocate(), which is a
-  // more frequently called method. We tried two or three different
-  // approaches, but they were even more hacky.
-  HeapWord* attempt_allocation(size_t word_size,
-                               bool permit_collection_pause = true);
-
-  HeapWord* attempt_allocation_slow(size_t word_size,
-                                    bool permit_collection_pause = true);
+  // The following two methods, allocate_new_tlab() and
+  // mem_allocate(), are the two main entry points from the runtime
+  // into the G1's allocation routines. They have the following
+  // assumptions:
+  //
+  // * They should both be called outside safepoints.
+  //
+  // * They should both be called without holding the Heap_lock.
+  //
+  // * All allocation requests for new TLABs should go to
+  //   allocate_new_tlab().
+  //
+  // * All non-TLAB allocation requests should go to mem_allocate()
+  //   and mem_allocate() should never be called with is_tlab == true.
+  //
+  // * If the GC locker is active we currently stall until we can
+  //   allocate a new young region. This will be changed in the
+  //   near future (see CR 6994056).
+  //
+  // * If either call cannot satisfy the allocation request using the
+  //   current allocating region, they will try to get a new one. If
+  //   this fails, they will attempt to do an evacuation pause and
+  //   retry the allocation.
+  //
+  // * If all allocation attempts fail, even after trying to schedule
+  //   an evacuation pause, allocate_new_tlab() will return NULL,
+  //   whereas mem_allocate() will attempt a heap expansion and/or
+  //   schedule a Full GC.
+  //
+  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
+  //   should never be called with word_size being humongous. All
+  //   humongous allocation requests should go to mem_allocate() which
+  //   will satisfy them with a special path.
+
+  virtual HeapWord* allocate_new_tlab(size_t word_size);
+
+  virtual HeapWord* mem_allocate(size_t word_size,
+                                 bool is_noref,
+                                 bool is_tlab, /* expected to be false */
+                                 bool* gc_overhead_limit_was_exceeded);
+
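To make the division of labor concrete, here is a hedged sketch of a caller driving the two entry points under the assumptions listed above (the member name is hypothetical and the code is not from this changeset):

    // Illustration only: TLAB refills go through allocate_new_tlab(),
    // every other request goes through mem_allocate().
    HeapWord* G1CollectedHeap::example_runtime_allocate(size_t word_size,
                                                        bool for_tlab) {
      assert_heap_not_locked_and_not_at_safepoint();  // both entry points assume this
      if (for_tlab) {
        // TLABs are never humongous-sized, so word_size is not humongous here.
        return allocate_new_tlab(word_size);
      }
      bool gc_overhead_limit_was_exceeded = false;
      return mem_allocate(word_size,
                          false  /* is_noref */,
                          false  /* is_tlab: never true for G1 */,
                          &gc_overhead_limit_was_exceeded);
    }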
+  // The following methods, allocate_from_cur_allocation_region(),
+  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
+  // attempt_allocation_slow(), and attempt_allocation_humongous()
+  // have very awkward pre- and post-conditions with respect to
+  // locking:
+  //
+  // If they are called outside a safepoint they assume the caller
+  // holds the Heap_lock when it calls them. However, on exit they
+  // will release the Heap_lock if they return a non-NULL result, but
+  // keep holding the Heap_lock if they return a NULL result. The
+  // reason for this is that we need to dirty the cards that span
+  // allocated blocks on young regions to avoid having to take the
+  // slow path of the write barrier (for performance reasons we don't
+  // update RSets for references whose source is a young region, so we
+  // don't need to look at dirty cards on young regions). But, doing
+  // this card dirtying while holding the Heap_lock can be a
+  // scalability bottleneck, especially given that some allocation
+  // requests might be of non-trivial size (and the larger the region
+  // size is, the fewer allocations requests will be considered
+  // humongous, as the humongous size limit is a fraction of the
+  // region size). So, when one of these calls succeeds in allocating
+  // a block it does the card dirtying after it releases the Heap_lock
+  // which is why it will return without holding it.
+  //
+  // The above assymetry is the reason why locking / unlocking is done
+  // explicitly (i.e., with Heap_lock->lock() and
+  // Heap_lock->unlocked()) instead of using MutexLocker and
+  // MutexUnlocker objects. The latter would ensure that the lock is
+  // unlocked / re-locked at every possible exit out of the basic
+  // block. However, we only want that action to happen in selected
+  // places.
+  //
+  // Further, if the above methods are called during a safepoint, then
+  // naturally there's no assumption about the Heap_lock being held or
+  // there's no attempt to unlock it. The parameter at_safepoint
+  // indicates whether the call is made during a safepoint or not (as
+  // an optimization, to avoid reading the global flag with
+  // SafepointSynchronize::is_at_safepoint()).
+  //
+  // The methods share these parameters:
+  //
+  // * word_size : the size of the allocation request in words
+  // * at_safepoint : whether the call is done at a safepoint; this
+  //   also determines whether a GC is permitted
+  //   (at_safepoint == false) or not (at_safepoint == true)
+  // * do_dirtying : whether the method should dirty the allocated
+  //   block before returning
+  //
+  // They all return either the address of the block, if they
+  // successfully manage to allocate it, or NULL.
+
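The locking asymmetry described above can be summarized with a short sketch (purely illustrative; the member name and the helper it calls are hypothetical and not from the patch):

    // Sketch of the protocol: enter with the Heap_lock held; on success,
    // unlock explicitly and only then dirty the cards, so the potentially
    // long dirtying loop runs outside the critical section; on failure,
    // return NULL with the Heap_lock still held.
    HeapWord* G1CollectedHeap::example_allocate_and_dirty(size_t word_size) {
      assert_heap_locked_and_not_at_safepoint();
      // try_allocate_from_young_region() is a hypothetical helper standing
      // in for the allocation out of the current (young) alloc region.
      HeapWord* result = try_allocate_from_young_region(word_size);
      if (result != NULL) {
        Heap_lock->unlock();                   // explicit unlock, not a MutexUnlocker
        dirty_young_block(result, word_size);  // block is known to be in a young region
        return result;                         // caller no longer holds the Heap_lock
      }
      return NULL;                             // caller still holds the Heap_lock
    }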
+  // It tries to satisfy an allocation request out of the current
+  // allocating region, which is passed as a parameter. It assumes
+  // that the caller has checked that the current allocating region is
+  // not NULL. Given that the caller has to check the current
+  // allocating region for at least NULL, it might as well pass it as
+  // the first parameter so that the method doesn't have to read it
+  // from the _cur_alloc_region field again.
+  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
+                                                  size_t word_size);
+
+  // It attempts to allocate out of the current alloc region. If that
+  // fails, it retires the current alloc region (if there is one),
+  // tries to get a new one and retries the allocation.
+  inline HeapWord* attempt_allocation(size_t word_size);
+
+  // It assumes that the current alloc region has been retired and
+  // tries to allocate a new one. If it's successful, it performs
+  // the allocation out of the new current alloc region and updates
+  // _cur_alloc_region.
+  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
+                                                  bool at_safepoint,
+                                                  bool do_dirtying);
+
+  // The slow path when we are unable to allocate a new current alloc
+  // region to satisfy an allocation request (i.e., when
+  // attempt_allocation() fails). It will try to do an evacuation
+  // pause, which might stall due to the GC locker, and retry the
+  // allocation attempt when appropriate.
+  HeapWord* attempt_allocation_slow(size_t word_size);
+
+  // The method that tries to satisfy a humongous allocation
+  // request. If it cannot satisfy it it will try to do an evacuation
+  // pause to perhaps reclaim enough space to be able to satisfy the
+  // allocation request afterwards.
+  HeapWord* attempt_allocation_humongous(size_t word_size,
+                                         bool at_safepoint);
+
+  // It does the common work when we are retiring the current alloc region.
+  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
+
+  // It retires the current alloc region, which is passed as a
+  // parameter (since, typically, the caller is already holding on to
+  // it). It sets _cur_alloc_region to NULL.
+  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
+
+  // It attempts to do an allocation immediately before or after an
+  // evacuation pause and can only be called by the VM thread. It has
+  // slightly different assumptions that the ones before (i.e.,
+  // assumes that the current alloc region has been retired).
+  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
+                                            bool expect_null_cur_alloc_region);
+
+  // It dirties the cards that cover the block so that so that the post
+  // write barrier never queues anything when updating objects on this
+  // block. It is assumed (and in fact we assert) that the block
+  // belongs to a young region.
+  inline void dirty_young_block(HeapWord* start, size_t word_size);
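For context, "dirtying the cards that cover the block" boils down to marking every card-table entry spanned by [start, start + word_size). A minimal standalone sketch, assuming the classic 512-byte card size and 0 as the dirty value (both assumptions, not taken from the patch):

    // Illustration of the card arithmetic only; the real dirty_young_block()
    // also asserts that the block lies in a young region and goes through
    // the heap's card table instance.
    const int example_card_shift = 9;   // assumed log2 of a 512-byte card
    void example_dirty_cards(jbyte* byte_map_base, HeapWord* start, size_t word_size) {
      HeapWord* end = start + word_size;
      intptr_t first_card = (intptr_t)start     >> example_card_shift;
      intptr_t last_card  = ((intptr_t)end - 1) >> example_card_shift;
      for (intptr_t card = first_card; card <= last_card; card += 1) {
        byte_map_base[card] = 0;        // assumed "dirty" value in the card table
      }
    }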

   // Allocate blocks during garbage collection. Will ensure an
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion* alloc_region,
                                     bool par,
@@ -368,16 +556,18 @@
   // Retires an allocation region when it is full or at the end of a
   // GC pause.
   void retire_alloc_region(HeapRegion* alloc_region, bool par);

   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
-  // - if clear_all_soft_refs is true, all soft references are cleared
-  //   during the GC
+  // - if clear_all_soft_refs is true, all soft references should be
+  //   cleared during the GC
   // - if explicit_gc is false, word_size describes the allocation that
   //   the GC should attempt (at least) to satisfy
-  void do_collection(bool explicit_gc,
+  // - it returns false if it is unable to do the collection due to the
+  //   GC locker being active, true otherwise
+  bool do_collection(bool explicit_gc,
                      bool clear_all_soft_refs,
                      size_t word_size);

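As a hedged illustration of how a caller might react to the new boolean result (the retry policy shown is an assumption, not taken from the patch):

    // Inside a hypothetical caller that has decided a Full GC is needed:
    bool collected = do_collection(false /* explicit_gc */,
                                   false /* clear_all_soft_refs */,
                                   word_size);
    if (!collected) {
      // The GC locker was active so the Full GC did not run. One option is
      // to wait for the critical sections to drain and then retry
      // (GC_locker::stall_until_clear() is assumed available here).
      GC_locker::stall_until_clear();
      // ... retry the allocation or the collection, as appropriate ...
    }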
   // Callback from VM_G1CollectFull operation.
   // Perform a full collection.
@@ -389,17 +579,17 @@
   void resize_if_necessary_after_full_collection(size_t word_size);

   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size);
+  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size". If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  virtual HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size);

 public:
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // (Rounds up to a HeapRegion boundary.)
   virtual void expand(size_t expand_bytes);
@@ -476,25 +666,31 @@
   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
   void reset_taskqueue_stats();
 #endif // TASKQUEUE_STATS

-  // Do an incremental collection: identify a collection set, and evacuate
-  // its live objects elsewhere.
-  virtual void do_collection_pause();
+  // Schedule the VM operation that will do an evacuation pause to
+  // satisfy an allocation request of word_size. *succeeded will
+  // return whether the VM operation was successful (it did do an
+  // evacuation pause) or not (another thread beat us to it or the GC
+  // locker was active). Given that we should not be holding the
+  // Heap_lock when we enter this method, we will pass the
+  // gc_count_before (i.e., total_collections()) as a parameter since
+  // it has to be read while holding the Heap_lock. Currently, both
+  // methods that call do_collection_pause() release the Heap_lock
+  // before the call, so it's easy to read gc_count_before just before.
+  HeapWord* do_collection_pause(size_t word_size,
+                                unsigned int gc_count_before,
+                                bool* succeeded);
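The calling convention around gc_count_before can be illustrated with a short sketch (the wrapper below is hypothetical and not part of the changeset):

    // Read the collection count while the Heap_lock is still held, release
    // the lock, then schedule the pause; *succeeded reports whether the
    // pause actually ran.
    HeapWord* G1CollectedHeap::example_pause_then_allocate(size_t word_size) {
      assert(Heap_lock->owned_by_self(), "gc_count_before is read under the Heap_lock");
      unsigned int gc_count_before = total_collections();
      Heap_lock->unlock();

      bool succeeded = false;
      HeapWord* result = do_collection_pause(word_size, gc_count_before, &succeeded);
      if (result != NULL) {
        return result;   // the pause performed the allocation on our behalf
      }
      // !succeeded means another thread scheduled a pause first or the GC
      // locker was active; a real caller would typically retry the allocation.
      return NULL;
    }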

   // The guts of the incremental collection pause, executed by the vm
-  // thread.
-  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
+  // thread. It returns false if it is unable to do the collection due
+  // to the GC locker being active, true otherwise
+  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

   // Actually do the work of evacuating the collection set.
-  virtual void evacuate_collection_set();
-
-  // If this is an appropriate right time, do a collection pause.
-  // The "word_size" argument, if non-zero, indicates the size of an
-  // allocation request that is prompting this query.
-  void do_collection_pause_if_appropriate(size_t word_size);
+  void evacuate_collection_set();

   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
   // And it's mod ref barrier set, used to track updates for the above.
   ModRefBarrierSet* _mr_bs;
@@ -760,15 +956,10 @@
 #ifndef PRODUCT
   size_t recalculate_used_regions() const;
 #endif // PRODUCT

   // These virtual functions do the actual allocation.
-  virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool is_noref,
-                                 bool is_tlab,
-                                 bool* gc_overhead_limit_was_exceeded);
-
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.

@@ -1044,11 +1235,10 @@
   // See CollectedHeap for semantics.

   virtual bool supports_tlab_allocation() const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
-  virtual HeapWord* allocate_new_tlab(size_t word_size);

   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
   // via a TLAB up to the first subsequent safepoint. If such permission
   // is granted for this heap type, the compiler promises to call
@@ -1184,11 +1374,10 @@
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static G1CollectedHeap* heap();

   void empty_young_list();
-  bool should_set_young_locked();

   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups

   YoungList* young_list() { return _young_list; }
@@ -1336,12 +1525,10 @@

   // </NEW PREDICTION>

 protected:
   size_t _max_heap_capacity;
-
-  // debug_only(static void check_for_valid_allocation_state();)

 public:
   // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
   // MemoryService). In productization, we can make this assert false
   // to catch such places (as well as searching for calls to this...)