comparison src/share/vm/memory/space.hpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers.
Code contributors: coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents a61af66fc99e
children d1605aabd0a1 12eea04c8b06 37f87013dfd8
comparison: 110:a49a647afe9a vs. 113:ba764ed4b6f2
@@ -50,24 +50,27 @@
 class BlockOffsetTable;
 class GenRemSet;
 class CardTableRS;
 class DirtyCardToOopClosure;
 
-
 // An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
-  OopClosure* cl;
-  MemRegion mr;
- public:
-  void do_oop(oop* p) {
-    if (mr.contains(p)) {
-      cl->do_oop(p);
+class SpaceMemRegionOopsIterClosure: public OopClosure {
+ private:
+  OopClosure* _cl;
+  MemRegion   _mr;
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    if (_mr.contains(p)) {
+      _cl->do_oop(p);
     }
   }
-  SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
+ public:
+  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
+    _cl(cl), _mr(mr) {}
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
-
 
 // A Space describes a heap area. Class Space is an abstract
 // base class.
 //
 // Space supports allocation, size computation and GC support is provided.
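This hunk shows the closure-refactoring pattern the changeset applies throughout: with compressed oops a heap reference may be a full-width oop or a 32-bit narrowOop, so each closure grows a second virtual do_oop overload, and the shared logic moves into a protected template member so it is written only once. The out-of-line definitions of the two virtuals are not part of this hunk; presumably they simply forward to do_oop_work. A minimal, self-contained sketch of the shape, using mock typedefs (uintptr_t/uint32_t and a CountingClosure) that stand in for HotSpot's real oop, narrowOop, and closure machinery:

#include <cstdint>
#include <cstdio>

typedef uintptr_t oop;        // mock: full-width object reference
typedef uint32_t  narrowOop;  // mock: 32-bit compressed reference

class OopClosure {
 public:
  virtual ~OopClosure() {}
  // With compressed oops every closure needs both entry points.
  virtual void do_oop(oop* p)       = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

class CountingClosure : public OopClosure {
  int _count;
 protected:
  // The shared body, written once and instantiated per pointer width.
  template <class T> void do_oop_work(T* p) {
    if (*p != 0) _count++;
  }
 public:
  CountingClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  int count() const { return _count; }
};

int main() {
  CountingClosure cl;
  oop       full     = 0x1000;
  narrowOop narrow   = 0x40;
  narrowOop null_ref = 0;
  cl.do_oop(&full);
  cl.do_oop(&narrow);
  cl.do_oop(&null_ref);        // filtered out by the shared body
  printf("%d\n", cl.count()); // prints 2
  return 0;
}

The template is instantiated once per pointer width at compile time, so the only per-call cost left is the virtual dispatch that OopClosure already required.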
@@ -277,11 +280,11 @@
   OopClosure* _cl;
   Space* _sp;
   CardTableModRefBS::PrecisionStyle _precision;
   HeapWord* _boundary;    // If non-NULL, process only non-NULL oops
                           // pointing below boundary.
   HeapWord* _min_done;    // ObjHeadPreciseArray precision requires
                           // a downwards traversal; this is the
                           // lowest location already done (or,
                           // alternatively, the lowest address that
                           // shouldn't be done again.  NULL means infinity.)
   NOT_PRODUCT(HeapWord* _last_bottom;)
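The comment on _boundary states a filtering contract: when a boundary is set, only non-NULL oops that point below it are processed. A sketch of just that gate in isolation; the helper name filtered_do_oop and the mock typedefs are hypothetical, not part of this file:

#include <cstddef>

typedef char* HeapWord;   // mock stand-in for HotSpot's HeapWord*
typedef void* oop;        // mock stand-in for an object reference

// Apply cl only to non-NULL oops pointing below boundary (when one is set).
template <class Closure>
void filtered_do_oop(Closure& cl, oop* p, HeapWord boundary) {
  oop obj = *p;
  if (obj != NULL && (boundary == NULL || (HeapWord)obj < boundary)) {
    cl.do_oop(p);
  }
}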
506 "these are the only valid states during a mark sweep"); \ 509 "these are the only valid states during a mark sweep"); \
507 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ 510 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
508 /* prefetch beyond q */ \ 511 /* prefetch beyond q */ \
509 Prefetch::write(q, interval); \ 512 Prefetch::write(q, interval); \
510 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\ 513 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
511 size_t size = block_size(q); \ 514 size_t size = block_size(q); \
512 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ 515 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
513 q += size; \ 516 q += size; \
514 end_of_live = q; \ 517 end_of_live = q; \
515 } else { \ 518 } else { \
516 /* run over all the contiguous dead objects */ \ 519 /* run over all the contiguous dead objects */ \
@@ -570,5 +573,5 @@
                                                                             \
   /* save the compaction_top of the compaction space. */                    \
   cp->space->set_compaction_top(compact_top);                               \
 }
 
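For orientation, the forwarding loop excerpted above assigns each live block its post-compaction address by bumping compact_top (via cp->space->forward(...)), skips dead runs, and lets end_of_live track the last live word. A toy simulation of just that address computation, with plain arrays in place of a heap; none of these names are HotSpot's:

#include <cstddef>
#include <cstdio>

int main() {
  const int n = 6;
  size_t size[n]    = {2, 3, 1, 4, 2, 1};  // block sizes in words
  bool   live[n]    = {true, false, true, true, false, true};
  size_t forward[n] = {0};                 // new start address of each live block

  size_t compact_top = 0;                  // next free word in the compacted space
  for (int i = 0; i < n; i++) {
    if (live[i]) {
      forward[i] = compact_top;            // role of cp->space->forward(...)
      compact_top += size[i];
    }
  }
  for (int i = 0; i < n; i++)
    if (live[i]) printf("block %d -> word %zu\n", i, forward[i]);
  return 0;
}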
@@ -575,75 +578,75 @@
 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
   /* adjust all the interior pointers to point at the new locations of objects \
    * Used by MarkSweep::mark_sweep_phase3() */                               \
                                                                              \
   HeapWord* q = bottom();                                                    \
   HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
                                                                              \
   assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
                                                                              \
   if (q < t && _first_dead > q &&                                            \
       !oop(q)->is_gc_marked()) {                                             \
     /* we have a chunk of the space which hasn't moved and we've             \
      * reinitialized the mark word during the previous pass, so we can't     \
      * use is_gc_marked for the traversal. */                                \
     HeapWord* end = _first_dead;                                             \
                                                                              \
     while (q < end) {                                                        \
       /* I originally tried to conjoin "block_start(q) == q" to the          \
        * assertion below, but that doesn't work, because you can't           \
        * accurately traverse previous objects to get to the current one      \
        * after their pointers (including pointers into permGen) have been    \
        * updated, until the actual compaction is done.  dld, 4/00 */         \
       assert(block_is_obj(q),                                                \
              "should be at block boundaries, and should be looking at objs"); \
                                                                              \
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
                                                                              \
       /* point all the oops to the new location */                           \
       size_t size = oop(q)->adjust_pointers();                               \
       size = adjust_obj_size(size);                                          \
                                                                              \
-      debug_only(MarkSweep::check_interior_pointers());                      \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
                                                                              \
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
                                                                              \
       q += size;                                                             \
     }                                                                        \
                                                                              \
     if (_first_dead == t) {                                                  \
       q = t;                                                                 \
     } else {                                                                 \
       /* $$$ This is funky.  Using this to read the previously written       \
        * LiveRange.  See also use below. */                                  \
       q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
     }                                                                        \
   }                                                                          \
                                                                              \
   const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                              \
   debug_only(HeapWord* prev_q = NULL);                                       \
   while (q < t) {                                                            \
     /* prefetch beyond q */                                                  \
     Prefetch::write(q, interval);                                            \
     if (oop(q)->is_gc_marked()) {                                            \
       /* q is alive */                                                       \
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
       /* point all the oops to the new location */                           \
       size_t size = oop(q)->adjust_pointers();                               \
       size = adjust_obj_size(size);                                          \
-      debug_only(MarkSweep::check_interior_pointers());                      \
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
       debug_only(prev_q = q);                                                \
       q += size;                                                             \
     } else {                                                                 \
       /* q is not a live object, so its mark should point at the next        \
        * live object */                                                      \
       debug_only(prev_q = q);                                                \
       q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
       assert(q > prev_q, "we should be moving forward through memory");      \
     }                                                                        \
   }                                                                          \
                                                                              \
   assert(q == t, "just checking");                                           \
 }
 
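The recurring change in this macro is mechanical: calls into MarkSweep's pointer-tracking and validation routines move from debug_only(...) to VALIDATE_MARK_SWEEP_ONLY(...), so this comparatively expensive checking no longer rides along with every debug build. The macro's definition is not in this hunk; by analogy with debug_only it presumably compiles its argument in only under a dedicated build flag, along these lines (an assumption, including the VALIDATE_MARK_SWEEP flag name):

// Hypothetical definition, following the debug_only() convention; the real
// definition lives elsewhere in the changeset and is not shown in this diff.
#ifdef VALIDATE_MARK_SWEEP
#define VALIDATE_MARK_SWEEP_ONLY(code) code
#else
#define VALIDATE_MARK_SWEEP_ONLY(code)
#endif

Note that cheap bookkeeping such as debug_only(prev_q = q) stays under debug_only; only the MarkSweep validation hooks are moved behind the new macro.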
@@ -650,71 +653,73 @@
 #define SCAN_AND_COMPACT(obj_size) {                                         \
   /* Copy all live objects to their new location                             \
    * Used by MarkSweep::mark_sweep_phase4() */                               \
                                                                              \
   HeapWord* q = bottom();                                                    \
   HeapWord* const t = _end_of_live;                                          \
   debug_only(HeapWord* prev_q = NULL);                                       \
                                                                              \
   if (q < t && _first_dead > q &&                                            \
       !oop(q)->is_gc_marked()) {                                             \
     debug_only(                                                              \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized the \
-     * mark word during the previous pass, so we can't use is_gc_marked for the \
-     * traversal. */                                                         \
+    /* we have a chunk of the space which hasn't moved and we've reinitialized \
+     * the mark word during the previous pass, so we can't use is_gc_marked for \
+     * the traversal. */                                                     \
     HeapWord* const end = _first_dead;                                       \
                                                                              \
     while (q < end) {                                                        \
       size_t size = obj_size(q);                                             \
-      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \
-      debug_only(MarkSweep::live_oop_moved_to(q, size, q));                  \
+      assert(!oop(q)->is_gc_marked(),                                        \
+             "should be unmarked (special dense prefix handling)");          \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));    \
       debug_only(prev_q = q);                                                \
       q += size;                                                             \
     }                                                                        \
     ) /* debug_only */                                                       \
                                                                              \
     if (_first_dead == t) {                                                  \
       q = t;                                                                 \
     } else {                                                                 \
       /* $$$ Funky */                                                        \
       q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
     }                                                                        \
   }                                                                          \
                                                                              \
   const intx scan_interval = PrefetchScanIntervalInBytes;                    \
   const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
   while (q < t) {                                                            \
     if (!oop(q)->is_gc_marked()) {                                           \
       /* mark is pointer to next marked oop */                               \
       debug_only(prev_q = q);                                                \
       q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
       assert(q > prev_q, "we should be moving forward through memory");      \
     } else {                                                                 \
       /* prefetch beyond q */                                                \
       Prefetch::read(q, scan_interval);                                      \
                                                                              \
       /* size and destination */                                             \
       size_t size = obj_size(q);                                             \
       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                              \
       /* prefetch beyond compaction_top */                                   \
       Prefetch::write(compaction_top, copy_interval);                        \
                                                                              \
       /* copy object and reinit its mark */                                  \
-      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));     \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,         \
+                                                            compaction_top)); \
       assert(q != compaction_top, "everything in this pass should be moving"); \
       Copy::aligned_conjoint_words(q, compaction_top, size);                 \
       oop(compaction_top)->init_mark();                                      \
       assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                              \
       debug_only(prev_q = q);                                                \
       q += size;                                                             \
     }                                                                        \
   }                                                                          \
                                                                              \
   /* Reset space after compaction is complete */                             \
   reset_after_compaction();                                                  \
   /* We do this clear, below, since it has overloaded meanings for some */   \
   /* space subtypes.  For example, OffsetTableContigSpace's that were */     \
   /* compacted into will have had their offset table thresholds updated */   \
   /* continuously, but those that weren't need to have their thresholds */   \
   /* re-initialized.  Also mangles unused area for debugging. */             \
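Both macros rely on the trick the comments flag as "$$$ funky": during the preparation pass, the mark word of the first dead object in each dead run is overwritten with the address of the next live object (the LiveRange the comment mentions), so later phases can leap over an entire dead run with one decode_pointer() call instead of walking it object by object. A toy model of that skip, using array indices in place of mark words and heap addresses; the Block layout here is illustrative, not HotSpot's markOop:

#include <cstdint>
#include <cstdio>

struct Block {
  bool      live;
  uintptr_t mark;   // live: real header; first dead of a run: index of next live block
};

int main() {
  // Blocks 1 and 4 start dead runs; "prepare" stored the next live index there.
  Block heap[6] = {{true,0}, {false,3}, {false,0}, {true,0}, {false,5}, {true,0}};
  for (size_t q = 0; q < 6; ) {
    if (heap[q].live) {
      printf("process live block %zu\n", q);
      q += 1;                  // q += size in the real code
    } else {
      q = heap[q].mark;        // decode_pointer(): jump over the whole dead run
    }
  }
  return 0;
}

This is also why the loops assert q > prev_q: a decoded pointer must always move the cursor forward through memory.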