comparison src/share/vm/memory/defNewGeneration.cpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents a61af66fc99e
children d1605aabd0a1 12eea04c8b06 37f87013dfd8
comparison of 110:a49a647afe9a with 113:ba764ed4b6f2
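The pattern this changeset applies throughout the file shows up in every hunk below: each closure's virtual do_oop is reduced to two thin overloads, one taking a full-width oop* and one taking a compressed narrowOop*, both forwarding to a shared, width-generic do_oop_work. The stand-alone sketch below only illustrates that shape; ExampleClosure and its members are hypothetical names, and the real do_oop_work bodies live elsewhere (presumably in the matching inline headers, which this comparison does not show).

    // Sketch only, not HotSpot code: stand-ins model an object pointer and its
    // 32-bit compressed form, plus a closure that accepts fields of either
    // width through one templated worker.
    #include <cstdint>

    typedef void*    oop;        // stand-in for HotSpot's oop
    typedef uint32_t narrowOop;  // stand-in for the compressed reference

    class ExampleClosure {       // hypothetical name, not from this changeset
     public:
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }
     private:
      template <class T> void do_oop_work(T* p) {
        // A real worker decodes *p to a full oop, acts on the object, and
        // writes back in the field's own width; only the shape is shown here.
        (void)p;
      }
    };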
   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
   assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
   _rs = (CardTableRS*)rs;
 }
 
-void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _cl->do_oop_nv(p);
-
-  // Card marking is trickier for weak refs.
-  // This oop is a 'next' field which was filled in while we
-  // were discovering weak references. While we might not need
-  // to take a special action to keep this reference alive, we
-  // will need to dirty a card as the field was modified.
-  //
-  // Alternatively, we could create a method which iterates through
-  // each generation, allowing them in turn to examine the modified
-  // field.
-  //
-  // We could check that p is also in an older generation, but
-  // dirty cards in the youngest gen are never scanned, so the
-  // extra check probably isn't worthwhile.
-  if (Universe::heap()->is_in_reserved(p)) {
-    _rs->inline_write_ref_field_gc(p, *p);
-  }
-}
+void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
 
 DefNewGeneration::FastKeepAliveClosure::
 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
   DefNewGeneration::KeepAliveClosure(cl) {
   _boundary = g->reserved().end();
 }
 
-void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _cl->do_oop_nv(p);
-
-  // Optimized for Defnew generation if it's the youngest generation:
-  // we set a younger_gen card if we have an older->youngest
-  // generation pointer.
-  if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
-    _rs->inline_write_ref_field_gc(p, *p);
-  }
-}
+void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
 
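The bodies deleted from KeepAliveClosure::do_oop and FastKeepAliveClosure::do_oop above do not go away: presumably the same steps (the non-null assert, running the wrapped closure, and dirtying a card when the updated field lives outside the young generation) move into the do_oop_work routines that the new one-line overloads call, now generic over the field width. The sketch below is a rough reconstruction under that assumption, with HotSpot's types and decode helpers replaced by stand-ins; the decode step is the only new ingredient.

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t oop;       // stand-ins (64-bit build assumed, so the types differ)
    typedef uint32_t  narrowOop;

    // Hypothetical decode helpers: widen a compressed ref, pass a full one
    // through. (The real helpers use the heap base and an alignment shift.)
    inline oop decode_ref(narrowOop v) { return oop(v) << 3; }
    inline oop decode_ref(oop v)       { return v; }

    // Assumed shape of the worker behind the new one-line do_oop overloads:
    // the deleted logic, templated on the width of the reference field.
    template <class T>
    void keep_alive_work(T* p) {
      oop obj = decode_ref(*p);
      assert(obj != 0 && "expected non-null ref");  // weak refs are never null here
      // ... run the wrapped ScanWeakRefClosure on obj, then dirty the card
      // covering p if p sits in an older generation, as the old body did ...
    }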
 DefNewGeneration::EvacuateFollowersClosure::
 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                          ScanClosure* cur, ScanClosure* older) :
   _gch(gch), _level(level),
...
 {
   assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
+void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
+void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
+
 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
   assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
+void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
+void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
+
 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
   OopClosure(g->ref_processor()), _g(g)
 {
   assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
+void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
+void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
+
+void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
+void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 
 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                    size_t initial_size,
                                    int level,
                                    const char* policy)
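The 32 GB figure in the synopsis follows from arithmetic rather than from anything in this file: a narrowOop is a 32-bit offset counted in units of the 8-byte object alignment, so the largest encodable offset is 2^32 * 8 bytes = 32 GB. A small self-contained check of that encode/decode arithmetic, assuming a zero heap base and the default 8-byte alignment (the real helpers also handle a non-zero base and NULL):

    #include <cassert>
    #include <cstdint>

    // Assumed parameters: 8-byte object alignment => 3-bit shift, zero heap base.
    static const unsigned kAlignShift = 3;

    inline uint32_t encode_offset(uint64_t addr) { return uint32_t(addr >> kAlignShift); }
    inline uint64_t decode_offset(uint32_t v)    { return uint64_t(v) << kAlignShift; }

    int main() {
      // Round-trip any 8-byte-aligned offset below the limit.
      uint64_t some_addr = uint64_t(0x12345678) << kAlignShift;
      assert(decode_offset(encode_offset(some_addr)) == some_addr);
      // Largest encodable offset: just under 2^32 * 8 bytes, i.e. 32 GB.
      assert(decode_offset(UINT32_MAX) == (uint64_t(UINT32_MAX) << kAlignShift));
      return 0;
    }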
...
     drain_promo_failure_scan_stack();
     _promo_failure_drain_in_progress = false;
   }
 }
 
-oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
+oop DefNewGeneration::copy_to_survivor_space(oop old) {
   assert(is_in_reserved(old) && !old->is_forwarded(),
          "shouldn't be scavenging this oop");
   size_t s = old->size();
   oop obj = NULL;
 
...
     obj = (oop) to()->allocate(s);
   }
 
   // Otherwise try allocating obj tenured
   if (obj == NULL) {
-    obj = _next_gen->promote(old, s, from);
+    obj = _next_gen->promote(old, s);
     if (obj == NULL) {
       if (!HandlePromotionFailure) {
         // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
         // is incorrectly set. In any case, its seriously wrong to be here!
         vm_exit_out_of_memory(s*wordSize, "promotion");
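copy_to_survivor_space and the promote call lose their oop* from parameter in this hunk. A plausible reading, consistent with the rest of the change, is that the referencing field may now be either an oop* or a narrowOop*, so the copy routines can no longer take a raw oop* to update; the caller, which knows the field's width, writes the forwarded address back itself. A hypothetical caller shape under that assumption (Obj, decode_ref and encode_store are stand-ins, not HotSpot names):

    #include <cstdint>

    struct Obj { bool forwarded; Obj* forwardee; };   // stand-in for an object header
    typedef Obj*     oop_t;        // full-width reference field
    typedef uint32_t narrowOop_t;  // compressed reference field

    Obj* copy_to_survivor_space(Obj* o);              // assumed one-argument form
    Obj* decode_ref(narrowOop_t v);                   // widen: heap base + shift
    inline Obj* decode_ref(oop_t v) { return v; }
    void encode_store(narrowOop_t* p, Obj* o);        // compress, then store
    inline void encode_store(oop_t* p, Obj* o) { *p = o; }

    // The caller, not the copy routine, updates the field, because only the
    // caller knows whether *p is 32 or 64 bits wide.
    template <class T>
    void scan_field(T* p) {
      Obj* obj = decode_ref(*p);
      Obj* new_obj = obj->forwarded ? obj->forwardee
                                    : copy_to_survivor_space(obj);
      encode_store(p, new_obj);
    }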
...
 
 const char* DefNewGeneration::name() const {
   return "def new generation";
 }
+
+// Moved from inline file as they are not called inline
+CompactibleSpace* DefNewGeneration::first_compaction_space() const {
+  return eden();
+}
+
+HeapWord* DefNewGeneration::allocate(size_t word_size,
+                                     bool is_tlab) {
+  // This is the slow-path allocation for the DefNewGeneration.
+  // Most allocations are fast-path in compiled code.
+  // We try to allocate from the eden. If that works, we are happy.
+  // Note that since DefNewGeneration supports lock-free allocation, we
+  // have to use it here, as well.
+  HeapWord* result = eden()->par_allocate(word_size);
+  if (result != NULL) {
+    return result;
+  }
+  do {
+    HeapWord* old_limit = eden()->soft_end();
+    if (old_limit < eden()->end()) {
+      // Tell the next generation we reached a limit.
+      HeapWord* new_limit =
+        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+      if (new_limit != NULL) {
+        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
+      } else {
+        assert(eden()->soft_end() == eden()->end(),
+               "invalid state after allocation_limit_reached returned null");
+      }
+    } else {
+      // The allocation failed and the soft limit is equal to the hard limit,
+      // there are no reasons to do an attempt to allocate
+      assert(old_limit == eden()->end(), "sanity check");
+      break;
+    }
+    // Try to allocate until succeeded or the soft limit can't be adjusted
+    result = eden()->par_allocate(word_size);
+  } while (result == NULL);
+
+  // If the eden is full and the last collection bailed out, we are running
+  // out of heap space, and we try to allocate the from-space, too.
+  // allocate_from_space can't be inlined because that would introduce a
+  // circular dependency at compile time.
+  if (result == NULL) {
+    result = allocate_from_space(word_size);
+  }
+  return result;
+}
+
+HeapWord* DefNewGeneration::par_allocate(size_t word_size,
+                                         bool is_tlab) {
+  return eden()->par_allocate(word_size);
+}
+
+void DefNewGeneration::gc_prologue(bool full) {
+  // Ensure that _end and _soft_end are the same in eden space.
+  eden()->set_soft_end(eden()->end());
+}
+
+size_t DefNewGeneration::tlab_capacity() const {
+  return eden()->capacity();
+}
+
+size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
+  return unsafe_max_alloc_nogc();
+}
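The new allocate() slow path added above leans on the same lock-free eden allocation the fast path uses: it keeps retrying eden()->par_allocate() while nudging the soft end with Atomic::cmpxchg_ptr. On a contiguous space, par_allocate is conventionally a compare-and-swap bump of the top pointer; a self-contained sketch of that idea follows (std::atomic stands in for HotSpot's Atomic, and sizes are in bytes rather than HeapWords):

    #include <atomic>
    #include <cstddef>

    // Minimal bump-pointer region with lock-free allocation, illustrating the
    // kind of par_allocate the new DefNewGeneration::allocate slow path retries.
    struct EdenSketch {
      std::atomic<char*> top;
      char*              end;

      void* par_allocate(size_t bytes) {
        char* old_top = top.load();
        do {
          if (bytes > size_t(end - old_top)) {
            return nullptr;               // does not fit: caller takes the slower route
          }
        } while (!top.compare_exchange_weak(old_top, old_top + bytes));
        return old_top;                   // this thread won [old_top, old_top + bytes)
      }
    };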