annotate: src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @ 3710:4e037604f6ee
summary:  use alignment for constants specified in DataPatch.alignment
author:   Christian Wimmer <christian.wimmer@oracle.com>
date:     Mon, 05 Dec 2011 18:15:25 -0800
parents:  92da084fefc9
children: bca17e38de00
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

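// Chunked scanning of large object arrays: while an objArray is only
// partially scanned, the old (from-space) copy temporarily carries, in its
// length field, the index up to which the new copy has been processed.
// Each call below scans up to ParGCArrayScanChunk further elements and, if
// more remain, pushes the old copy back onto the work queue.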
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


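// Drain this thread's work queue down to max_size entries, scanning each
// popped object with the closure that matches its location (to-space vs.
// old gen), and keep going for as long as eagerly draining the private
// overflow stack refills the queue.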
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

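// Move a bounded batch of entries from this thread's private overflow
// stack back onto its work queue, where other threads can steal them.
// The batch is capped at a quarter of the free queue space and at
// ParGCDesiredObjsFromOverflowList.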
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

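// Slow-path to-space allocation, used when the current PLAB cannot satisfy
// the request: small requests retire the PLAB and carve out a fresh one
// (shrinking it to whatever space remains if necessary), while requests
// above the ParallelGCBufferWastePct threshold are allocated directly from
// to-space.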
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                          _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

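// ParScanThreadStateSet holds one ParScanThreadState per GC worker,
// constructed in place inside a ResourceArray; flush() merges the
// per-thread age tables and PLAB statistics back into the generation once
// the scavenge is done.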
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads;
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop>*             overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}


void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

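// Each worker alternates between draining its own queues, stealing from
// other workers' task queues, and refilling from the global overflow list;
// only when all of these come up empty does it offer termination, and it
// resumes scanning if termination is not reached.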
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

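// ParNewGenTask is the gang task run by each GC worker: work(i) binds
// worker i to its ParScanThreadState, scans the strong roots with the
// to-space root closure, and then evacuates followers by draining and
// stealing from the task queues.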
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

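// The constructor below sets up one ObjToScanQueue per GC worker and, when
// ParGCUseLocalOverflow is set, a matching array of per-thread Stack<oop>
// overflow stacks; otherwise overflow work goes through the shared
// _overflow_list.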
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

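// Parallel reference processing support: the proxies below wrap the
// reference processor's ProcessTask and EnqueueTask so they can be run by
// the GC WorkGang, with the ProcessTask proxy handing each worker the
// closures from its own ParScanThreadState.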
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

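// collect() drives one parallel scavenge: it checks that worst-case
// promotion is safe, clears the age table and to-space, runs ParNewGenTask
// on the work gang (or inline when only one worker is available), and then
// moves on to processing the references discovered during the scavenge.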
850 void ParNewGeneration::collect(bool full, | |
851 bool clear_all_soft_refs, | |
852 size_t size, | |
853 bool is_tlab) { | |
854 assert(full || size > 0, "otherwise we don't want to collect"); | |
855 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
856 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
857 "not a CMS generational heap"); | |
858 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); | |
859 WorkGang* workers = gch->workers(); | |
860 _next_gen = gch->next_gen(this); | |
861 assert(_next_gen != NULL, | |
862 "This must be the youngest gen, and not the only gen"); | |
863 assert(gch->n_gens() == 2, | |
864 "Par collection currently only works with single older gen."); | |
865 // Do we have to avoid promotion_undo? | |
866 if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { | |
867 set_avoid_promotion_undo(true); | |
868 } | |
869 | |
870 // If the next generation is too full to accommodate worst-case promotion |
871 // from this generation, pass on collection; let the next generation | |
872 // do it. | |
873 if (!collection_attempt_is_safe()) { | |
1888 | 874 gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one |
0 | 875 return; |
876 } | |
877 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); | |
878 | |
879 init_assuming_no_promotion_failure(); | |
880 | |
881 if (UseAdaptiveSizePolicy) { | |
882 set_survivor_overflow(false); | |
883 size_policy->minor_collection_begin(); | |
884 } | |
885 | |
886 TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
887 // Capture heap used before collection (for printing). | |
888 size_t gch_prev_used = gch->used(); | |
889 | |
890 SpecializationStats::clear(); | |
891 | |
892 age_table()->clear(); | |
263 | 893 to()->clear(SpaceDecorator::Mangle); |
0 | 894 |
895 gch->save_marks(); | |
896 assert(workers != NULL, "Need parallel worker threads."); | |
897 ParallelTaskTerminator _term(workers->total_workers(), task_queues()); | |
898 ParScanThreadStateSet thread_state_set(workers->total_workers(), | |
899 *to(), *this, *_next_gen, *task_queues(), | |
695 | 900 _overflow_stacks, desired_plab_sz(), _term); |
0 | 901 |
902 ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set); | |
903 int n_workers = workers->total_workers(); | |
904 gch->set_par_threads(n_workers); | |
905 gch->rem_set()->prepare_for_younger_refs_iterate(true); | |
906 // It turns out that even when we're using 1 thread, doing the work in a | |
907 // separate thread causes wide variance in run times. We can't help this | |
908 // in the multi-threaded case, but we special-case n=1 here to get | |
909 // repeatable measurements of the 1-thread overhead of the parallel code. | |
910 if (n_workers > 1) { | |
989 | 911 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 912 workers->run_task(&tsk); |
913 } else { | |
989 | 914 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 915 tsk.work(0); |
916 } | |
1145 | 917 thread_state_set.reset(promotion_failed()); |
0 | 918 |
919 // Process (weak) reference objects found during scavenge. | |
453 | 920 ReferenceProcessor* rp = ref_processor(); |
0 | 921 IsAliveClosure is_alive(this); |
922 ScanWeakRefClosure scan_weak_ref(this); | |
923 KeepAliveClosure keep_alive(&scan_weak_ref); | |
924 ScanClosure scan_without_gc_barrier(this, false); | |
925 ScanClosureWithParBarrier scan_with_gc_barrier(this, true); | |
926 set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); | |
927 EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, | |
928 &scan_without_gc_barrier, &scan_with_gc_barrier); | |
457 | 929 rp->setup_policy(clear_all_soft_refs); |
453 | 930 if (rp->processing_is_mt()) { |
0 | 931 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); |
453 | 932 rp->process_discovered_references(&is_alive, &keep_alive, |
933 &evacuate_followers, &task_executor); |
0 | 934 } else { |
935 thread_state_set.flush(); | |
936 gch->set_par_threads(0); // 0 ==> non-parallel. | |
937 gch->save_marks(); | |
453 | 938 rp->process_discovered_references(&is_alive, &keep_alive, |
939 &evacuate_followers, NULL); |
0 | 940 } |
941 if (!promotion_failed()) { | |
942 // Swap the survivor spaces. | |
263 | 943 eden()->clear(SpaceDecorator::Mangle); |
944 from()->clear(SpaceDecorator::Mangle); |
945 if (ZapUnusedHeapArea) { |
946 // This is now done here because of the piece-meal mangling which |
947 // can check for valid mangling at intermediate points in the |
948 // collection(s). When a minor collection fails to collect |
949 // sufficient space, resizing of the young generation can occur |
950 // and redistribute the spaces in the young generation. Mangle |
951 // here so that unzapped regions don't get distributed to |
952 // other spaces. |
953 to()->mangle_unused_area(); |
954 } |
0 | 955 swap_spaces(); |
956 | |
1387 | 957 // A successful scavenge should restart the GC time limit count which is |
958 // for full GC's. |
959 size_policy->reset_gc_overhead_limit_count(); |
960 |
0 | 961 assert(to()->is_empty(), "to space should be empty now"); |
962 } else { | |
1836 | 963 assert(_promo_failure_scan_stack.is_empty(), "post condition"); |
964 _promo_failure_scan_stack.clear(true); // Clear cached segments. |
965 |
0 | 966 remove_forwarding_pointers(); |
967 if (PrintGCDetails) { | |
968 gclog_or_tty->print(" (promotion failed)"); | |
969 } | |
970 // All the spaces are in play for mark-sweep. | |
971 swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. | |
972 from()->set_next_compaction_space(to()); | |
1888 | 973 gch->set_incremental_collection_failed(); |
1145 | 974 // Inform the next generation that a promotion failure occurred. |
975 _next_gen->promotion_failure_occurred(); |
6 | 976 |
977 // Reset the PromotionFailureALot counters. |
978 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) |
0 | 979 } |
980 // set new iteration safe limit for the survivor spaces | |
981 from()->set_concurrent_iteration_safe_limit(from()->top()); | |
982 to()->set_concurrent_iteration_safe_limit(to()->top()); | |
983 | |
984 adjust_desired_tenuring_threshold(); | |
985 if (ResizePLAB) { | |
986 plab_stats()->adjust_desired_plab_sz(); | |
987 } | |
988 | |
989 if (PrintGC && !PrintGCDetails) { | |
990 gch->print_heap_change(gch_prev_used); | |
991 } | |
992 | |
1712 | 993 if (PrintGCDetails && ParallelGCVerbose) { |
994 TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats()); |
995 TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats()); |
996 } |
1710 | 997 |
0 | 998 if (UseAdaptiveSizePolicy) { |
999 size_policy->minor_collection_end(gch->gc_cause()); | |
1000 size_policy->avg_survived()->sample(from()->used()); | |
1001 } | |
1002 | |
1003 update_time_of_last_gc(os::javaTimeMillis()); | |
1004 | |
1005 SpecializationStats::print(); | |
1006 | |
453 | 1007 rp->set_enqueuing_is_done(true); |
1008 if (rp->processing_is_mt()) { |
0 | 1009 ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); |
453 | 1010 rp->enqueue_discovered_references(&task_executor); |
0 | 1011 } else { |
453 | 1012 rp->enqueue_discovered_references(NULL); |
0 | 1013 } |
453 | 1014 rp->verify_no_references_recorded(); |
0 | 1015 } |
1016 | |
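// Note: waste_some_time() below is a deliberately trivial busy loop; it is
// the back-off used by real_forwardee_slow() while spinning on an object
// whose forwarding pointer still holds the interim ClaimedForwardPtr value,
// i.e. one claimed by another worker that has not yet published the real
// forwardee.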
1017 static int sum; | |
1018 void ParNewGeneration::waste_some_time() { | |
1019 for (int i = 0; i < 100; i++) { | |
1020 sum += i; | |
1021 } | |
1022 } | |
1023 | |
1024 static const oop ClaimedForwardPtr = oop(0x4); | |
1025 | |
1026 // Because of concurrency, there are times where an object for which | |
1027 // "is_forwarded()" is true contains an "interim" forwarding pointer | |
1028 // value. Such a value will soon be overwritten with a real value. | |
1029 // This method requires "obj" to have a forwarding pointer, and waits, if | |
1030 // necessary for a real one to be inserted, and returns it. | |
1031 | |
1032 oop ParNewGeneration::real_forwardee(oop obj) { | |
1033 oop forward_ptr = obj->forwardee(); | |
1034 if (forward_ptr != ClaimedForwardPtr) { | |
1035 return forward_ptr; | |
1036 } else { | |
1037 return real_forwardee_slow(obj); | |
1038 } | |
1039 } | |
1040 | |
1041 oop ParNewGeneration::real_forwardee_slow(oop obj) { | |
1042 // Spin-read if it is claimed but not yet written by another thread. | |
1043 oop forward_ptr = obj->forwardee(); | |
1044 while (forward_ptr == ClaimedForwardPtr) { | |
1045 waste_some_time(); | |
1046 assert(obj->is_forwarded(), "precondition"); | |
1047 forward_ptr = obj->forwardee(); | |
1048 } | |
1049 return forward_ptr; | |
1050 } | |
1051 | |
1052 #ifdef ASSERT | |
1053 bool ParNewGeneration::is_legal_forward_ptr(oop p) { | |
1054 return | |
1055 (_avoid_promotion_undo && p == ClaimedForwardPtr) | |
1056 || Universe::heap()->is_in_reserved(p); | |
1057 } | |
1058 #endif | |
1059 | |
1060 void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { | |
2038 | 1061 if (m->must_be_preserved_for_promotion_failure(obj)) { |
1062 // We should really have separate per-worker stacks, rather |
1063 // than use locking of a common pair of stacks. |
0 | 1064 MutexLocker ml(ParGCRareEvent_lock); |
2038 | 1065 preserve_mark(obj, m); |
0 | 1066 } |
1067 } | |
1068 | |
1069 // Multiple GC threads may try to promote an object. If the object | |
1070 // is successfully promoted, a forwarding pointer will be installed in | |
1071 // the object in the young generation. This method claims the right | |
1072 // to install the forwarding pointer before it copies the object, | |
1073 // thus avoiding the need to undo the copy as in | |
1074 // copy_to_survivor_space_with_undo. |
1075 | |
1076 oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo( | |
1077 ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { | |
1078 // In the sequential version, this assert also says that the object is | |
1079 // not forwarded. That might not be the case here. It is the case that | |
1080 // the caller observed it to be not forwarded at some time in the past. | |
1081 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); | |
1082 | |
1083 // The sequential code read "old->age()" below. That doesn't work here, | |
1084 // since the age is in the mark word, and that might be overwritten with | |
1085 // a forwarding pointer by a parallel thread. So we must save the mark | |
1086 // word in a local and then analyze it. | |
1087 oopDesc dummyOld; | |
1088 dummyOld.set_mark(m); | |
1089 assert(!dummyOld.is_forwarded(), | |
1090 "should not be called with forwarding pointer mark word."); | |
1091 | |
1092 oop new_obj = NULL; | |
1093 oop forward_ptr; | |
1094 | |
1095 // Try allocating obj in to-space (unless too old) | |
1096 if (dummyOld.age() < tenuring_threshold()) { | |
1097 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); | |
1098 if (new_obj == NULL) { | |
1099 set_survivor_overflow(true); | |
1100 } | |
1101 } | |
1102 | |
1103 if (new_obj == NULL) { | |
1104 // Either to-space is full or we decided to promote | |
1105 // try allocating obj tenured | |
1106 | |
1107 // Attempt to install a null forwarding pointer (atomically), | |
1108 // to claim the right to install the real forwarding pointer. | |
1109 forward_ptr = old->forward_to_atomic(ClaimedForwardPtr); | |
1110 if (forward_ptr != NULL) { | |
1111 // someone else beat us to it. | |
1112 return real_forwardee(old); | |
1113 } | |
1114 | |
1115 new_obj = _next_gen->par_promote(par_scan_state->thread_num(), | |
1116 old, m, sz); | |
1117 | |
1118 if (new_obj == NULL) { | |
1119 // promotion failed, forward to self | |
1120 _promotion_failed = true; | |
1121 new_obj = old; | |
1122 | |
1123 preserve_mark_if_necessary(old, m); | |
1145 | 1124 // Log the size of the maiden promotion failure |
1125 par_scan_state->log_promotion_failure(sz); |
0 | 1126 } |
1127 | |
1128 old->forward_to(new_obj); | |
1129 forward_ptr = NULL; | |
1130 } else { | |
1131 // Is in to-space; do copying ourselves. | |
1132 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); | |
1133 forward_ptr = old->forward_to_atomic(new_obj); | |
1134 // Restore the mark word copied above. | |
1135 new_obj->set_mark(m); | |
1136 // Increment age if obj still in new generation | |
1137 new_obj->incr_age(); | |
1138 par_scan_state->age_table()->add(new_obj, sz); | |
1139 } | |
1140 assert(new_obj != NULL, "just checking"); | |
1141 | |
1142 if (forward_ptr == NULL) { | |
1143 oop obj_to_push = new_obj; | |
1144 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { | |
1145 // Length field used as index of next element to be scanned. | |
1146 // Real length can be obtained from real_forwardee() | |
1147 arrayOop(old)->set_length(0); | |
1148 obj_to_push = old; | |
1149 assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, | |
1150 "push forwarded object"); | |
1151 } | |
1152 // Push it on one of the queues of to-be-scanned objects. | |
534 | 1153 bool simulate_overflow = false; |
1154 NOT_PRODUCT( | |
1155 if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { | |
1156 // simulate a stack overflow | |
1157 simulate_overflow = true; | |
1158 } | |
1159 ) | |
1160 if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { | |
0 | 1161 // Add stats for overflow pushes. |
1162 if (Verbose && PrintGCDetails) { | |
1163 gclog_or_tty->print("queue overflow!\n"); | |
1164 } | |
534 | 1165 push_on_overflow_list(old, par_scan_state); |
1710 | 1166 TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); |
0 | 1167 } |
1168 | |
1169 return new_obj; | |
1170 } | |
1171 | |
1172 // Oops. Someone beat us to it. Undo the allocation. Where did we | |
1173 // allocate it? | |
1174 if (is_in_reserved(new_obj)) { | |
1175 // Must be in to_space. | |
1176 assert(to()->is_in_reserved(new_obj), "Checking"); | |
1177 if (forward_ptr == ClaimedForwardPtr) { | |
1178 // Wait to get the real forwarding pointer value. | |
1179 forward_ptr = real_forwardee(old); | |
1180 } | |
1181 par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); | |
1182 } | |
1183 | |
1184 return forward_ptr; | |
1185 } | |
1186 | |
1187 | |
1188 // Multiple GC threads may try to promote the same object. If two | |
1189 // or more GC threads copy the object, only one wins the race to install | |
1190 // the forwarding pointer. The other threads have to undo their copy. | |
1191 | |
1192 oop ParNewGeneration::copy_to_survivor_space_with_undo( | |
1193 ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { | |
1194 | |
1195 // In the sequential version, this assert also says that the object is | |
1196 // not forwarded. That might not be the case here. It is the case that | |
1197 // the caller observed it to be not forwarded at some time in the past. | |
1198 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); | |
1199 | |
1200 // The sequential code read "old->age()" below. That doesn't work here, | |
1201 // since the age is in the mark word, and that might be overwritten with | |
1202 // a forwarding pointer by a parallel thread. So we must save the mark | |
1203 // word here, install it in a local oopDesc, and then analyze it. | |
1204 oopDesc dummyOld; | |
1205 dummyOld.set_mark(m); | |
1206 assert(!dummyOld.is_forwarded(), | |
1207 "should not be called with forwarding pointer mark word."); | |
1208 | |
1209 bool failed_to_promote = false; | |
1210 oop new_obj = NULL; | |
1211 oop forward_ptr; | |
1212 | |
1213 // Try allocating obj in to-space (unless too old) | |
1214 if (dummyOld.age() < tenuring_threshold()) { | |
1215 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); | |
1216 if (new_obj == NULL) { | |
1217 set_survivor_overflow(true); | |
1218 } | |
1219 } | |
1220 | |
1221 if (new_obj == NULL) { | |
1222 // Either to-space is full or we decided to promote | |
1223 // try allocating obj tenured | |
1224 new_obj = _next_gen->par_promote(par_scan_state->thread_num(), | |
1225 old, m, sz); | |
1226 | |
1227 if (new_obj == NULL) { | |
1228 // promotion failed, forward to self | |
1229 forward_ptr = old->forward_to_atomic(old); | |
1230 new_obj = old; | |
1231 | |
1232 if (forward_ptr != NULL) { | |
1233 return forward_ptr; // someone else succeeded | |
1234 } | |
1235 | |
1236 _promotion_failed = true; | |
1237 failed_to_promote = true; | |
1238 | |
1239 preserve_mark_if_necessary(old, m); | |
1145 | 1240 // Log the size of the maiden promotion failure |
1241 par_scan_state->log_promotion_failure(sz); |
0 | 1242 } |
1243 } else { | |
1244 // Is in to-space; do copying ourselves. | |
1245 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); | |
1246 // Restore the mark word copied above. | |
1247 new_obj->set_mark(m); | |
1248 // Increment age if new_obj still in new generation | |
1249 new_obj->incr_age(); | |
1250 par_scan_state->age_table()->add(new_obj, sz); | |
1251 } | |
1252 assert(new_obj != NULL, "just checking"); | |
1253 | |
1254 // Now attempt to install the forwarding pointer (atomically). | |
1255 // We have to copy the mark word before overwriting with forwarding | |
1256 // ptr, so we can restore it below in the copy. | |
1257 if (!failed_to_promote) { | |
1258 forward_ptr = old->forward_to_atomic(new_obj); | |
1259 } | |
1260 | |
1261 if (forward_ptr == NULL) { | |
1262 oop obj_to_push = new_obj; | |
1263 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { | |
1264 // Length field used as index of next element to be scanned. | |
1265 // Real length can be obtained from real_forwardee() | |
1266 arrayOop(old)->set_length(0); | |
1267 obj_to_push = old; | |
1268 assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, | |
1269 "push forwarded object"); | |
1270 } | |
1271 // Push it on one of the queues of to-be-scanned objects. | |
534 | 1272 bool simulate_overflow = false; |
1273 NOT_PRODUCT( | |
1274 if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { | |
1275 // simulate a stack overflow | |
1276 simulate_overflow = true; | |
1277 } | |
1278 ) | |
1279 if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { | |
0 | 1280 // Add stats for overflow pushes. |
534 | 1281 push_on_overflow_list(old, par_scan_state); |
1710 | 1282 TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); |
0 | 1283 } |
1284 | |
1285 return new_obj; | |
1286 } | |
1287 | |
1288 // Oops. Someone beat us to it. Undo the allocation. Where did we | |
1289 // allocate it? | |
1290 if (is_in_reserved(new_obj)) { | |
1291 // Must be in to_space. | |
1292 assert(to()->is_in_reserved(new_obj), "Checking"); | |
1293 par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); | |
1294 } else { | |
1295 assert(!_avoid_promotion_undo, "Should not be here if avoiding."); | |
1296 _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(), | |
1297 (HeapWord*)new_obj, sz); | |
1298 } | |
1299 | |
1300 return forward_ptr; | |
1301 } | |
1302 | |
534 | 1303 #ifndef PRODUCT |
1304 // It's OK to call this multi-threaded; the worst thing | |
1305 // that can happen is that we'll get a bunch of closely | |
1306 // spaced simulated overflows, but that's OK, in fact |
1307 // probably good as it would exercise the overflow code | |
1308 // under contention. | |
1309 bool ParNewGeneration::should_simulate_overflow() { | |
1310 if (_overflow_counter-- <= 0) { // just being defensive | |
1311 _overflow_counter = ParGCWorkQueueOverflowInterval; | |
1312 return true; | |
1313 } else { | |
1314 return false; | |
1315 } | |
1316 } | |
1317 #endif | |
1318 | |
679 | 1319 // In case we are using compressed oops, we need to be careful. |
1320 // If the object being pushed is an object array, then its length |
1321 // field keeps track of the "grey boundary" at which the next |
1322 // incremental scan will be done (see ParGCArrayScanChunk). |
1323 // When using compressed oops, this length field is kept in the |
1324 // lower 32 bits of the erstwhile klass word and cannot be used |
1325 // for the overflow chaining pointer (OCP below). As such the OCP |
1326 // would itself need to be compressed into the top 32-bits in this |
1327 // case. Unfortunately, see below, in the event that we have a |
1328 // promotion failure, the node to be pushed on the list can be |
1329 // outside of the Java heap, so the heap-based pointer compression |
1330 // would not work (we would have potential aliasing between C-heap |
1331 // and Java-heap pointers). For this reason, when using compressed |
1332 // oops, we simply use a worker-thread-local, non-shared overflow |
1333 // list in the form of a growable array, with a slightly different |
1334 // overflow stack draining strategy. If/when we start using fat |
1335 // stacks here, we can go back to using (fat) pointer chains |
1336 // (although some performance comparisons would be useful since |
1337 // single global lists have their own performance disadvantages |
1338 // as we were made painfully aware not long ago, see 6786503). |
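// Note on BUSY (defined below): it is a sentinel, not a real oop. A taker
// in take_from_overflow_list_work() atomically exchanges BUSY into
// _overflow_list while it carves off a prefix, so concurrent pushers and
// other takers can see that the global list is temporarily claimed; NULL is
// written back (or a suffix re-attached) once the prefix has been detached.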
534 | 1339 #define BUSY (oop(0x1aff1aff)) |
1340 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { | |
679 | 1341 assert(is_in_reserved(from_space_obj), "Should be from this generation"); |
695 | 1342 if (ParGCUseLocalOverflow) { |
679 | 1343 // In the case of compressed oops, we use a private, not-shared |
1344 // overflow stack. |
1345 par_scan_state->push_on_overflow_stack(from_space_obj); |
1346 } else { |
695 | 1347 assert(!UseCompressedOops, "Error"); |
679 | 1348 // if the object has been forwarded to itself, then we cannot |
1349 // use the klass pointer for the linked list. Instead we have |
1350 // to allocate an oopDesc in the C-Heap and use that for the linked list. |
1351 // XXX This is horribly inefficient when a promotion failure occurs |
1352 // and should be fixed. XXX FIX ME !!! |
534 | 1353 #ifndef PRODUCT |
679 | 1354 Atomic::inc_ptr(&_num_par_pushes); |
1355 assert(_num_par_pushes > 0, "Tautology"); |
534 | 1356 #endif |
679 | 1357 if (from_space_obj->forwardee() == from_space_obj) { |
1358 oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1); |
1359 listhead->forward_to(from_space_obj); |
1360 from_space_obj = listhead; |
1361 } |
1362 oop observed_overflow_list = _overflow_list; |
1363 oop cur_overflow_list; |
1364 do { |
1365 cur_overflow_list = observed_overflow_list; |
1366 if (cur_overflow_list != BUSY) { |
1367 from_space_obj->set_klass_to_list_ptr(cur_overflow_list); |
1368 } else { |
1369 from_space_obj->set_klass_to_list_ptr(NULL); |
1370 } |
1371 observed_overflow_list = |
1372 (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); |
1373 } while (cur_overflow_list != observed_overflow_list); |
0 | 1374 } |
1375 } | |
1376 | |
679 | 1377 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { |
1378 bool res; |
1379 |
695 | 1380 if (ParGCUseLocalOverflow) { |
679 | 1381 res = par_scan_state->take_from_overflow_stack(); |
1382 } else { |
695 | 1383 assert(!UseCompressedOops, "Error"); |
679 | 1384 res = take_from_overflow_list_work(par_scan_state); |
1385 } |
1386 return res; |
1387 } |
1388 |
1389 |
534 | 1390 // *NOTE*: The overflow list manipulation code here and |
1391 // in CMSCollector:: are very similar in shape, | |
1392 // except that in the CMS case we thread the objects | |
1393 // directly into the list via their mark word, and do | |
1394 // not need to deal with special cases below related | |
1395 // to chunking of object arrays and promotion failure | |
1396 // handling. | |
1397 // CR 6797058 has been filed to attempt consolidation of | |
1398 // the common code. | |
1399 // Because of the common code, if you make any changes in | |
1400 // the code below, please check the CMS version to see if | |
1401 // similar changes might be needed. | |
1402 // See CMSCollector::par_take_from_overflow_list() for | |
1403 // more extensive documentation comments. | |
679 | 1404 bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { |
0 | 1405 ObjToScanQueue* work_q = par_scan_state->work_queue(); |
1406 // How many to take? | |
679 | 1407 size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
534 | 1408 (size_t)ParGCDesiredObjsFromOverflowList); |
0 | 1409 |
1836 | 1410 assert(!UseCompressedOops, "Error"); |
679 | 1411 assert(par_scan_state->overflow_stack() == NULL, "Error"); |
0 | 1412 if (_overflow_list == NULL) return false; |
1413 | |
1414 // Otherwise, there was something there; try claiming the list. | |
534 | 1415 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); |
1416 // Trim off a prefix of at most objsFromOverflow items | |
1417 Thread* tid = Thread::current(); | |
1418 size_t spin_count = (size_t)ParallelGCThreads; | |
1419 size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100); | |
1420 for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) { | |
1421 // someone grabbed it before we did ... | |
1422 // ... we spin for a short while... | |
1423 os::sleep(tid, sleep_time_millis, false); | |
1424 if (_overflow_list == NULL) { | |
1425 // nothing left to take | |
1426 return false; | |
1427 } else if (_overflow_list != BUSY) { | |
1428 // try and grab the prefix | |
1429 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); | |
1430 } | |
0 | 1431 } |
534 | 1432 if (prefix == NULL || prefix == BUSY) { |
1433 // Nothing to take or waited long enough | |
1434 if (prefix == NULL) { | |
1435 // Write back the NULL in case we overwrote it with BUSY above | |
1436 // and it is still the same value. | |
1437 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); | |
1438 } | |
1439 return false; | |
1440 } | |
1441 assert(prefix != NULL && prefix != BUSY, "Error"); | |
1442 size_t i = 1; | |
0 | 1443 oop cur = prefix; |
167 | 1444 while (i < objsFromOverflow && cur->klass_or_null() != NULL) { |
0 | 1445 i++; cur = oop(cur->klass()); |
1446 } | |
1447 | |
1448 // Reattach remaining (suffix) to overflow list | |
534 | 1449 if (cur->klass_or_null() == NULL) { |
1450 // Write back the NULL in lieu of the BUSY we wrote | |
1451 // above, if it is still the same value. |
1452 if (_overflow_list == BUSY) { | |
1453 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); | |
0 | 1454 } |
534 | 1455 } else { |
1456 assert(cur->klass_or_null() != BUSY, "Error"); | |
1457 oop suffix = oop(cur->klass()); // suffix will be put back on global list | |
1458 cur->set_klass_to_list_ptr(NULL); // break off suffix | |
1459 // It's possible that the list is still in the empty(busy) state | |
1460 // we left it in a short while ago; in that case we may be | |
1461 // able to place back the suffix. | |
1462 oop observed_overflow_list = _overflow_list; | |
1463 oop cur_overflow_list = observed_overflow_list; | |
1464 bool attached = false; | |
1465 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { | |
1466 observed_overflow_list = | |
1467 (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); | |
1468 if (cur_overflow_list == observed_overflow_list) { | |
1469 attached = true; | |
1470 break; | |
1471 } else cur_overflow_list = observed_overflow_list; | |
1472 } | |
1473 if (!attached) { | |
1474 // Too bad, someone else got in in between; we'll need to do a splice. | |
1475 // Find the last item of suffix list | |
1476 oop last = suffix; | |
1477 while (last->klass_or_null() != NULL) { | |
1478 last = oop(last->klass()); | |
1479 } | |
1480 // Atomically prepend suffix to current overflow list | |
1481 observed_overflow_list = _overflow_list; | |
1482 do { | |
1483 cur_overflow_list = observed_overflow_list; | |
1484 if (cur_overflow_list != BUSY) { | |
1485 // Do the splice ... | |
1486 last->set_klass_to_list_ptr(cur_overflow_list); | |
1487 } else { // cur_overflow_list == BUSY | |
1488 last->set_klass_to_list_ptr(NULL); | |
1489 } | |
1490 observed_overflow_list = | |
1491 (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); | |
1492 } while (cur_overflow_list != observed_overflow_list); | |
0 | 1493 } |
1494 } | |
1495 | |
1496 // Push objects on prefix list onto this thread's work queue | |
534 | 1497 assert(prefix != NULL && prefix != BUSY, "program logic"); |
0 | 1498 cur = prefix; |
534 | 1499 ssize_t n = 0; |
0 | 1500 while (cur != NULL) { |
1501 oop obj_to_push = cur->forwardee(); | |
454 | 1502 oop next = oop(cur->klass_or_null()); |
0 | 1503 cur->set_klass(obj_to_push->klass()); |
534 | 1504 // This may be an array object that is self-forwarded. In that case, the list pointer |
1505 // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. | |
1506 if (!is_in_reserved(cur)) { | |
1507 // This can become a scaling bottleneck when there is work queue overflow coincident | |
1508 // with promotion failure. | |
1509 oopDesc* f = cur; | |
1510 FREE_C_HEAP_ARRAY(oopDesc, f); | |
1511 } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { | |
0 | 1512 assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); |
534 | 1513 obj_to_push = cur; |
0 | 1514 } |
534 | 1515 bool ok = work_q->push(obj_to_push); |
1516 assert(ok, "Should have succeeded"); | |
0 | 1517 cur = next; |
1518 n++; | |
1519 } | |
1710 | 1520 TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n)); |
534 | 1521 #ifndef PRODUCT |
1522 assert(_num_par_pushes >= n, "Too many pops?"); | |
1523 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); | |
1524 #endif | |
0 | 1525 return true; |
1526 } | |
534 | 1527 #undef BUSY |
0 | 1528 |
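// --------------------------------------------------------------------------
// Editor's sketch (not part of parNewGeneration.cpp): a minimal, self-
// contained illustration of the "claim the global list with a BUSY sentinel"
// protocol used by push_on_overflow_list() and take_from_overflow_list_work()
// above. It uses std::atomic instead of the VM's Atomic::cmpxchg_ptr /
// Atomic::xchg_ptr wrappers and a plain next field instead of threading the
// chain through klass words; all names here are illustrative only.
#include <atomic>

namespace overflow_sketch {

struct Node { Node* next; };

Node* const BUSY_MARK = reinterpret_cast<Node*>(0x1);   // sentinel, never dereferenced
std::atomic<Node*> overflow_list{nullptr};

// Producer: prepend a node. If a taker has marked the list BUSY, start a
// fresh chain; the taker writes NULL back (or splices a suffix) when done.
void push(Node* n) {
  Node* cur = overflow_list.load();
  do {
    n->next = (cur == BUSY_MARK) ? nullptr : cur;
  } while (!overflow_list.compare_exchange_weak(cur, n));
}

// Consumer: claim the whole chain by swapping in BUSY_MARK, then restore
// NULL if the list still holds our sentinel (another pusher may already
// have started a new chain over it, which is fine).
Node* take_all() {
  Node* head = overflow_list.exchange(BUSY_MARK);
  if (head == BUSY_MARK) {
    return nullptr;                    // someone else holds the claim
  }
  Node* expected = BUSY_MARK;
  overflow_list.compare_exchange_strong(expected, nullptr);
  return head;                         // may be nullptr if the list was empty
}

} // namespace overflow_sketch
// --------------------------------------------------------------------------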
1529 void ParNewGeneration::ref_processor_init() | |
1530 { | |
1531 if (_ref_processor == NULL) { | |
1532 // Allocate and initialize a reference processor | |
2369 | 1533 _ref_processor = |
1534 new ReferenceProcessor(_reserved, // span |
1535 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
1536 (int) ParallelGCThreads, // mt processing degree |
1537 refs_discovery_is_mt(), // mt discovery |
1538 (int) ParallelGCThreads, // mt discovery degree |
1539 refs_discovery_is_atomic(), // atomic_discovery |
1540 NULL, // is_alive_non_header |
1541 false); // write barrier for next field updates |
0 | 1542 } |
1543 } | |
1544 | |
1545 const char* ParNewGeneration::name() const { | |
1546 return "par new generation"; | |
1547 } | |
1833 | 1548 |
1549 bool ParNewGeneration::in_use() { |
1550 return UseParNewGC && ParallelGCThreads > 0; |
1551 } |