Mercurial > hg > truffle
comparison src/share/vm/memory/referenceProcessor.hpp @ 2369:92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
Summary: Use _max_num_q = max(discovery_degree, processing_degree), and let balance_queues() redistribute from discovery_degree to processing_degree of queues. This should also allow a more dynamic and flexible parallelism policy in the future.
Reviewed-by: jmasa, johnc
author | ysr |
---|---|
date | Thu, 17 Mar 2011 10:32:46 -0700 |
parents | 8df09fb45352 |
children | c2bf0120ee5d |
comparison
equal
deleted
inserted
replaced
2368:dde920245681 | 2369:92da084fefc9 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
69 bool _discovered_list_needs_barrier; | 69 bool _discovered_list_needs_barrier; |
70 BarrierSet* _bs; // Cached copy of BarrierSet. | 70 BarrierSet* _bs; // Cached copy of BarrierSet. |
71 bool _enqueuing_is_done; // true if all weak references enqueued | 71 bool _enqueuing_is_done; // true if all weak references enqueued |
72 bool _processing_is_mt; // true during phases when | 72 bool _processing_is_mt; // true during phases when |
73 // reference processing is MT. | 73 // reference processing is MT. |
74 int _next_id; // round-robin counter in | 74 int _next_id; // round-robin mod _num_q counter in |
75 // support of work distribution | 75 // support of work distribution |
76 | 76 |
77 // For collectors that do not keep GC marking information | 77 // For collectors that do not keep GC marking information |
78 // in the object header, this field holds a closure that | 78 // in the object header, this field holds a closure that |
79 // helps the reference processor determine the reachability | 79 // helps the reference processor determine the reachability |
101 DiscoveredList* _discoveredFinalRefs; | 101 DiscoveredList* _discoveredFinalRefs; |
102 DiscoveredList* _discoveredPhantomRefs; | 102 DiscoveredList* _discoveredPhantomRefs; |
103 | 103 |
104 public: | 104 public: |
105 int num_q() { return _num_q; } | 105 int num_q() { return _num_q; } |
106 void set_mt_degree(int v) { _num_q = v; } | 106 int max_num_q() { return _max_num_q; } |
107 void set_active_mt_degree(int v) { _num_q = v; } | |
107 DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } | 108 DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } |
108 static oop sentinel_ref() { return _sentinelRef; } | 109 static oop sentinel_ref() { return _sentinelRef; } |
109 static oop* adr_sentinel_ref() { return &_sentinelRef; } | 110 static oop* adr_sentinel_ref() { return &_sentinelRef; } |
110 ReferencePolicy* setup_policy(bool always_clear) { | 111 ReferencePolicy* setup_policy(bool always_clear) { |
111 _current_soft_ref_policy = always_clear ? | 112 _current_soft_ref_policy = always_clear ? |
214 BoolObjectClosure* is_alive, | 215 BoolObjectClosure* is_alive, |
215 OopClosure* keep_alive, | 216 OopClosure* keep_alive, |
216 VoidClosure* complete_gc, | 217 VoidClosure* complete_gc, |
217 YieldClosure* yield); | 218 YieldClosure* yield); |
218 | 219 |
220 // round-robin mod _num_q (not: _not_ mod _max_num_q) |
219 int next_id() { | 221 int next_id() { |
220 int id = _next_id; | 222 int id = _next_id; |
221 if (++_next_id == _num_q) { | 223 if (++_next_id == _num_q) { |
222 _next_id = 0; | 224 _next_id = 0; |
223 } | 225 } |
254 _is_alive_non_header(NULL), | 256 _is_alive_non_header(NULL), |
255 _num_q(0), | 257 _num_q(0), |
256 _max_num_q(0), | 258 _max_num_q(0), |
257 _processing_is_mt(false), | 259 _processing_is_mt(false), |
258 _next_id(0) | 260 _next_id(0) |
259 {} | 261 { } |
260 | 262 |
261 ReferenceProcessor(MemRegion span, bool atomic_discovery, | 263 // Default parameters give you a vanilla reference processor. |
262 bool mt_discovery, | 264 ReferenceProcessor(MemRegion span, |
263 int mt_degree = 1, | 265 bool mt_processing = false, int mt_processing_degree = 1, |
264 bool mt_processing = false, | 266 bool mt_discovery = false, int mt_discovery_degree = 1, |
267 bool atomic_discovery = true, | |
268 BoolObjectClosure* is_alive_non_header = NULL, | |
265 bool discovered_list_needs_barrier = false); | 269 bool discovered_list_needs_barrier = false); |
266 | |
267 // Allocates and initializes a reference processor. | |
268 static ReferenceProcessor* create_ref_processor( | |
269 MemRegion span, | |
270 bool atomic_discovery, | |
271 bool mt_discovery, | |
272 BoolObjectClosure* is_alive_non_header = NULL, | |
273 int parallel_gc_threads = 1, | |
274 bool mt_processing = false, | |
275 bool discovered_list_needs_barrier = false); | |
276 | 270 |
277 // RefDiscoveryPolicy values | 271 // RefDiscoveryPolicy values |
278 enum DiscoveryPolicy { | 272 enum DiscoveryPolicy { |
279 ReferenceBasedDiscovery = 0, | 273 ReferenceBasedDiscovery = 0, |
280 ReferentBasedDiscovery = 1, | 274 ReferentBasedDiscovery = 1, |
395 }; | 389 }; |
396 | 390 |
397 // A utility class to temporarily change the MT'ness of | 391 // A utility class to temporarily change the MT'ness of |
398 // reference discovery for the given ReferenceProcessor | 392 // reference discovery for the given ReferenceProcessor |
399 // in the scope that contains it. | 393 // in the scope that contains it. |
400 class ReferenceProcessorMTMutator: StackObj { | 394 class ReferenceProcessorMTDiscoveryMutator: StackObj { |
401 private: | 395 private: |
402 ReferenceProcessor* _rp; | 396 ReferenceProcessor* _rp; |
403 bool _saved_mt; | 397 bool _saved_mt; |
404 | 398 |
405 public: | 399 public: |
406 ReferenceProcessorMTMutator(ReferenceProcessor* rp, | 400 ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, |
407 bool mt): | 401 bool mt): |
408 _rp(rp) { | 402 _rp(rp) { |
409 _saved_mt = _rp->discovery_is_mt(); | 403 _saved_mt = _rp->discovery_is_mt(); |
410 _rp->set_mt_discovery(mt); | 404 _rp->set_mt_discovery(mt); |
411 } | 405 } |
412 | 406 |
413 ~ReferenceProcessorMTMutator() { | 407 ~ReferenceProcessorMTDiscoveryMutator() { |
414 _rp->set_mt_discovery(_saved_mt); | 408 _rp->set_mt_discovery(_saved_mt); |
415 } | 409 } |
416 }; | 410 }; |
417 | 411 |
418 | 412 |