/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
|
24
|
|
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|
|
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|
|
27
|
|
28 #include "gc_implementation/g1/g1_globals.hpp"
|
|
29 #include "gc_implementation/g1/g1CardCounts.hpp"
|
|
30 #include "memory/allocation.hpp"
|
|
31 #include "runtime/safepoint.hpp"
|
|
32 #include "runtime/thread.inline.hpp"
|
|
33 #include "utilities/globalDefinitions.hpp"
|
|
34
|
|
35 class DirtyCardQueue;
|
|
36 class G1CollectedHeap;
|
|
37 class G1RemSet;
|
|
38 class HeapRegion;
|
|
39
|
|
// An evicting cache of cards that have been logged by the G1 post
// write barrier. Placing a card in the cache delays the refinement
// of the card until the card is evicted, or the cache is drained
// during the next evacuation pause.
//
// The first thing the G1 post write barrier does is to check whether
// the card containing the updated pointer is already dirty and, if
// so, skips the remaining code in the barrier.
//
// Delaying the refinement of a card will make the card fail the
// first is_dirty check in the write barrier, skipping the remainder
// of the write barrier.
//
// This can significantly reduce the overhead of the write barrier
// code, increasing throughput.
|
|
56 class G1HotCardCache: public CHeapObj<mtGC> {
|
|
57 G1CollectedHeap* _g1h;
|
|
58
|
|
59 // The card cache table
|
|
60 jbyte** _hot_cache;
|
|
61
|
|
62 int _hot_cache_size;
|
|
63 int _n_hot;
|
|
64 int _hot_cache_idx;
|
|
65
|
|
66 int _hot_cache_par_chunk_size;
|
|
67 volatile int _hot_cache_par_claimed_idx;
|
|
68
|
|
69 bool _use_cache;
|
|
70
|
|
71 G1CardCounts _card_counts;
|
|
72
|
|
73 bool default_use_cache() const {
|
|
74 return (G1ConcRSLogCacheSize > 0);
|
|
75 }
|
|
76
|
|
77 public:
|
|
78 G1HotCardCache(G1CollectedHeap* g1h);
|
|
79 ~G1HotCardCache();
|
|
80
|
|
81 void initialize();
|
|
82
|
|
83 bool use_cache() { return _use_cache; }
|
|
84
|
|
85 void set_use_cache(bool b) {
|
|
86 _use_cache = (b ? default_use_cache() : false);
|
|
87 }
|
|
88
|
|
89 // Returns the card to be refined or NULL.
|
|
90 //
|
|
91 // Increments the count for given the card. if the card is not 'hot',
|
|
92 // it is returned for immediate refining. Otherwise the card is
|
|
93 // added to the hot card cache.
|
|
94 // If there is enough room in the hot card cache for the card we're
|
|
95 // adding, NULL is returned and no further action in needed.
|
|
96 // If we evict a card from the cache to make room for the new card,
|
|
97 // the evicted card is then returned for refinement.
|
|
98 jbyte* insert(jbyte* card_ptr);
|
|
99
|
|
100 // Refine the cards that have delayed as a result of
|
|
101 // being in the cache.
|
|
102 void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
|
|
103
|
|
104 // Set up for parallel processing of the cards in the hot cache
|
|
105 void reset_hot_cache_claimed_index() {
|
|
106 _hot_cache_par_claimed_idx = 0;
|
|
107 }
|
|
108
|
|
109 // Resets the hot card cache and discards the entries.
|
|
110 void reset_hot_cache() {
|
|
111 assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
|
|
112 assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
|
|
113 _hot_cache_idx = 0; _n_hot = 0;
|
|
114 }
|
|
115
|
|
116 bool hot_cache_is_empty() { return _n_hot == 0; }
|
|
117
|
|
118 // Resizes the card counts table to match the given capacity
|
|
119 void resize_card_counts(size_t heap_capacity);
|
|
120
|
|
121 // Zeros the values in the card counts table for entire committed heap
|
|
122 void reset_card_counts();
|
|
123
|
|
124 // Zeros the values in the card counts table for the given region
|
|
125 void reset_card_counts(HeapRegion* hr);
|
|
126 };
|
|
127
|
|
128 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|