Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @ 1886:72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and the humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps to ensure that the concurrent refinement threads always have a consistent view over the BOT, region contents, and top. The fix includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
author | tonyp |
---|---|
date | Sat, 16 Oct 2010 17:12:19 -0400 |
parents | 2dfd013a7465 |
children | f95d63e2154a |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
960
diff
changeset
|
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
960
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
960
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
960
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegionSeq.cpp.incl" | |
27 | |
28 // Local to this file. | |
29 | |
30 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) { | |
31 if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1; | |
32 else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1; | |
33 else if (*hr1p == *hr2p) return 0; | |
34 else { | |
35 assert(false, "We should never compare distinct overlapping regions."); | |
36 } | |
37 return 0; | |
38 } | |
39 | |
// Constructs a sequence able to hold at most max_size regions; the
// backing growable array's element storage lives on the C heap (see
// the comment on the _regions initializer below).
HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
63 | |
64 // Private methods. | |
65 | |
// Attempts to find, starting at index ind, a run of contiguous empty
// regions whose combined capacity can hold a humongous object of
// word_size words. On success, the run is set up as a "starts
// humongous" region followed by zero or more "continues humongous"
// regions, and the address of the new object's start is returned; on
// failure NULL is returned. The set-up is done in a very specific
// order so that concurrent refinement threads always see a consistent
// view of the BOT, region contents, and top (see CR 6991377).
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //    && sumSizes is the sum of the sizes of the regions in the interval
    //       [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      // Run broken (non-empty or non-contiguous region): restart the
      // candidate run just past this region.
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    // Resume the next search after the run we just consumed.
    _alloc_search_start = cur;

    // We need to initialize the region(s) we just discovered. This is
    // a bit tricky given that it can happen concurrently with
    // refinement threads refining cards on these regions and
    // potentially wanting to refine the BOT as they are scanning
    // those cards (this can happen shortly after a cleanup; see CR
    // 6991377). So we have to set up the region(s) carefully and in
    // a specific order.

    // Currently, allocs_are_zero_filled() returns false. The zero
    // filling infrastructure will be going away soon (see CR 6977804).
    // So no need to do anything else here.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    assert(!zf, "not supported");

    // This will be the "starts humongous" region.
    HeapRegion* first_hr = _regions.at(first);
    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      first_hr->set_zero_fill_allocated();
    }
    // The header of the new object will be placed at the bottom of
    // the first region.
    HeapWord* new_obj = first_hr->bottom();
    // This will be the new end of the first region in the series that
    // should also match the end of the last region in the series.
    // (Note: sumSizes = "region size" x "number of regions we found").
    HeapWord* new_end = new_obj + sumSizes;
    // This will be the new top of the first region that will reflect
    // this allocation.
    HeapWord* new_top = new_obj + word_size;

    // First, we need to zero the header of the space that we will be
    // allocating. When we update top further down, some refinement
    // threads might try to scan the region. By zeroing the header we
    // ensure that any thread that will try to scan the region will
    // come across the zero klass word and bail out.
    //
    // NOTE: It would not have been correct to have used
    // CollectedHeap::fill_with_object() and make the space look like
    // an int array. The thread that is doing the allocation will
    // later update the object header to a potentially different array
    // type and, for a very short period of time, the klass and length
    // fields will be inconsistent. This could cause a refinement
    // thread to calculate the object size incorrectly.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_startsHumongous(new_end);

    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      hr->set_continuesHumongous(first_hr);
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end.
    assert(hr == NULL || hr->end() == new_end, "sanity");

    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
    // [bottom,top] and [card_start,card_end] will be empty. Before we
    // update the top fields, we'll do a storestore to make sure that
    // no thread sees the update to top before the zeroing of the
    // object header and the BOT initialization.
    OrderAccess::storestore();

    // Now that the BOT and the object header have been initialized,
    // we can update top of the "starts humongous" region.
    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
           "new_top should be in this region");
    first_hr->set_top(new_top);

    // Now, we will update the top fields of the "continues humongous"
    // regions. The reason we need to do this is that, otherwise,
    // these regions would look empty and this will confuse parts of
    // G1. For example, the code that looks for a consecutive number
    // of empty regions will consider them empty and try to
    // re-allocate them. We can extend is_empty() to also include
    // !continuesHumongous(), but it is easier to just update the top
    // fields here.
    hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      if ((i + 1) == cur) {
        // last continues humongous region
        assert(hr->bottom() < new_top && new_top <= hr->end(),
               "new_top should fall on this region");
        hr->set_top(new_top);
      } else {
        // not last one
        assert(new_top > hr->end(), "new_top should be above this region");
        hr->set_top(hr->end());
      }
    }
    // If we have continues humongous regions (hr != NULL), then the
    // end of the last one should match new_end and its top should
    // match new_top.
    assert(hr == NULL ||
           (hr->end() == new_end && hr->top() == new_top), "sanity");

    return new_obj;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}
211 | |
677 | 212 void HeapRegionSeq::print_empty_runs() { |
342 | 213 int empty_run = 0; |
214 int n_empty = 0; | |
215 int empty_run_start; | |
216 for (int i = 0; i < _regions.length(); i++) { | |
217 HeapRegion* r = _regions.at(i); | |
218 if (r->continuesHumongous()) continue; | |
677 | 219 if (r->is_empty()) { |
342 | 220 assert(!r->isHumongous(), "H regions should not be empty."); |
221 if (empty_run == 0) empty_run_start = i; | |
222 empty_run++; | |
223 n_empty++; | |
224 } else { | |
225 if (empty_run > 0) { | |
226 gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); | |
227 empty_run = 0; | |
228 } | |
229 } | |
230 } | |
231 if (empty_run > 0) { | |
232 gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); | |
233 } | |
234 gclog_or_tty->print_cr(" [tot = %d]", n_empty); | |
235 } | |
236 | |
237 int HeapRegionSeq::find(HeapRegion* hr) { | |
238 // FIXME: optimized for adjacent regions of fixed size. | |
239 int ind = hr->hrs_index(); | |
240 if (ind != -1) { | |
241 assert(_regions.at(ind) == hr, "Mismatch"); | |
242 } | |
243 return ind; | |
244 } | |
245 | |
246 | |
247 // Public methods. | |
248 | |
249 void HeapRegionSeq::insert(HeapRegion* hr) { | |
393 | 250 assert(!_regions.is_full(), "Too many elements in HeapRegionSeq"); |
342 | 251 if (_regions.length() == 0 |
252 || _regions.top()->end() <= hr->bottom()) { | |
253 hr->set_hrs_index(_regions.length()); | |
254 _regions.append(hr); | |
255 } else { | |
256 _regions.append(hr); | |
257 _regions.sort(orderRegions); | |
258 for (int i = 0; i < _regions.length(); i++) { | |
259 _regions.at(i)->set_hrs_index(i); | |
260 } | |
261 } | |
262 char* bot = (char*)_regions.at(0)->bottom(); | |
263 if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot; | |
264 } | |
265 | |
// Number of regions currently in the sequence (the growable array's
// int length, widened to size_t).
size_t HeapRegionSeq::length() {
  return _regions.length();
}
269 | |
// Returns the number of regions in the longest suffix of the sequence
// consisting of empty regions whose address ranges are contiguous.
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  // Walk backwards while regions remain empty and contiguous. Note
  // that _regions.at(cur+1) is only evaluated when cur != first
  // (short-circuit), so it never indexes past the end of the array.
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}
284 | |
285 HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) { | |
286 int cur = _alloc_search_start; | |
287 // Make sure "cur" is a valid index. | |
288 assert(cur >= 0, "Invariant."); | |
289 HeapWord* res = alloc_obj_from_region_index(cur, word_size); | |
290 if (res == NULL) | |
291 res = alloc_obj_from_region_index(0, word_size); | |
292 return res; | |
293 } | |
294 | |
// Applies blk to every region in the sequence, starting from the
// beginning (a NULL start region means "start at index 0").
void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}
298 | |
299 // The first argument r is the heap region at which iteration begins. | |
300 // This operation runs fastest when r is NULL, or the heap region for | |
301 // which a HeapRegionClosure most recently returned true, or the | |
302 // heap region immediately to its right in the sequence. In all | |
303 // other cases a linear search is required to find the index of r. | |
304 | |
// Applies blk to every region, conceptually starting at r (or index 0
// when r is NULL) and wrapping around, stopping early — and marking
// the closure incomplete — if the closure returns true.
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
    // NOTE(review): "j = find(r)" above falls inside the #if 0 span,
    // so with the cache code compiled out j stays 0 and iteration
    // effectively always starts at index 0 even when r != NULL —
    // confirm this is the intended behavior.
    if (j < 0) {
      j = 0;
    }
#if 0
    }
#endif
  }
  int i;
  // First pass: [j, len); then wrap around to [0, j).
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
357 | |
358 void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) { | |
359 int len = _regions.length(); | |
360 int i; | |
361 for (i = idx; i < len; i++) { | |
362 if (blk->doHeapRegion(_regions.at(i))) { | |
363 blk->incomplete(); | |
364 return; | |
365 } | |
366 } | |
367 for (i = 0; i < idx; i++) { | |
368 if (blk->doHeapRegion(_regions.at(i))) { | |
369 blk->incomplete(); | |
370 return; | |
371 } | |
372 } | |
373 } | |
374 | |
// Shrinks the heap by popping empty regions off the top of the
// sequence until shrink_bytes worth of capacity has been removed, a
// non-empty region is reached, or a humongous region blocks the way.
// Returns the address range that was uncommitted and reports the
// number of regions removed through num_regions_deleted.
// NOTE(review): num_regions_deleted is only zeroed on the
// empty-sequence path; the loop increments whatever value the caller
// passed in — presumably callers initialize it to 0; confirm.
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
410 | |
411 | |
// Closure that prints each visited region's address followed by the
// region's own description; never terminates the iteration early.
class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;  // keep iterating over all regions
  }
};
420 | |
// Prints every region in the sequence to the GC log.
void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}