src/share/vm/memory/compactingPermGenGen.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 2c106685d6d0
/*
 * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compactingPermGenGen.cpp.incl"


// Recursively adjust all pointers in an object and all objects
// referenced by it. Clear marks on objects in order to prevent visiting
// any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
public:
  void do_oop(oop* o) {
    oop obj = *o;
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
};
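
// Note: this closure is applied from pre_adjust_pointers() (below) to the
// strong roots into the shared read-write space, e.g.
//
//   RecursiveAdjustSharedObjectClosure blk;
//   Universe::oops_do(&blk);
//   StringTable::oops_do(&blk);
//
// Shared read-write objects reachable from those roots have their pointers
// adjusted and their marks cleared exactly once.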


// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
// that we must not call find_shared_class with non-read-only symbols
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
public:
  void do_oop(oop* o) {
    oop obj = *o;
    if (obj->klass() == Universe::symbolKlassObj() &&
        obj->is_shared_readonly()) {
      symbolHandle sym((symbolOop) obj);
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
};
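
// This closure is applied to the system dictionary placeholders from
// pre_adjust_pointers() below:
//
//   TraversePlaceholdersClosure tpc;
//   SystemDictionary::placeholders_do(&tpc);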


void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}
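
// The counters created above are exported through the PerfData interface
// (and are thus visible to monitoring tools such as jstat); they are
// refreshed by update_counters() below whenever UsePerfData is enabled.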

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

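  // With sharing enabled, the address ranges computed below lay the
  // reservation out as follows (low addresses first):
  //
  //   unshared_bottom .. unshared_end    regular (unshared) perm gen, 'rs'
  //   readonly_bottom .. readonly_end    shared read-only space
  //   readwrite_bottom.. readwrite_end   shared read-write space
  //   miscdata_bottom .. miscdata_end    shared miscellaneous data
  //   misccode_bottom .. misccode_end    shared miscellaneous code
  //
  // shared_bottom == readonly_bottom and shared_end == misccode_end, and the
  // shared spaces begin exactly where the unshared space ends.  With sharing
  // disabled, every shared boundary collapses to the end of 'rs'.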
  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom =
      readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom =
      readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs = shared_rs.first_part(spec()->read_only_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs = tmp_rs1.first_part(spec()->read_write_size(),
                                           UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs = tmp_rs2.first_part(spec()->misc_data_size(),
                                           UseSharedSpaces);
  ReservedSpace mc_rs = tmp_rs2.last_part(spec()->misc_data_size());

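  // The successive first_part/last_part calls above carve shared_rs into
  // four consecutive pieces that match the ranges computed earlier:
  //
  //   ro_rs -> [readonly_bottom,  readonly_end )
  //   rw_rs -> [readwrite_bottom, readwrite_end)
  //   md_rs -> [miscdata_bottom,  miscdata_end )
  //   mc_rs -> [misccode_bottom,  misccode_end )
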
  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, so don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL)) ||
          (!mapinfo->map_space(mc, mc_rs, NULL)) ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Base addresses didn't match; skip sharing, but continue.
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on is specified, print the error message and exit the
        // VM; otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}


// Do a complete scan of the shared read-write space to catch all
// objects which contain references to any younger generation. Forward
// the pointers. Avoid space_iterate, as actually visiting all the
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the
// read-write space.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    RecursiveAdjustSharedObjectClosure blk;
    Universe::oops_do(&blk);
    StringTable::oops_do(&blk);
    SystemDictionary::always_strong_classes_do(&blk);
    TraversePlaceholdersClosure tpc;
    SystemDictionary::placeholders_do(&tpc);
  }
}


#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif


void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}


void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
#ifdef PRODUCT
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided. However, this is required for Verify options.
    ShouldNotReachHere();
#endif

    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}


void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print(" ro");
    ro_space()->print_on(st);
    st->print(" rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}


// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

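// For example, a Java class declaring a field such as
// "static Object cached = new Object();" (an illustrative declaration, not
// taken from the VM sources) stores, in its perm gen class object, a pointer
// to an instance that may live in a younger generation, so the card covering
// that static field must be scanned here.
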
void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}


// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}


void CompactingPermGenGen::compact() {
  the_space()->compact();
}


size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
         "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}


bool CompactingPermGenGen::grow_by(size_t bytes) {
  // Don't allow _virtual_size to expand into shared spaces.
  size_t max_bytes = _virtual_space.uncommitted_size() - _shared_space_size;
  if (bytes > max_bytes) {
    bytes = max_bytes;
  }
  return OneContigSpaceCardGeneration::grow_by(bytes);
}


void CompactingPermGenGen::grow_to_reserved() {
  // Don't allow _virtual_size to expand into shared spaces.
  if (_virtual_space.uncommitted_size() > _shared_space_size) {
    size_t remaining_bytes =
      _virtual_space.uncommitted_size() - _shared_space_size;
    bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
}


// There are no young generation references left, so clear this generation's
// main space's card table entries. Do NOT clear the card table entries for
// the read-only space (they are always clear) or the read-write space (they
// carry valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}


// Objects in this generation's main space may have moved, so invalidate
// that space's cards. Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}


HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}
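
// The remapping gives the VM a writable (private, copy-on-write) view of the
// read-only archive region so that RedefineClasses() can update class
// metadata stored there; the shared archive file itself is not modified.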

void** CompactingPermGenGen::_vtbl_list;