Mercurial > hg > graal-compiler
annotate src/share/vm/memory/dump.cpp @ 452:00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes
author | ysr |
---|---|
date | Thu, 20 Nov 2008 12:27:41 -0800 |
parents | 52e32c8b317e |
children | e5b0439ef4ae |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_dump.cpp.incl" | |
27 | |
28 | |
29 // Closure to set up the fingerprint field for all methods. | |
30 | |
31 class FingerprintMethodsClosure: public ObjectClosure { | |
32 public: | |
33 void do_object(oop obj) { | |
34 if (obj->is_method()) { | |
35 methodOop mobj = (methodOop)obj; | |
36 ResourceMark rm; | |
37 (new Fingerprinter(mobj))->fingerprint(); | |
38 } | |
39 } | |
40 }; | |
41 | |
42 | |
43 | |
44 // Closure to set the hash value (String.hash field) in all of the | |
45 // String objects in the heap. Setting the hash value is not required. | |
46 // However, setting the value in advance prevents the value from being | |
47 // written later, increasing the likelihood that the shared page containing | |
48 // the hash can be shared. | |
49 // | |
50 // NOTE THAT the algorithm in StringTable::hash_string() MUST MATCH the | |
51 // algorithm in java.lang.String.hashCode(). | |
52 | |
class StringHashCodeClosure: public OopClosure {
private:
  Thread* THREAD;
  int hash_offset;   // byte offset of String.hash, looked up once
public:
  StringHashCodeClosure(Thread* t) {
    THREAD = t;
    hash_offset = java_lang_String::hash_offset_in_bytes();
  }

  void do_oop(oop* p) {
    if (p != NULL) {
      oop obj = *p;
      if (obj->klass() == SystemDictionary::string_klass()) {

        int hash;
        typeArrayOop value = java_lang_String::value(obj);
        int length = java_lang_String::length(obj);
        if (length == 0) {
          // Empty strings hash to 0, as in java.lang.String.hashCode().
          hash = 0;
        } else {
          int offset = java_lang_String::offset(obj);
          jchar* s = value->char_at_addr(offset);
          // Must produce the same value as java.lang.String.hashCode()
          // (see the NOTE above this class).
          hash = StringTable::hash_string(s, length);
        }
        // Pre-write the cached hash so the page is not dirtied later.
        obj->int_field_put(hash_offset, hash);
      }
    }
  }

  // Compressed oops are not expected while dumping the shared archive.
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
84 | |
85 | |
86 // Remove data from objects which should not appear in the shared file | |
87 // (as it pertains only to the current JVM). | |
88 | |
89 class RemoveUnshareableInfoClosure : public ObjectClosure { | |
90 public: | |
91 void do_object(oop obj) { | |
92 // Zap data from the objects which is pertains only to this JVM. We | |
93 // want that data recreated in new JVMs when the shared file is used. | |
94 if (obj->is_method()) { | |
95 ((methodOop)obj)->remove_unshareable_info(); | |
96 } | |
97 else if (obj->is_klass()) { | |
98 Klass::cast((klassOop)obj)->remove_unshareable_info(); | |
99 } | |
100 | |
101 // Don't save compiler related special oops (shouldn't be any yet). | |
102 if (obj->is_methodData() || obj->is_compiledICHolder()) { | |
103 ShouldNotReachHere(); | |
104 } | |
105 } | |
106 }; | |
107 | |
108 | |
// Mark obj by installing the "marked" pattern in its header, but only if it
// is a non-NULL, unshared, unforwarded and not-yet-marked object.  Returns
// true iff this call performed the marking (used by mark_and_move below to
// avoid moving an object twice).
static bool mark_object(oop obj) {
  if (obj != NULL &&
      !obj->is_shared() &&
      !obj->is_forwarded() &&
      !obj->is_gc_marked()) {
    obj->set_mark(markOopDesc::prototype()->set_marked());
    return true;
  }

  return false;
}
120 | |
121 // Closure: mark objects closure. | |
122 | |
123 class MarkObjectsOopClosure : public OopClosure { | |
124 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
125 void do_oop(oop* p) { mark_object(*p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
126 void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
0 | 127 }; |
128 | |
129 | |
130 class MarkObjectsSkippingKlassesOopClosure : public OopClosure { | |
131 public: | |
132 void do_oop(oop* pobj) { | |
133 oop obj = *pobj; | |
134 if (obj != NULL && | |
135 !obj->is_klass()) { | |
136 mark_object(obj); | |
137 } | |
138 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
139 void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } |
0 | 140 }; |
141 | |
142 | |
143 static void mark_object_recursive_skipping_klasses(oop obj) { | |
144 mark_object(obj); | |
145 if (obj != NULL) { | |
146 MarkObjectsSkippingKlassesOopClosure mark_all; | |
147 obj->oop_iterate(&mark_all); | |
148 } | |
149 } | |
150 | |
151 | |
152 // Closure: mark common read-only objects, excluding symbols | |
153 | |
class MarkCommonReadOnly : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_all;
public:
  void do_object(oop obj) {

    // Mark all constMethod objects.

    if (obj->is_constMethod()) {
      mark_object(obj);
      mark_object(constMethodOop(obj)->stackmap_data());
      // Exception tables are needed by ci code during compilation.
      mark_object(constMethodOop(obj)->exception_table());
    }

    // Mark objects referenced by klass objects which are read-only.

    else if (obj->is_klass()) {
      Klass* k = Klass::cast((klassOop)obj);
      mark_object(k->secondary_supers());

      // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
      // it is never modified. Otherwise, they will be pre-marked; the
      // GC marking phase will skip them; and by skipping them will fail
      // to mark the methods objects referenced by the array.

      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = instanceKlass::cast((klassOop)obj);
        mark_object(ik->method_ordering());
        mark_object(ik->local_interfaces());
        mark_object(ik->transitive_interfaces());
        mark_object(ik->fields());

        mark_object(ik->class_annotations());

        // Annotation arrays reference further arrays; also mark their
        // direct referents (skipping klasses).
        mark_object_recursive_skipping_klasses(ik->fields_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_parameter_annotations());
        mark_object_recursive_skipping_klasses(ik->methods_default_annotations());

        typeArrayOop inner_classes = ik->inner_classes();
        if (inner_classes != NULL) {
          mark_object(inner_classes);
        }
      }
    }
  }
};
202 | |
203 | |
204 // Closure: mark common symbols | |
205 | |
class MarkCommonSymbols : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_all;
public:
  void do_object(oop obj) {

    // Mark symbols referred to by method objects.

    if (obj->is_method()) {
      methodOop m = methodOop(obj);
      mark_object(m->name());
      mark_object(m->signature());
    }

    // Mark symbols referenced by klass objects which are read-only.

    else if (obj->is_klass()) {

      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = instanceKlass::cast((klassOop)obj);
        mark_object(ik->name());
        mark_object(ik->generic_signature());
        mark_object(ik->source_file_name());
        mark_object(ik->source_debug_extension());

        // Mark the inner-class simple-name symbols recorded in the
        // InnerClasses attribute (stride is one attribute record).
        typeArrayOop inner_classes = ik->inner_classes();
        if (inner_classes != NULL) {
          int length = inner_classes->length();
          for (int i = 0;
                   i < length;
                   i += instanceKlass::inner_class_next_offset) {
            int ioff = i + instanceKlass::inner_class_inner_name_offset;
            int index = inner_classes->ushort_at(ioff);
            if (index != 0) {
              mark_object(ik->constants()->symbol_at(index));
            }
          }
        }
        ik->field_names_and_sigs_iterate(&mark_all);
      }
    }

    // Mark symbols referenced by other constantpool entries.

    if (obj->is_constantPool()) {
      constantPoolOop(obj)->shared_symbols_iterate(&mark_all);
    }
  }
};
255 | |
256 | |
257 // Closure: mark char arrays used by strings | |
258 | |
259 class MarkStringValues : public ObjectClosure { | |
260 private: | |
261 MarkObjectsOopClosure mark_all; | |
262 public: | |
263 void do_object(oop obj) { | |
264 | |
265 // Character arrays referenced by String objects are read-only. | |
266 | |
267 if (java_lang_String::is_instance(obj)) { | |
268 mark_object(java_lang_String::value(obj)); | |
269 } | |
270 } | |
271 }; | |
272 | |
273 | |
#ifdef DEBUG
// Closure: Check for objects left in the heap which have not been moved.

class CheckRemainingObjects : public ObjectClosure {
private:
  int count;

public:
  CheckRemainingObjects() : count(0) {}

  void do_object(oop obj) {
    // Anything neither shared nor forwarded was left behind.
    if (obj->is_shared() || obj->is_forwarded()) {
      return;
    }
    ++count;
    if (Verbose) {
      tty->print("Unreferenced object: ");
      obj->print_on(tty);
    }
  }

  void status() {
    tty->print_cr("%d objects no longer referenced, not shared.", count);
  }
};
#endif
302 | |
303 | |
304 // Closure: Mark remaining objects read-write, except Strings. | |
305 | |
class MarkReadWriteObjects : public ObjectClosure {
private:
  MarkObjectsOopClosure mark_objects;
public:
  void do_object(oop obj) {

    // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
    // it is never modified. Otherwise, they will be pre-marked; the
    // GC marking phase will skip them; and by skipping them will fail
    // to mark the methods objects referenced by the array.

    if (obj->is_klass()) {
      mark_object(obj);
      Klass* k = klassOop(obj)->klass_part();
      mark_object(k->java_mirror());
      if (obj->blueprint()->oop_is_instanceKlass()) {
        instanceKlass* ik = (instanceKlass*)k;
        mark_object(ik->methods());
        mark_object(ik->constants());
      }
      if (obj->blueprint()->oop_is_javaArray()) {
        arrayKlass* ak = (arrayKlass*)k;
        mark_object(ak->component_mirror());
      }
      return;
    }

    // Mark constantPool tags and the constantPoolCache.

    else if (obj->is_constantPool()) {
      constantPoolOop pool = constantPoolOop(obj);
      mark_object(pool->cache());
      pool->shared_tags_iterate(&mark_objects);
      return;
    }

    // Mark all method objects.

    if (obj->is_method()) {
      mark_object(obj);
    }
  }
};
349 | |
350 | |
351 // Closure: Mark String objects read-write. | |
352 | |
353 class MarkStringObjects : public ObjectClosure { | |
354 private: | |
355 MarkObjectsOopClosure mark_objects; | |
356 public: | |
357 void do_object(oop obj) { | |
358 | |
359 // Mark String objects referenced by constant pool entries. | |
360 | |
361 if (obj->is_constantPool()) { | |
362 constantPoolOop pool = constantPoolOop(obj); | |
363 pool->shared_strings_iterate(&mark_objects); | |
364 return; | |
365 } | |
366 } | |
367 }; | |
368 | |
369 | |
370 // Move objects matching specified type (ie. lock_bits) to the specified | |
371 // space. | |
372 | |
class MoveMarkedObjects : public ObjectClosure {
private:
  OffsetTableContigSpace* _space;      // destination shared space
  bool _read_only;                     // true when filling the read-only space

public:
  MoveMarkedObjects(OffsetTableContigSpace* space, bool read_only) {
    _space = space;
    _read_only = read_only;
  }

  void do_object(oop obj) {
    if (obj->is_shared()) {
      return;
    }
    // Move only marked objects that have not already been moved
    // (forwardee() != NULL means a previous pass copied it).
    if (obj->is_gc_marked() && obj->forwardee() == NULL) {
      int s = obj->size();
      oop sh_obj = (oop)_space->allocate(s);
      if (sh_obj == NULL) {
        if (_read_only) {
          warning("\nThe permanent generation read only space is not large "
                  "enough to \npreload requested classes. Use "
                  "-XX:SharedReadOnlySize= to increase \nthe initial "
                  "size of the read only space.\n");
        } else {
          warning("\nThe permanent generation read write space is not large "
                  "enough to \npreload requested classes. Use "
                  "-XX:SharedReadWriteSize= to increase \nthe initial "
                  "size of the read write space.\n");
        }
        exit(2);
      }
      if (PrintSharedSpaces && Verbose && WizardMode) {
        tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s", obj, sh_obj,
                      (_read_only ? "ro" : "rw"));
      }
      Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
      // Leave a forwarding pointer in the old copy so references can be
      // patched later (see ResolveForwardingClosure).
      obj->forward_to(sh_obj);
      if (_read_only) {
        // Readonly objects: set hash value to self pointer and make gc_marked.
        sh_obj->forward_to(sh_obj);
      } else {
        sh_obj->init_mark();
      }
    }
  }
};
420 | |
421 static void mark_and_move(oop obj, MoveMarkedObjects* move) { | |
422 if (mark_object(obj)) move->do_object(obj); | |
423 } | |
424 | |
// Promotion-ordering policies, compared against SharedOptimizeColdStartPolicy
// in mark_and_move_for_policy() below.
enum order_policy {
  OP_favor_startup = 0,
  OP_balanced      = 1,
  OP_favor_runtime = 2
};
430 | |
431 static void mark_and_move_for_policy(order_policy policy, oop obj, MoveMarkedObjects* move) { | |
432 if (SharedOptimizeColdStartPolicy >= policy) mark_and_move(obj, move); | |
433 } | |
434 | |
// Mark-and-move into the read-only space in a deliberate touch order:
// the sequence of mark_and_move_for_policy calls below determines the
// final object layout, so DO NOT REORDER them.
class MarkAndMoveOrderedReadOnly : public ObjectClosure {
private:
  MoveMarkedObjects *_move_ro;

public:
  MarkAndMoveOrderedReadOnly(MoveMarkedObjects *move_ro) : _move_ro(move_ro) {}

  void do_object(oop obj) {
    if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      int i;

      mark_and_move_for_policy(OP_favor_startup, ik->name(), _move_ro);

      // Process super classes first so their data precedes this class's.
      if (ik->super() != NULL) {
        do_object(ik->super());
      }

      objArrayOop interfaces = ik->local_interfaces();
      mark_and_move_for_policy(OP_favor_startup, interfaces, _move_ro);
      for(i = 0; i < interfaces->length(); i++) {
        klassOop k = klassOop(interfaces->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, k->klass_part()->name(), _move_ro);
        do_object(k);
      }

      objArrayOop methods = ik->methods();
      for(i = 0; i < methods->length(); i++) {
        methodOop m = methodOop(methods->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, m->constMethod(), _move_ro);
        mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->exception_table(), _move_ro);
        mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->stackmap_data(), _move_ro);

        // We don't move the name symbolOop here because it may invalidate
        // method ordering, which is dependent on the address of the name
        // symbolOop. It will get promoted later with the other symbols.
        // Method name is rarely accessed during classloading anyway.
        // mark_and_move_for_policy(OP_balanced, m->name(), _move_ro);

        mark_and_move_for_policy(OP_favor_startup, m->signature(), _move_ro);
      }

      mark_and_move_for_policy(OP_favor_startup, ik->transitive_interfaces(), _move_ro);
      mark_and_move_for_policy(OP_favor_startup, ik->fields(), _move_ro);

      mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->method_ordering(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->class_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->fields_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_parameter_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->methods_default_annotations(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, ik->inner_classes(), _move_ro);
      // NOTE(review): secondary_supers() is touched a second time here
      // (also above); harmless because mark_and_move is idempotent, but
      // it looks unintended -- confirm before cleaning up.
      mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
    }
  }
};
492 | |
// Read-write counterpart of MarkAndMoveOrderedReadOnly; same caveat:
// the touch order below determines layout -- DO NOT REORDER.
class MarkAndMoveOrderedReadWrite: public ObjectClosure {
private:
  MoveMarkedObjects *_move_rw;

public:
  MarkAndMoveOrderedReadWrite(MoveMarkedObjects *move_rw) : _move_rw(move_rw) {}

  void do_object(oop obj) {
    if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      int i;

      mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop(), _move_rw);

      // Process super classes first.
      if (ik->super() != NULL) {
        do_object(ik->super());
      }

      objArrayOop interfaces = ik->local_interfaces();
      for(i = 0; i < interfaces->length(); i++) {
        klassOop k = klassOop(interfaces->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, k, _move_rw);
        do_object(k);
      }

      objArrayOop methods = ik->methods();
      mark_and_move_for_policy(OP_favor_startup, methods, _move_rw);
      for(i = 0; i < methods->length(); i++) {
        methodOop m = methodOop(methods->obj_at(i));
        mark_and_move_for_policy(OP_favor_startup, m, _move_rw);
        mark_and_move_for_policy(OP_favor_startup, ik->constants(), _move_rw);      // idempotent
        mark_and_move_for_policy(OP_balanced, ik->constants()->cache(), _move_rw);  // idempotent
        mark_and_move_for_policy(OP_balanced, ik->constants()->tags(), _move_rw);   // idempotent
      }

      mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop()->klass(), _move_rw);
      mark_and_move_for_policy(OP_favor_startup, ik->constants()->klass(), _move_rw);

      // Although Java mirrors are marked in MarkReadWriteObjects,
      // apparently they were never moved into shared spaces since
      // MoveMarkedObjects skips marked instance oops. This may
      // be a bug in the original implementation or simply the vestige
      // of an abandoned experiment. Nevertheless we leave a hint
      // here in case this capability is ever correctly implemented.
      //
      // mark_and_move_for_policy(OP_favor_runtime, ik->java_mirror(), _move_rw);
    }
  }

};
543 | |
544 // Adjust references in oops to refer to shared spaces. | |
545 | |
class ResolveForwardingClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    oop obj = *p;
    // NOTE(review): is_shared() is invoked before the NULL check below;
    // this is only safe if is_shared() is an address-range test that
    // tolerates NULL -- confirm against oopDesc::is_shared().
    if (!obj->is_shared()) {
      if (obj != NULL) {
        // Rewrite the reference to point at the object's shared copy.
        oop f = obj->forwardee();
        guarantee(f->is_shared(), "Oop doesn't refer to shared space.");
        *p = f;
      }
    }
  }
  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};
560 | |
561 | |
// Re-sort the methods of ik (super classes first, since their ordering is
// a prerequisite) and rebuild the itable, which depends on methods order.
void sort_methods(instanceKlass* ik, TRAPS) {
  klassOop super = ik->super();
  if (super != NULL) {
    sort_methods(instanceKlass::cast(super), THREAD);
  }

  // The methods array must be ordered by symbolOop address. (See
  // classFileParser.cpp where methods in a class are originally
  // sorted.) Since objects have just been reordered, this must be
  // corrected.
  methodOopDesc::sort_methods(ik->methods(),
                              ik->methods_annotations(),
                              ik->methods_parameter_annotations(),
                              ik->methods_default_annotations(),
                              true /* idempotent, slow */);

  // Itable indices are calculated based on methods array order
  // (see klassItable::compute_itable_index()). Must reinitialize.
  // We assume that since checkconstraints is false, this method
  // cannot throw an exception. An exception here would be
  // problematic since this is the VMThread, not a JavaThread.
  ik->itable()->initialize_itable(false, THREAD);
}
585 | |
586 // Sort methods if the oop is an instanceKlass. | |
587 | |
588 class SortMethodsClosure: public ObjectClosure { | |
589 private: | |
590 Thread* _thread; | |
591 | |
592 public: | |
593 SortMethodsClosure(Thread* thread) : _thread(thread) {} | |
594 | |
595 void do_object(oop obj) { | |
596 // instanceKlass objects need some adjustment. | |
597 if (obj->blueprint()->oop_is_instanceKlass()) { | |
598 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
599 | |
600 sort_methods(ik, _thread); | |
601 } | |
602 } | |
603 }; | |
604 | |
605 | |
606 // Adjust references in oops to refer to shared spaces. | |
607 | |
class PatchOopsClosure: public ObjectClosure {
private:
  Thread* _thread;
  ResolveForwardingClosure resolve;

public:
  PatchOopsClosure(Thread* thread) : _thread(thread) {}

  void do_object(oop obj) {
    // Rewrite the header (klass) reference and every field reference to
    // the forwarded copies in the shared spaces.
    obj->oop_iterate_header(&resolve);
    obj->oop_iterate(&resolve);

    assert(obj->klass()->is_shared(), "Klass not pointing into shared space.");

    // If the object is a Java object or class which might (in the
    // future) contain a reference to a young gen object, add it to the
    // list.

    if (obj->is_klass() || obj->is_instance()) {
      if (obj->is_klass() ||
          obj->is_a(SystemDictionary::class_klass()) ||
          obj->is_a(SystemDictionary::throwable_klass())) {
        // Do nothing
      }
      else if (obj->is_a(SystemDictionary::string_klass())) {
        // immutable objects.
      } else {
        // someone added an object we hadn't accounted for.
        ShouldNotReachHere();
      }
    }
  }
};
641 | |
642 | |
643 // Empty the young and old generations. | |
644 | |
class ClearSpaceClosure : public SpaceClosure {
public:
  void do_space(Space* s) {
    // Clear the space, requesting that the freed area be mangled
    // (SpaceDecorator::Mangle) so stale references are caught early.
    s->clear(SpaceDecorator::Mangle);
  }
};
651 | |
652 | |
653 // Closure for serializing initialization data out to a data area to be | |
654 // written to the shared file. | |
655 | |
class WriteClosure : public SerializeOopClosure {
private:
  oop* top;    // next free oop-sized slot in the misc-data area
  char* end;   // exclusive upper bound of the area

  void out_of_space() {
    warning("\nThe shared miscellaneous data space is not large "
            "enough to \npreload requested classes. Use "
            "-XX:SharedMiscDataSize= to increase \nthe initial "
            "size of the miscellaneous data space.\n");
    exit(2);
  }


  // Abort if one more oop-sized slot would overflow the area.
  inline void check_space() {
    if ((char*)top + sizeof(oop) > end) {
      out_of_space();
    }
  }


public:
  WriteClosure(char* md_top, char* md_end) {
    top = (oop*)md_top;
    end = md_end;
  }

  char* get_top() { return (char*)top; }

  void do_oop(oop* p) {
    check_space();
    oop obj = *p;
    assert(obj->is_oop_or_null(), "invalid oop");
    assert(obj == NULL || obj->is_shared(),
           "Oop in shared space not pointing into shared space.");
    *top = obj;
    ++top;
  }

  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }

  // Scalars are widened to one oop-sized slot each so the reading side
  // can walk the stream with a single stride.
  void do_int(int* p) {
    check_space();
    *top = (oop)(intptr_t)*p;
    ++top;
  }

  void do_size_t(size_t* p) {
    check_space();
    *top = (oop)(intptr_t)*p;
    ++top;
  }

  void do_ptr(void** p) {
    check_space();
    *top = (oop)*p;
    ++top;
  }

  void do_ptr(HeapWord** p) { do_ptr((void **) p); }

  void do_tag(int tag) {
    check_space();
    *top = (oop)(intptr_t)tag;
    ++top;
  }

  // A region is emitted as a size tag followed by its raw words.
  void do_region(u_char* start, size_t size) {
    if ((char*)top + size > end) {
      out_of_space();
    }
    assert((intptr_t)start % sizeof(oop) == 0, "bad alignment");
    assert(size % sizeof(oop) == 0, "bad size");
    do_tag((int)size);
    while (size > 0) {
      *top = *(oop*)start;
      ++top;
      start += sizeof(oop);
      size -= sizeof(oop);
    }
  }

  // This closure writes; the matching read-side closure returns true.
  bool reading() const { return false; }
};
740 | |
741 | |
742 class ResolveConstantPoolsClosure : public ObjectClosure { | |
743 private: | |
744 TRAPS; | |
745 public: | |
746 ResolveConstantPoolsClosure(Thread *t) { | |
747 __the_thread__ = t; | |
748 } | |
749 void do_object(oop obj) { | |
750 if (obj->is_constantPool()) { | |
751 constantPoolOop cpool = (constantPoolOop)obj; | |
752 int unresolved = cpool->pre_resolve_shared_klasses(THREAD); | |
753 } | |
754 } | |
755 }; | |
756 | |
757 | |
758 // Print a summary of the contents of the read/write spaces to help | |
759 // identify objects which might be able to be made read-only. At this | |
760 // point, the objects have been written, and we can trash them as | |
761 // needed. | |
762 | |
static void print_contents() {
  if (PrintSharedSpaces) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();

    // High level summary of the read-only space:

    ClassifyObjectClosure coc;
    tty->cr(); tty->print_cr("ReadOnly space:");
    gen->ro_space()->object_iterate(&coc);
    coc.print();

    // High level summary of the read-write space:

    coc.reset();
    tty->cr(); tty->print_cr("ReadWrite space:");
    gen->rw_space()->object_iterate(&coc);
    coc.print();

    // Reset counters

    ClearAllocCountClosure cacc;
    gen->ro_space()->object_iterate(&cacc);
    gen->rw_space()->object_iterate(&cacc);
    coc.reset();

    // Lower level summary of the read-only space:

    gen->ro_space()->object_iterate(&coc);
    tty->cr(); tty->print_cr("ReadOnly space:");
    ClassifyInstanceKlassClosure cikc;
    // NOTE(review): the next line iterates the READ-WRITE space although
    // this section is labeled "ReadOnly space:" -- looks like it should
    // be ro_space(); confirm intent before changing output.
    gen->rw_space()->object_iterate(&cikc);
    cikc.print();

    // Reset counters

    gen->ro_space()->object_iterate(&cacc);
    gen->rw_space()->object_iterate(&cacc);
    coc.reset();

    // Lower level summary of the read-write space:

    gen->rw_space()->object_iterate(&coc);
    cikc.reset();
    tty->cr(); tty->print_cr("ReadWrite space:");
    gen->rw_space()->object_iterate(&cikc);
    cikc.print();
  }
}
812 | |
813 | |
814 // Patch C++ vtable pointer in klass oops. | |
815 | |
816 // Klass objects contain references to c++ vtables in the JVM library. | |
817 // Fix them to point to our constructed vtables. However, don't iterate | |
818 // across the space while doing this, as that causes the vtables to be | |
819 // patched, undoing our useful work. Instead, iterate to make a list, | |
820 // then use the list to do the fixing. | |
408 | 821 // |
822 // Our constructed vtables: | |
823 // Dump time: | |
824 // 1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs | |
825 // 2. generate_vtable_methods: create jump table, appended to above vtbl_list | |
826 // 3. PatchKlassVtables: for Klass list, patch the vtable entry to point to jump table | |
827 // rather than to current vtbl | |
828 // Table layout: NOTE FIXED SIZE | |
829 // 1. vtbl pointers | |
830 // 2. #Klass X #virtual methods per Klass | |
831 // 1 entry for each, in the order: | |
832 // Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method<num_virtuals> entry | |
833 // Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method<num_virtuals> entry | |
834 // ... | |
835 // Klass<vtbl_list_size>:method1 entry, Klass<vtbl_list_size>:method2 entry, | |
836 // ... Klass<vtbl_list_size>:method<num_virtuals> entry | |
837 // Sample entry: (Sparc): | |
838 // save(sp, -256, sp) | |
839 // ba,pt common_code | |
840 // mov XXX, %L0 %L0 gets: Klass index <<8 + method index (note: max method index 255) | |
841 // | |
842 // Restore time: | |
843 // 1. initialize_oops: reserve space for table | |
844 // 2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text | |
845 // | |
846 // Execution time: | |
847 // First virtual method call for any object of these Klass types: | |
848 // 1. object->klass->klass_part | |
849 // 2. vtable entry for that klass_part points to the jump table entries | |
850 // 3. branches to common_code with %O0/klass_part, %L0: Klass index <<8 + method index | |
851 // 4. common_code: | |
852 // Get address of new vtbl pointer for this Klass from updated table | |
853 // Update new vtbl pointer in the Klass: future virtual calls go direct | |
854 // Jump to method, using new vtbl pointer and method index | |
0 | 855 |
class PatchKlassVtables: public ObjectClosure {
private:
  void* _vtbl_ptr;                          // base of the constructed vtable/jump-table area
  VirtualSpace* _md_vs;                     // misc-data virtual space (not used in this closure)
  GrowableArray<klassOop>* _klass_objects;  // klasses collected for later patching

public:

  PatchKlassVtables(void* vtbl_ptr, VirtualSpace* md_vs) {
    _vtbl_ptr = vtbl_ptr;
    _md_vs = md_vs;
    _klass_objects = new GrowableArray<klassOop>();
  }


  // Pass 1: only collect klass oops -- patching during iteration would
  // disturb the walk (see the comment block above this class).
  void do_object(oop obj) {
    if (obj->is_klass()) {
      _klass_objects->append(klassOop(obj));
    }
  }


  // Pass 2: replace each klass's C++ vtable pointer with the address of
  // the matching jump-table slice inside _vtbl_ptr.
  void patch(void** vtbl_list, int vtbl_list_size) {
    for (int i = 0; i < _klass_objects->length(); ++i) {
      klassOop obj = (klassOop)_klass_objects->at(i);
      Klass* k = obj->klass_part();
      void* v = *(void**)k;   // current (process-local) vtable pointer

      int n;
      for (n = 0; n < vtbl_list_size; ++n) {
        // Zap the pointer each iteration; it is only rewritten when a
        // match is found (the guarantee below catches the no-match case).
        *(void**)k = NULL;
        if (vtbl_list[n] == v) {
          *(void**)k = (void**)_vtbl_ptr +
                       (n * CompactingPermGenGen::num_virtuals);
          break;
        }
      }
      guarantee(n < vtbl_list_size, "unable to find matching vtbl pointer");
    }
  }
};
897 | |
898 | |
899 // Populate the shared space. | |
900 | |
// VM operation that performs the entire shared-space dump: marks and moves
// objects into the read-only and read-write shared spaces, resolves
// forwarding pointers, rewrites the hash tables into shareable form, builds
// the self-patching vtable machinery, and writes the archive file.
// Must run in the VM thread at a safepoint.
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<oop> *_class_promote_order;   // classes in load order (may be used for layout)
  OffsetTableContigSpace* _ro_space;          // shared read-only space
  OffsetTableContigSpace* _rw_space;          // shared read-write space
  VirtualSpace* _md_vs;                       // shared misc data
  VirtualSpace* _mc_vs;                       // shared misc code

public:
  VM_PopulateDumpSharedSpace(GrowableArray<oop> *class_promote_order,
                             OffsetTableContigSpace* ro_space,
                             OffsetTableContigSpace* rw_space,
                             VirtualSpace* md_vs, VirtualSpace* mc_vs) {
    _class_promote_order = class_promote_order;
    _ro_space = ro_space;
    _rw_space = rw_space;
    _md_vs = md_vs;
    _mc_vs = mc_vs;
  }

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit() {
    Thread* THREAD = VMThread::vm_thread();
    NOT_PRODUCT(SystemDictionary::verify();)
    // The following guarantee is meant to ensure that no loader constraints
    // exist yet, since the constraints table is not shared. This becomes
    // more important now that we don't re-initialize vtables/itables for
    // shared classes at runtime, where constraints were previously created.
    guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
              "loader constraints are not saved");
    GenCollectedHeap* gch = GenCollectedHeap::heap();

    // At this point, many classes have been loaded.

    // Update all the fingerprints in the shared methods.

    tty->print("Calculating fingerprints ... ");
    FingerprintMethodsClosure fpmc;
    gch->object_iterate(&fpmc);
    tty->print_cr("done. ");

    // Remove all references outside the heap.

    tty->print("Removing unshareable information ... ");
    RemoveUnshareableInfoClosure ruic;
    gch->object_iterate(&ruic);
    tty->print_cr("done. ");

    // Move the objects in three passes.

    MarkObjectsOopClosure mark_all;
    MarkCommonReadOnly mark_common_ro;
    MarkCommonSymbols mark_common_symbols;
    MarkStringValues mark_string_values;
    MarkReadWriteObjects mark_rw;
    MarkStringObjects mark_strings;
    MoveMarkedObjects move_ro(_ro_space, true);
    MoveMarkedObjects move_rw(_rw_space, false);

    // The SharedOptimizeColdStart VM option governs the new layout
    // algorithm for promoting classes into the shared archive.
    // The general idea is to minimize cold start time by laying
    // out the objects in the order they are accessed at startup time.
    // By doing this we are trying to eliminate out-of-order accesses
    // in the shared archive.  This benefits cold startup time by making
    // disk reads as sequential as possible during class loading and
    // bootstrapping activities.  There may also be a small secondary
    // effect of better "packing" of more commonly used data on a smaller
    // number of pages, although no direct benefit has been measured from
    // this effect.
    //
    // At the class level of granularity, the promotion order is dictated
    // by the classlist file whose generation is discussed elsewhere.
    //
    // At smaller granularity, optimal ordering was determined by an
    // offline analysis of object access order in the shared archive.
    // The dbx watchpoint facility, combined with SA post-processing,
    // was used to observe common access patterns primarily during
    // classloading.  This information was used to craft the promotion
    // order seen in the following closures.
    //
    // The observed access order is mostly governed by what happens
    // in SystemDictionary::load_shared_class().  NOTE WELL - care
    // should be taken when making changes to this method, because it
    // may invalidate assumptions made about access order!
    //
    // (Ideally, there would be a better way to manage changes to
    //  the access order.  Unfortunately a generic in-VM solution for
    //  dynamically observing access order and optimizing shared
    //  archive layout is pretty difficult.  We go with the static
    //  analysis because the code is fairly mature at this point
    //  and we're betting that the access order won't change much.)

    MarkAndMoveOrderedReadOnly  mark_and_move_ordered_ro(&move_ro);
    MarkAndMoveOrderedReadWrite mark_and_move_ordered_rw(&move_rw);

    // Phase 1a: move commonly used read-only objects to the read-only space.

    if (SharedOptimizeColdStart) {
      tty->print("Moving pre-ordered read-only objects to shared space at " PTR_FORMAT " ... ",
                 _ro_space->top());
      for (int i = 0; i < _class_promote_order->length(); i++) {
        oop obj = _class_promote_order->at(i);
        mark_and_move_ordered_ro.do_object(obj);
      }
      tty->print_cr("done. ");
    }

    tty->print("Moving read-only objects to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    gch->object_iterate(&mark_common_ro);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 1b: move commonly used symbols to the read-only space.

    tty->print("Moving common symbols to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    gch->object_iterate(&mark_common_symbols);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 1c: move remaining symbols to the read-only space
    // (e.g. String initializers).

    tty->print("Moving remaining symbols to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    vmSymbols::oops_do(&mark_all, true);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 1d: move String character arrays to the read-only space.

    tty->print("Moving string char arrays to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    gch->object_iterate(&mark_string_values);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");

    // Phase 2: move all remaining symbols to the read-only space.  The
    // remaining symbols are assumed to be string initializers no longer
    // referenced.

    // Remember where the extra symbols begin; SymbolTable::reverse() below
    // needs this boundary.
    void* extra_symbols = _ro_space->top();
    tty->print("Moving additional symbols to shared space at " PTR_FORMAT " ... ",
               _ro_space->top());
    SymbolTable::oops_do(&mark_all);
    gch->object_iterate(&move_ro);
    tty->print_cr("done. ");
    tty->print_cr("Read-only space ends at " PTR_FORMAT ", %d bytes.",
                  _ro_space->top(), _ro_space->used());

    // Phase 3: move read-write objects to the read-write space, except
    // Strings.

    if (SharedOptimizeColdStart) {
      tty->print("Moving pre-ordered read-write objects to shared space at " PTR_FORMAT " ... ",
                 _rw_space->top());
      for (int i = 0; i < _class_promote_order->length(); i++) {
        oop obj = _class_promote_order->at(i);
        mark_and_move_ordered_rw.do_object(obj);
      }
      tty->print_cr("done. ");
    }
    tty->print("Moving read-write objects to shared space at " PTR_FORMAT " ... ",
               _rw_space->top());
    Universe::oops_do(&mark_all, true);
    SystemDictionary::oops_do(&mark_all);
    // The detail message of the preallocated ArithmeticException must also
    // be shared, since the exception instance itself is.
    oop tmp = Universe::arithmetic_exception_instance();
    mark_object(java_lang_Throwable::message(tmp));
    gch->object_iterate(&mark_rw);
    gch->object_iterate(&move_rw);
    tty->print_cr("done. ");

    // Phase 4: move String objects to the read-write space.

    tty->print("Moving String objects to shared space at " PTR_FORMAT " ... ",
               _rw_space->top());
    StringTable::oops_do(&mark_all);
    gch->object_iterate(&mark_strings);
    gch->object_iterate(&move_rw);
    tty->print_cr("done. ");
    tty->print_cr("Read-write space ends at " PTR_FORMAT ", %d bytes.",
                  _rw_space->top(), _rw_space->used());

#ifdef DEBUG
    // Check: scan for objects which were not moved.
    // NOTE(review): HotSpot debug builds conventionally define ASSERT rather
    // than DEBUG; confirm DEBUG is actually defined in the intended builds,
    // otherwise this check never runs.

    CheckRemainingObjects check_objects;
    gch->object_iterate(&check_objects);
    check_objects.status();
#endif

    // Resolve forwarding in objects and saved C++ structures
    tty->print("Updating references to shared objects ... ");
    ResolveForwardingClosure resolve;
    Universe::oops_do(&resolve);
    SystemDictionary::oops_do(&resolve);
    StringTable::oops_do(&resolve);
    SymbolTable::oops_do(&resolve);
    vmSymbols::oops_do(&resolve);

    // Set up the shared data and shared code segments.

    char* md_top = _md_vs->low();
    char* md_end = _md_vs->high();
    char* mc_top = _mc_vs->low();
    char* mc_end = _mc_vs->high();

    // Reserve space for the list of klassOops whose vtables are used
    // for patching others as needed.

    void** vtbl_list = (void**)md_top;
    int vtbl_list_size = CompactingPermGenGen::vtbl_list_size;
    Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);

    md_top += vtbl_list_size * sizeof(void*);
    void* vtable = md_top;

    // Reserve space for a new dummy vtable for klass objects in the
    // heap.  Generate self-patching vtable entries.

    CompactingPermGenGen::generate_vtable_methods(vtbl_list,
                                                  &vtable,
                                                  &md_top, md_end,
                                                  &mc_top, mc_end);

    // Fix (forward) all of the references in these shared objects (which
    // are required to point ONLY to objects in the shared spaces).
    // Also, create a list of all objects which might later contain a
    // reference to a younger generation object.

    CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
    PatchOopsClosure patch(THREAD);
    gen->ro_space()->object_iterate(&patch);
    gen->rw_space()->object_iterate(&patch);

    // Previously method sorting was done concurrently with forwarding
    // pointer resolution in the shared spaces.  This imposed an ordering
    // restriction in that methods were required to be promoted/patched
    // before their holder classes.  (Because constant pool pointers in
    // methodKlasses are required to be resolved before their holder class
    // is visited for sorting, otherwise methods are sorted by incorrect,
    // pre-forwarding addresses.)
    //
    // Now, we reorder methods as a separate step after ALL forwarding
    // pointer resolution, so that methods can be promoted in any order
    // with respect to their holder classes.

    SortMethodsClosure sort(THREAD);
    gen->ro_space()->object_iterate(&sort);
    gen->rw_space()->object_iterate(&sort);
    tty->print_cr("done. ");
    tty->cr();

    // Reorder the system dictionary.  (Moving the symbol oops affects
    // how the hash table indices are calculated.)

    SystemDictionary::reorder_dictionary();

    // Empty the non-shared heap (because most of the objects were
    // copied out, and the remainder cannot be considered valid oops).

    ClearSpaceClosure csc;
    for (int i = 0; i < gch->n_gens(); ++i) {
      gch->get_gen(i)->space_iterate(&csc);
    }
    csc.do_space(gen->the_space());
    NOT_PRODUCT(SystemDictionary::verify();)

    // Copy the String table, the symbol table, and the system
    // dictionary to the shared space in usable form.  Copy the hashtable
    // buckets first [read-write], then copy the linked lists of entries
    // [read-only].

    SymbolTable::reverse(extra_symbols);
    NOT_PRODUCT(SymbolTable::verify());
    SymbolTable::copy_buckets(&md_top, md_end);

    StringTable::reverse();
    NOT_PRODUCT(StringTable::verify());
    StringTable::copy_buckets(&md_top, md_end);

    SystemDictionary::reverse();
    SystemDictionary::copy_buckets(&md_top, md_end);

    ClassLoader::verify();
    ClassLoader::copy_package_info_buckets(&md_top, md_end);
    ClassLoader::verify();

    SymbolTable::copy_table(&md_top, md_end);
    StringTable::copy_table(&md_top, md_end);
    SystemDictionary::copy_table(&md_top, md_end);
    ClassLoader::verify();
    ClassLoader::copy_package_info_table(&md_top, md_end);
    ClassLoader::verify();

    // Print debug data.

    if (PrintSharedSpaces) {
      const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " bytes allocated at " PTR_FORMAT ".";
      tty->print_cr(fmt, "ro", _ro_space->used(), _ro_space->capacity(),
                    _ro_space->bottom());
      tty->print_cr(fmt, "rw", _rw_space->used(), _rw_space->capacity(),
                    _rw_space->bottom());
    }

    // Write the oop data to the output array.

    WriteClosure wc(md_top, md_end);
    CompactingPermGenGen::serialize_oops(&wc);
    md_top = wc.get_top();

    // Update the vtable pointers in all of the Klass objects in the
    // heap.  They should point to newly generated vtable.

    PatchKlassVtables pkvt(vtable, _md_vs);
    _rw_space->object_iterate(&pkvt);
    pkvt.patch(vtbl_list, vtbl_list_size);

    // Stash the real vtable pointers aside and zero the list in the image,
    // so the archive does not contain process-specific addresses; the list
    // is restored after the file has been written (see memmove below).
    // NOTE(review): saved_vtbl is malloc'd and never freed; benign only
    // because the dumping VM exits immediately after the dump completes.
    char* saved_vtbl = (char*)malloc(vtbl_list_size * sizeof(void*));
    memmove(saved_vtbl, vtbl_list, vtbl_list_size * sizeof(void*));
    memset(vtbl_list, 0, vtbl_list_size * sizeof(void*));

    // Create and write the archive file that maps the shared spaces.

    FileMapInfo* mapinfo = new FileMapInfo();
    mapinfo->populate_header(gch->gen_policy()->max_alignment());

    // Pass 1 - update file offsets in header.
    mapinfo->write_header();
    mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
    _ro_space->set_saved_mark();
    mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
    _rw_space->set_saved_mark();
    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
                          pointer_delta(md_top, _md_vs->low(), sizeof(char)),
                          SharedMiscDataSize,
                          false, false);
    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
                          pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
                          SharedMiscCodeSize,
                          true, true);

    // Pass 2 - write data.
    mapinfo->open_for_write();
    mapinfo->write_header();
    mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
    mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
    mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
                          pointer_delta(md_top, _md_vs->low(), sizeof(char)),
                          SharedMiscDataSize,
                          false, false);
    mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
                          pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
                          SharedMiscCodeSize,
                          true, true);
    mapinfo->close();

    // Summarize heap.
    memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
    print_contents();
  }
}; // class VM_PopulateDumpSharedSpace
1265 | |
1266 | |
1267 // Populate the shared spaces and dump to a file. | |
1268 | |
1269 jint CompactingPermGenGen::dump_shared(GrowableArray<oop>* class_promote_order, TRAPS) { | |
1270 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1271 | |
1272 // Calculate hash values for all of the (interned) strings to avoid | |
1273 // writes to shared pages in the future. | |
1274 | |
1275 tty->print("Calculating hash values for String objects .. "); | |
1276 StringHashCodeClosure shcc(THREAD); | |
1277 StringTable::oops_do(&shcc); | |
1278 tty->print_cr("done. "); | |
1279 | |
1280 CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen(); | |
1281 VM_PopulateDumpSharedSpace op(class_promote_order, | |
1282 gen->ro_space(), gen->rw_space(), | |
1283 gen->md_space(), gen->mc_space()); | |
1284 VMThread::execute(&op); | |
1285 return JNI_OK; | |
1286 } | |
1287 | |
1288 | |
1289 class LinkClassesClosure : public ObjectClosure { | |
1290 private: | |
1291 Thread* THREAD; | |
1292 | |
1293 public: | |
1294 LinkClassesClosure(Thread* thread) : THREAD(thread) {} | |
1295 | |
1296 void do_object(oop obj) { | |
1297 if (obj->is_klass()) { | |
1298 Klass* k = Klass::cast((klassOop) obj); | |
1299 if (k->oop_is_instance()) { | |
1300 instanceKlass* ik = (instanceKlass*) k; | |
1301 // Link the class to cause the bytecodes to be rewritten and the | |
1302 // cpcache to be created. | |
1303 if (ik->get_init_state() < instanceKlass::linked) { | |
1304 ik->link_class(THREAD); | |
1305 guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting"); | |
1306 } | |
1307 | |
1308 // Create String objects from string initializer symbols. | |
1309 ik->constants()->resolve_string_constants(THREAD); | |
1310 guarantee(!HAS_PENDING_EXCEPTION, "exception resolving string constants"); | |
1311 } | |
1312 } | |
1313 } | |
1314 }; | |
1315 | |
1316 | |
1317 // Support for a simple checksum of the contents of the class list | |
1318 // file to prevent trivial tampering. The algorithm matches that in | |
1319 // the MakeClassList program used by the J2SE build process. | |
1320 #define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe)) | |
1321 static jlong | |
1322 jsum(jlong start, const char *buf, const int len) | |
1323 { | |
1324 jlong h = start; | |
1325 char *p = (char *)buf, *e = p + len; | |
1326 while (p < e) { | |
1327 char c = *p++; | |
1328 if (c <= ' ') { | |
1329 /* Skip spaces and control characters */ | |
1330 continue; | |
1331 } | |
1332 h = 31 * h + c; | |
1333 } | |
1334 return h; | |
1335 } | |
1336 | |
1337 | |
1338 | |
1339 | |
1340 | |
1341 // Preload classes from a list, populate the shared spaces and dump to a | |
1342 // file. | |
1343 | |
// Preload the classes named in the jre/lib "classlist" file, then populate
// the shared spaces and dump them to the archive file.  The classlist
// carries a checksum (written by MakeClassList) on its last line; a
// mismatch aborts the dump.  This method never returns normally: it calls
// exit(0) on success and exit(1) on failure, because the initialization
// steps undone during dumping leave the VM unfit to continue running.
void GenCollectedHeap::preload_and_dump(TRAPS) {
  TraceTime timer("Dump Shared Spaces", TraceStartupTime);
  ResourceMark rm;

  // Preload classes to be shared.
  // Should use some hpi:: method rather than fopen() here. aB.
  // Construct the path to the class list (in jre/lib)
  // Walk up two directories from the location of the VM and
  // optionally tack on "lib" (depending on platform)
  // NOTE(review): the strcat calls below assume the resulting path fits in
  // JVM_MAXPATHLEN; confirm that os::jvm_path leaves enough headroom.
  char class_list_path[JVM_MAXPATHLEN];
  os::jvm_path(class_list_path, sizeof(class_list_path));
  for (int i = 0; i < 3; i++) {
    char *end = strrchr(class_list_path, *os::file_separator());
    if (end != NULL) *end = '\0';
  }
  int class_list_path_len = (int)strlen(class_list_path);
  if (class_list_path_len >= 3) {
    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
      strcat(class_list_path, os::file_separator());
      strcat(class_list_path, "lib");
    }
  }
  strcat(class_list_path, os::file_separator());
  strcat(class_list_path, "classlist");

  FILE* file = fopen(class_list_path, "r");
  if (file != NULL) {
    jlong computed_jsum = JSUM_SEED;
    jlong file_jsum = 0;

    char class_name[256];
    int class_count = 0;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    gch->_preloading_shared_classes = true;
    GrowableArray<oop>* class_promote_order = new GrowableArray<oop>();

    // Preload (and intern) strings which will be used later.

    StringTable::intern("main", THREAD);
    StringTable::intern("([Ljava/lang/String;)V", THREAD);
    StringTable::intern("Ljava/lang/Class;", THREAD);

    StringTable::intern("I", THREAD);   // Needed for StringBuffer persistence?
    StringTable::intern("Z", THREAD);   // Needed for StringBuffer persistence?

    // sun.io.Converters
    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
    SymbolTable::lookup(obj_array_sig, (int)strlen(obj_array_sig), THREAD);

    // java.util.HashMap
    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
    SymbolTable::lookup(map_entry_array_sig, (int)strlen(map_entry_array_sig),
                        THREAD);

    tty->print("Loading classes to share ... ");
    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
      if (*class_name == '#') {
        // Comment line; a "# hi32lo32" line carries the expected checksum.
        jint fsh, fsl;
        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
        }

        continue;
      }
      // Remove trailing newline
      // NOTE(review): an empty line (name_len == 0) would index
      // class_name[-1], and a line longer than the buffer has no newline to
      // strip, silently truncating the class name; the generated classlist
      // presumably never contains either -- confirm.
      size_t name_len = strlen(class_name);
      class_name[name_len-1] = '\0';

      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);

      // Got a class name - load it.
      symbolHandle class_name_symbol = oopFactory::new_symbol(class_name,
                                                              THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
      klassOop klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                         THREAD);
      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
      if (klass != NULL) {
        if (PrintSharedSpaces) {
          tty->print_cr("Shared spaces preloaded: %s", class_name);
        }


        instanceKlass* ik = instanceKlass::cast(klass);

        // Should be class load order as per -XX:+TraceClassLoadingPreorder
        class_promote_order->append(ik->as_klassOop());

        // Link the class to cause the bytecodes to be rewritten and the
        // cpcache to be created. The linking is done as soon as classes
        // are loaded in order that the related data structures (klass,
        // cpCache, String constants) are located together.

        if (ik->get_init_state() < instanceKlass::linked) {
          ik->link_class(THREAD);
          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
        }

        // Create String objects from string initializer symbols.

        ik->constants()->resolve_string_constants(THREAD);

        class_count++;
      } else {
        if (PrintSharedSpaces) {
          tty->cr();
          tty->print_cr(" Preload failed: %s", class_name);
        }
      }
      file_jsum = 0; // Checksum must be on last line of file
    }
    if (computed_jsum != file_jsum) {
      tty->cr();
      tty->print_cr("Preload failed: checksum of class list was incorrect.");
      exit(1);
    }

    tty->print_cr("done. ");

    if (PrintSharedSpaces) {
      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
    }

    // Rewrite and unlink classes.
    tty->print("Rewriting and unlinking classes ... ");
    // Make heap parsable
    ensure_parsability(false); // arg is actually don't care

    // Link any classes which got missed. (It's not quite clear why
    // they got missed.) This iteration would be unsafe if we weren't
    // single-threaded at this point; however we can't do it on the VM
    // thread because it requires object allocation.
    LinkClassesClosure lcc(Thread::current());
    object_iterate(&lcc);
    tty->print_cr("done. ");

    // Create and dump the shared spaces.
    jint err = CompactingPermGenGen::dump_shared(class_promote_order, THREAD);
    if (err != JNI_OK) {
      fatal("Dumping shared spaces failed.");
    }

  } else {
    char errmsg[JVM_MAXPATHLEN];
    hpi::lasterror(errmsg, JVM_MAXPATHLEN);
    tty->print_cr("Loading classlist failed: %s", errmsg);
    exit(1);
  }

  // Since various initialization steps have been undone by this process,
  // it is not reasonable to continue running a java process.
  exit(0);
}