Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/dump.cpp @ 1091:6aa7255741f3
6906727: UseCompressedOops: some card-marking fixes related to object arrays
Summary: Introduced a new write_ref_array(HeapWords* start, size_t count) method that does the requisite MemRegion range calculation so (some of the) clients of the erstwhile write_ref_array(MemRegion mr) do not need to worry. This removed all external uses of array_size(), which was also simplified and made private. Asserts were added to catch other possible issues. Further, less essential, fixes stemming from this investigation are deferred to CR 6904516 (to follow shortly in hs17).
Reviewed-by: kvn, coleenp, jmasa
author | ysr |
---|---|
date | Thu, 03 Dec 2009 15:01:57 -0800 |
parents | 981375ca07b7 |
children | 4ce7240d622c |
rev | line source |
---|---|
0 | 1 /* |
710 | 2 * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_dump.cpp.incl" | |
27 | |
28 | |
29 // Closure to set up the fingerprint field for all methods. | |
30 | |
31 class FingerprintMethodsClosure: public ObjectClosure { | |
32 public: | |
33 void do_object(oop obj) { | |
34 if (obj->is_method()) { | |
35 methodOop mobj = (methodOop)obj; | |
36 ResourceMark rm; | |
37 (new Fingerprinter(mobj))->fingerprint(); | |
38 } | |
39 } | |
40 }; | |
41 | |
42 | |
43 | |
// Closure to set the hash value (String.hash field) in all of the
// String objects in the heap. Setting the hash value is not required,
// but computing it in advance prevents the value from being written
// later, increasing the likelihood that the shared page containing the
// hash can be shared.
//
// NOTE THAT the algorithm in StringTable::hash_string() MUST MATCH the
// algorithm in java.lang.String.hashCode().
52 | |
53 class StringHashCodeClosure: public OopClosure { | |
54 private: | |
55 Thread* THREAD; | |
56 int hash_offset; | |
57 public: | |
58 StringHashCodeClosure(Thread* t) { | |
59 THREAD = t; | |
60 hash_offset = java_lang_String::hash_offset_in_bytes(); | |
61 } | |
62 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
63 void do_oop(oop* p) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
64 if (p != NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
65 oop obj = *p; |
0 | 66 if (obj->klass() == SystemDictionary::string_klass()) { |
67 | |
68 int hash; | |
69 typeArrayOop value = java_lang_String::value(obj); | |
70 int length = java_lang_String::length(obj); | |
71 if (length == 0) { | |
72 hash = 0; | |
73 } else { | |
74 int offset = java_lang_String::offset(obj); | |
75 jchar* s = value->char_at_addr(offset); | |
76 hash = StringTable::hash_string(s, length); | |
77 } | |
78 obj->int_field_put(hash_offset, hash); | |
79 } | |
80 } | |
81 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
82 void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
0 | 83 }; |
84 | |
85 | |
86 // Remove data from objects which should not appear in the shared file | |
87 // (as it pertains only to the current JVM). | |
88 | |
89 class RemoveUnshareableInfoClosure : public ObjectClosure { | |
90 public: | |
91 void do_object(oop obj) { | |
92 // Zap data from the objects which is pertains only to this JVM. We | |
93 // want that data recreated in new JVMs when the shared file is used. | |
94 if (obj->is_method()) { | |
95 ((methodOop)obj)->remove_unshareable_info(); | |
96 } | |
97 else if (obj->is_klass()) { | |
98 Klass::cast((klassOop)obj)->remove_unshareable_info(); | |
99 } | |
100 | |
101 // Don't save compiler related special oops (shouldn't be any yet). | |
102 if (obj->is_methodData() || obj->is_compiledICHolder()) { | |
103 ShouldNotReachHere(); | |
104 } | |
105 } | |
106 }; | |
107 | |
108 | |
109 static bool mark_object(oop obj) { | |
110 if (obj != NULL && | |
111 !obj->is_shared() && | |
112 !obj->is_forwarded() && | |
113 !obj->is_gc_marked()) { | |
114 obj->set_mark(markOopDesc::prototype()->set_marked()); | |
115 return true; | |
116 } | |
117 | |
118 return false; | |
119 } | |
120 | |
121 // Closure: mark objects closure. | |
122 | |
123 class MarkObjectsOopClosure : public OopClosure { | |
124 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
125 void do_oop(oop* p) { mark_object(*p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
126 void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
0 | 127 }; |
128 | |
129 | |
130 class MarkObjectsSkippingKlassesOopClosure : public OopClosure { | |
131 public: | |
132 void do_oop(oop* pobj) { | |
133 oop obj = *pobj; | |
134 if (obj != NULL && | |
135 !obj->is_klass()) { | |
136 mark_object(obj); | |
137 } | |
138 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
139 void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } |
0 | 140 }; |
141 | |
142 | |
143 static void mark_object_recursive_skipping_klasses(oop obj) { | |
144 mark_object(obj); | |
145 if (obj != NULL) { | |
146 MarkObjectsSkippingKlassesOopClosure mark_all; | |
147 obj->oop_iterate(&mark_all); | |
148 } | |
149 } | |
150 | |
151 | |
152 // Closure: mark common read-only objects, excluding symbols | |
153 | |
154 class MarkCommonReadOnly : public ObjectClosure { | |
155 private: | |
156 MarkObjectsOopClosure mark_all; | |
157 public: | |
158 void do_object(oop obj) { | |
159 | |
160 // Mark all constMethod objects. | |
161 | |
162 if (obj->is_constMethod()) { | |
163 mark_object(obj); | |
164 mark_object(constMethodOop(obj)->stackmap_data()); | |
165 // Exception tables are needed by ci code during compilation. | |
166 mark_object(constMethodOop(obj)->exception_table()); | |
167 } | |
168 | |
169 // Mark objects referenced by klass objects which are read-only. | |
170 | |
171 else if (obj->is_klass()) { | |
172 Klass* k = Klass::cast((klassOop)obj); | |
173 mark_object(k->secondary_supers()); | |
174 | |
175 // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though | |
176 // it is never modified. Otherwise, they will be pre-marked; the | |
177 // GC marking phase will skip them; and by skipping them will fail | |
178 // to mark the methods objects referenced by the array. | |
179 | |
180 if (obj->blueprint()->oop_is_instanceKlass()) { | |
181 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
182 mark_object(ik->method_ordering()); | |
183 mark_object(ik->local_interfaces()); | |
184 mark_object(ik->transitive_interfaces()); | |
185 mark_object(ik->fields()); | |
186 | |
187 mark_object(ik->class_annotations()); | |
188 | |
189 mark_object_recursive_skipping_klasses(ik->fields_annotations()); | |
190 mark_object_recursive_skipping_klasses(ik->methods_annotations()); | |
191 mark_object_recursive_skipping_klasses(ik->methods_parameter_annotations()); | |
192 mark_object_recursive_skipping_klasses(ik->methods_default_annotations()); | |
193 | |
194 typeArrayOop inner_classes = ik->inner_classes(); | |
195 if (inner_classes != NULL) { | |
196 mark_object(inner_classes); | |
197 } | |
198 } | |
199 } | |
200 } | |
201 }; | |
202 | |
203 | |
204 // Closure: mark common symbols | |
205 | |
206 class MarkCommonSymbols : public ObjectClosure { | |
207 private: | |
208 MarkObjectsOopClosure mark_all; | |
209 public: | |
210 void do_object(oop obj) { | |
211 | |
212 // Mark symbols refered to by method objects. | |
213 | |
214 if (obj->is_method()) { | |
215 methodOop m = methodOop(obj); | |
216 mark_object(m->name()); | |
217 mark_object(m->signature()); | |
218 } | |
219 | |
220 // Mark symbols referenced by klass objects which are read-only. | |
221 | |
222 else if (obj->is_klass()) { | |
223 | |
224 if (obj->blueprint()->oop_is_instanceKlass()) { | |
225 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
226 mark_object(ik->name()); | |
227 mark_object(ik->generic_signature()); | |
228 mark_object(ik->source_file_name()); | |
229 mark_object(ik->source_debug_extension()); | |
230 | |
231 typeArrayOop inner_classes = ik->inner_classes(); | |
232 if (inner_classes != NULL) { | |
233 int length = inner_classes->length(); | |
234 for (int i = 0; | |
235 i < length; | |
236 i += instanceKlass::inner_class_next_offset) { | |
237 int ioff = i + instanceKlass::inner_class_inner_name_offset; | |
238 int index = inner_classes->ushort_at(ioff); | |
239 if (index != 0) { | |
240 mark_object(ik->constants()->symbol_at(index)); | |
241 } | |
242 } | |
243 } | |
244 ik->field_names_and_sigs_iterate(&mark_all); | |
245 } | |
246 } | |
247 | |
248 // Mark symbols referenced by other constantpool entries. | |
249 | |
250 if (obj->is_constantPool()) { | |
251 constantPoolOop(obj)->shared_symbols_iterate(&mark_all); | |
252 } | |
253 } | |
254 }; | |
255 | |
256 | |
257 // Closure: mark char arrays used by strings | |
258 | |
259 class MarkStringValues : public ObjectClosure { | |
260 private: | |
261 MarkObjectsOopClosure mark_all; | |
262 public: | |
263 void do_object(oop obj) { | |
264 | |
265 // Character arrays referenced by String objects are read-only. | |
266 | |
267 if (java_lang_String::is_instance(obj)) { | |
268 mark_object(java_lang_String::value(obj)); | |
269 } | |
270 } | |
271 }; | |
272 | |
273 | |
#ifdef DEBUG
// Debug-only sweep: count (and, under Verbose, print) objects that were
// neither shared nor forwarded, i.e. left behind by the move.
class CheckRemainingObjects : public ObjectClosure {
private:
  int count;

public:
  CheckRemainingObjects() : count(0) {}

  void do_object(oop obj) {
    if (obj->is_shared() || obj->is_forwarded()) {
      return;  // accounted for
    }
    ++count;
    if (Verbose) {
      tty->print("Unreferenced object: ");
      obj->print_on(tty);
    }
  }

  void status() {
    tty->print_cr("%d objects no longer referenced, not shared.", count);
  }
};
#endif
302 | |
303 | |
304 // Closure: Mark remaining objects read-write, except Strings. | |
305 | |
306 class MarkReadWriteObjects : public ObjectClosure { | |
307 private: | |
308 MarkObjectsOopClosure mark_objects; | |
309 public: | |
310 void do_object(oop obj) { | |
311 | |
312 // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though | |
313 // it is never modified. Otherwise, they will be pre-marked; the | |
314 // GC marking phase will skip them; and by skipping them will fail | |
315 // to mark the methods objects referenced by the array. | |
316 | |
317 if (obj->is_klass()) { | |
318 mark_object(obj); | |
319 Klass* k = klassOop(obj)->klass_part(); | |
320 mark_object(k->java_mirror()); | |
321 if (obj->blueprint()->oop_is_instanceKlass()) { | |
322 instanceKlass* ik = (instanceKlass*)k; | |
323 mark_object(ik->methods()); | |
324 mark_object(ik->constants()); | |
325 } | |
326 if (obj->blueprint()->oop_is_javaArray()) { | |
327 arrayKlass* ak = (arrayKlass*)k; | |
328 mark_object(ak->component_mirror()); | |
329 } | |
330 return; | |
331 } | |
332 | |
333 // Mark constantPool tags and the constantPoolCache. | |
334 | |
335 else if (obj->is_constantPool()) { | |
336 constantPoolOop pool = constantPoolOop(obj); | |
337 mark_object(pool->cache()); | |
338 pool->shared_tags_iterate(&mark_objects); | |
339 return; | |
340 } | |
341 | |
342 // Mark all method objects. | |
343 | |
344 if (obj->is_method()) { | |
345 mark_object(obj); | |
346 } | |
347 } | |
348 }; | |
349 | |
350 | |
351 // Closure: Mark String objects read-write. | |
352 | |
353 class MarkStringObjects : public ObjectClosure { | |
354 private: | |
355 MarkObjectsOopClosure mark_objects; | |
356 public: | |
357 void do_object(oop obj) { | |
358 | |
359 // Mark String objects referenced by constant pool entries. | |
360 | |
361 if (obj->is_constantPool()) { | |
362 constantPoolOop pool = constantPoolOop(obj); | |
363 pool->shared_strings_iterate(&mark_objects); | |
364 return; | |
365 } | |
366 } | |
367 }; | |
368 | |
369 | |
370 // Move objects matching specified type (ie. lock_bits) to the specified | |
371 // space. | |
372 | |
373 class MoveMarkedObjects : public ObjectClosure { | |
374 private: | |
375 OffsetTableContigSpace* _space; | |
376 bool _read_only; | |
377 | |
378 public: | |
379 MoveMarkedObjects(OffsetTableContigSpace* space, bool read_only) { | |
380 _space = space; | |
381 _read_only = read_only; | |
382 } | |
383 | |
384 void do_object(oop obj) { | |
385 if (obj->is_shared()) { | |
386 return; | |
387 } | |
388 if (obj->is_gc_marked() && obj->forwardee() == NULL) { | |
389 int s = obj->size(); | |
390 oop sh_obj = (oop)_space->allocate(s); | |
391 if (sh_obj == NULL) { | |
392 if (_read_only) { | |
393 warning("\nThe permanent generation read only space is not large " | |
394 "enough to \npreload requested classes. Use " | |
395 "-XX:SharedReadOnlySize= to increase \nthe initial " | |
396 "size of the read only space.\n"); | |
397 } else { | |
398 warning("\nThe permanent generation read write space is not large " | |
399 "enough to \npreload requested classes. Use " | |
400 "-XX:SharedReadWriteSize= to increase \nthe initial " | |
401 "size of the read write space.\n"); | |
402 } | |
403 exit(2); | |
404 } | |
405 if (PrintSharedSpaces && Verbose && WizardMode) { | |
406 tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s", obj, sh_obj, | |
407 (_read_only ? "ro" : "rw")); | |
408 } | |
409 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s); | |
410 obj->forward_to(sh_obj); | |
411 if (_read_only) { | |
412 // Readonly objects: set hash value to self pointer and make gc_marked. | |
413 sh_obj->forward_to(sh_obj); | |
414 } else { | |
415 sh_obj->init_mark(); | |
416 } | |
417 } | |
418 } | |
419 }; | |
420 | |
421 static void mark_and_move(oop obj, MoveMarkedObjects* move) { | |
422 if (mark_object(obj)) move->do_object(obj); | |
423 } | |
424 | |
// Promotion-order policies compared against SharedOptimizeColdStartPolicy:
// an object is promoted when its policy value is <= the flag setting.
enum order_policy {
  OP_favor_startup = 0,   // data touched early during startup
  OP_balanced      = 1,
  OP_favor_runtime = 2    // data mostly needed later, at runtime
};
430 | |
431 static void mark_and_move_for_policy(order_policy policy, oop obj, MoveMarkedObjects* move) { | |
432 if (SharedOptimizeColdStartPolicy >= policy) mark_and_move(obj, move); | |
433 } | |
434 | |
435 class MarkAndMoveOrderedReadOnly : public ObjectClosure { | |
436 private: | |
437 MoveMarkedObjects *_move_ro; | |
438 | |
439 public: | |
440 MarkAndMoveOrderedReadOnly(MoveMarkedObjects *move_ro) : _move_ro(move_ro) {} | |
441 | |
442 void do_object(oop obj) { | |
443 if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) { | |
444 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
445 int i; | |
446 | |
447 mark_and_move_for_policy(OP_favor_startup, ik->name(), _move_ro); | |
448 | |
449 if (ik->super() != NULL) { | |
450 do_object(ik->super()); | |
451 } | |
452 | |
453 objArrayOop interfaces = ik->local_interfaces(); | |
454 mark_and_move_for_policy(OP_favor_startup, interfaces, _move_ro); | |
455 for(i = 0; i < interfaces->length(); i++) { | |
456 klassOop k = klassOop(interfaces->obj_at(i)); | |
457 mark_and_move_for_policy(OP_favor_startup, k->klass_part()->name(), _move_ro); | |
458 do_object(k); | |
459 } | |
460 | |
461 objArrayOop methods = ik->methods(); | |
462 for(i = 0; i < methods->length(); i++) { | |
463 methodOop m = methodOop(methods->obj_at(i)); | |
464 mark_and_move_for_policy(OP_favor_startup, m->constMethod(), _move_ro); | |
465 mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->exception_table(), _move_ro); | |
466 mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->stackmap_data(), _move_ro); | |
467 | |
468 // We don't move the name symbolOop here because it may invalidate | |
469 // method ordering, which is dependent on the address of the name | |
470 // symbolOop. It will get promoted later with the other symbols. | |
471 // Method name is rarely accessed during classloading anyway. | |
472 // mark_and_move_for_policy(OP_balanced, m->name(), _move_ro); | |
473 | |
474 mark_and_move_for_policy(OP_favor_startup, m->signature(), _move_ro); | |
475 } | |
476 | |
477 mark_and_move_for_policy(OP_favor_startup, ik->transitive_interfaces(), _move_ro); | |
478 mark_and_move_for_policy(OP_favor_startup, ik->fields(), _move_ro); | |
479 | |
480 mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro); | |
481 mark_and_move_for_policy(OP_favor_runtime, ik->method_ordering(), _move_ro); | |
482 mark_and_move_for_policy(OP_favor_runtime, ik->class_annotations(), _move_ro); | |
483 mark_and_move_for_policy(OP_favor_runtime, ik->fields_annotations(), _move_ro); | |
484 mark_and_move_for_policy(OP_favor_runtime, ik->methods_annotations(), _move_ro); | |
485 mark_and_move_for_policy(OP_favor_runtime, ik->methods_parameter_annotations(), _move_ro); | |
486 mark_and_move_for_policy(OP_favor_runtime, ik->methods_default_annotations(), _move_ro); | |
487 mark_and_move_for_policy(OP_favor_runtime, ik->inner_classes(), _move_ro); | |
488 mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro); | |
489 } | |
490 } | |
491 }; | |
492 | |
493 class MarkAndMoveOrderedReadWrite: public ObjectClosure { | |
494 private: | |
495 MoveMarkedObjects *_move_rw; | |
496 | |
497 public: | |
498 MarkAndMoveOrderedReadWrite(MoveMarkedObjects *move_rw) : _move_rw(move_rw) {} | |
499 | |
500 void do_object(oop obj) { | |
501 if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) { | |
502 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
503 int i; | |
504 | |
505 mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop(), _move_rw); | |
506 | |
507 if (ik->super() != NULL) { | |
508 do_object(ik->super()); | |
509 } | |
510 | |
511 objArrayOop interfaces = ik->local_interfaces(); | |
512 for(i = 0; i < interfaces->length(); i++) { | |
513 klassOop k = klassOop(interfaces->obj_at(i)); | |
514 mark_and_move_for_policy(OP_favor_startup, k, _move_rw); | |
515 do_object(k); | |
516 } | |
517 | |
518 objArrayOop methods = ik->methods(); | |
519 mark_and_move_for_policy(OP_favor_startup, methods, _move_rw); | |
520 for(i = 0; i < methods->length(); i++) { | |
521 methodOop m = methodOop(methods->obj_at(i)); | |
522 mark_and_move_for_policy(OP_favor_startup, m, _move_rw); | |
523 mark_and_move_for_policy(OP_favor_startup, ik->constants(), _move_rw); // idempotent | |
524 mark_and_move_for_policy(OP_balanced, ik->constants()->cache(), _move_rw); // idempotent | |
525 mark_and_move_for_policy(OP_balanced, ik->constants()->tags(), _move_rw); // idempotent | |
526 } | |
527 | |
528 mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop()->klass(), _move_rw); | |
529 mark_and_move_for_policy(OP_favor_startup, ik->constants()->klass(), _move_rw); | |
530 | |
531 // Although Java mirrors are marked in MarkReadWriteObjects, | |
532 // apparently they were never moved into shared spaces since | |
533 // MoveMarkedObjects skips marked instance oops. This may | |
534 // be a bug in the original implementation or simply the vestige | |
535 // of an abandoned experiment. Nevertheless we leave a hint | |
536 // here in case this capability is ever correctly implemented. | |
537 // | |
538 // mark_and_move_for_policy(OP_favor_runtime, ik->java_mirror(), _move_rw); | |
539 } | |
540 } | |
541 | |
542 }; | |
543 | |
544 // Adjust references in oops to refer to shared spaces. | |
545 | |
546 class ResolveForwardingClosure: public OopClosure { | |
547 public: | |
548 void do_oop(oop* p) { | |
549 oop obj = *p; | |
550 if (!obj->is_shared()) { | |
551 if (obj != NULL) { | |
552 oop f = obj->forwardee(); | |
553 guarantee(f->is_shared(), "Oop doesn't refer to shared space."); | |
554 *p = f; | |
555 } | |
556 } | |
557 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
558 void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } |
0 | 559 }; |
560 | |
561 | |
562 void sort_methods(instanceKlass* ik, TRAPS) { | |
563 klassOop super = ik->super(); | |
564 if (super != NULL) { | |
565 sort_methods(instanceKlass::cast(super), THREAD); | |
566 } | |
567 | |
568 // The methods array must be ordered by symbolOop address. (See | |
569 // classFileParser.cpp where methods in a class are originally | |
570 // sorted.) Since objects have just be reordered, this must be | |
571 // corrected. | |
572 methodOopDesc::sort_methods(ik->methods(), | |
573 ik->methods_annotations(), | |
574 ik->methods_parameter_annotations(), | |
575 ik->methods_default_annotations(), | |
576 true /* idempotent, slow */); | |
577 | |
578 // Itable indices are calculated based on methods array order | |
579 // (see klassItable::compute_itable_index()). Must reinitialize. | |
580 // We assume that since checkconstraints is false, this method | |
581 // cannot throw an exception. An exception here would be | |
582 // problematic since this is the VMThread, not a JavaThread. | |
583 ik->itable()->initialize_itable(false, THREAD); | |
584 } | |
585 | |
586 // Sort methods if the oop is an instanceKlass. | |
587 | |
588 class SortMethodsClosure: public ObjectClosure { | |
589 private: | |
590 Thread* _thread; | |
591 | |
592 public: | |
593 SortMethodsClosure(Thread* thread) : _thread(thread) {} | |
594 | |
595 void do_object(oop obj) { | |
596 // instanceKlass objects need some adjustment. | |
597 if (obj->blueprint()->oop_is_instanceKlass()) { | |
598 instanceKlass* ik = instanceKlass::cast((klassOop)obj); | |
599 | |
600 sort_methods(ik, _thread); | |
601 } | |
602 } | |
603 }; | |
604 | |
605 | |
606 // Adjust references in oops to refer to shared spaces. | |
607 | |
608 class PatchOopsClosure: public ObjectClosure { | |
609 private: | |
610 Thread* _thread; | |
611 ResolveForwardingClosure resolve; | |
612 | |
613 public: | |
614 PatchOopsClosure(Thread* thread) : _thread(thread) {} | |
615 | |
616 void do_object(oop obj) { | |
617 obj->oop_iterate_header(&resolve); | |
618 obj->oop_iterate(&resolve); | |
619 | |
620 assert(obj->klass()->is_shared(), "Klass not pointing into shared space."); | |
621 | |
622 // If the object is a Java object or class which might (in the | |
623 // future) contain a reference to a young gen object, add it to the | |
624 // list. | |
625 | |
626 if (obj->is_klass() || obj->is_instance()) { | |
627 if (obj->is_klass() || | |
628 obj->is_a(SystemDictionary::class_klass()) || | |
629 obj->is_a(SystemDictionary::throwable_klass())) { | |
630 // Do nothing | |
631 } | |
632 else if (obj->is_a(SystemDictionary::string_klass())) { | |
633 // immutable objects. | |
634 } else { | |
635 // someone added an object we hadn't accounted for. | |
636 ShouldNotReachHere(); | |
637 } | |
638 } | |
639 } | |
640 }; | |
641 | |
642 | |
643 // Empty the young and old generations. | |
644 | |
645 class ClearSpaceClosure : public SpaceClosure { | |
646 public: | |
647 void do_space(Space* s) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
648 s->clear(SpaceDecorator::Mangle); |
0 | 649 } |
650 }; | |
651 | |
652 | |
653 // Closure for serializing initialization data out to a data area to be | |
654 // written to the shared file. | |
655 | |
656 class WriteClosure : public SerializeOopClosure { | |
657 private: | |
658 oop* top; | |
659 char* end; | |
660 | |
661 void out_of_space() { | |
662 warning("\nThe shared miscellaneous data space is not large " | |
663 "enough to \npreload requested classes. Use " | |
664 "-XX:SharedMiscDataSize= to increase \nthe initial " | |
665 "size of the miscellaneous data space.\n"); | |
666 exit(2); | |
667 } | |
668 | |
669 | |
670 inline void check_space() { | |
671 if ((char*)top + sizeof(oop) > end) { | |
672 out_of_space(); | |
673 } | |
674 } | |
675 | |
676 | |
677 public: | |
678 WriteClosure(char* md_top, char* md_end) { | |
679 top = (oop*)md_top; | |
680 end = md_end; | |
681 } | |
682 | |
683 char* get_top() { return (char*)top; } | |
684 | |
685 void do_oop(oop* p) { | |
686 check_space(); | |
687 oop obj = *p; | |
688 assert(obj->is_oop_or_null(), "invalid oop"); | |
689 assert(obj == NULL || obj->is_shared(), | |
690 "Oop in shared space not pointing into shared space."); | |
691 *top = obj; | |
692 ++top; | |
693 } | |
694 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
695 void do_oop(narrowOop* pobj) { ShouldNotReachHere(); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
696 |
0 | 697 void do_int(int* p) { |
698 check_space(); | |
699 *top = (oop)(intptr_t)*p; | |
700 ++top; | |
701 } | |
702 | |
703 void do_size_t(size_t* p) { | |
704 check_space(); | |
705 *top = (oop)(intptr_t)*p; | |
706 ++top; | |
707 } | |
708 | |
709 void do_ptr(void** p) { | |
710 check_space(); | |
711 *top = (oop)*p; | |
712 ++top; | |
713 } | |
714 | |
715 void do_ptr(HeapWord** p) { do_ptr((void **) p); } | |
716 | |
717 void do_tag(int tag) { | |
718 check_space(); | |
719 *top = (oop)(intptr_t)tag; | |
720 ++top; | |
721 } | |
722 | |
723 void do_region(u_char* start, size_t size) { | |
724 if ((char*)top + size > end) { | |
725 out_of_space(); | |
726 } | |
727 assert((intptr_t)start % sizeof(oop) == 0, "bad alignment"); | |
728 assert(size % sizeof(oop) == 0, "bad size"); | |
729 do_tag((int)size); | |
730 while (size > 0) { | |
731 *top = *(oop*)start; | |
732 ++top; | |
733 start += sizeof(oop); | |
734 size -= sizeof(oop); | |
735 } | |
736 } | |
737 | |
738 bool reading() const { return false; } | |
739 }; | |
740 | |
741 | |
742 class ResolveConstantPoolsClosure : public ObjectClosure { | |
743 private: | |
744 TRAPS; | |
745 public: | |
746 ResolveConstantPoolsClosure(Thread *t) { | |
747 __the_thread__ = t; | |
748 } | |
749 void do_object(oop obj) { | |
750 if (obj->is_constantPool()) { | |
751 constantPoolOop cpool = (constantPoolOop)obj; | |
752 int unresolved = cpool->pre_resolve_shared_klasses(THREAD); | |
753 } | |
754 } | |
755 }; | |
756 | |
757 | |
758 // Print a summary of the contents of the read/write spaces to help | |
759 // identify objects which might be able to be made read-only. At this | |
760 // point, the objects have been written, and we can trash them as | |
761 // needed. | |
762 | |
763 static void print_contents() { | |
764 if (PrintSharedSpaces) { | |
765 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
766 CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen(); | |
767 | |
768 // High level summary of the read-only space: | |
769 | |
770 ClassifyObjectClosure coc; | |
771 tty->cr(); tty->print_cr("ReadOnly space:"); | |
772 gen->ro_space()->object_iterate(&coc); | |
773 coc.print(); | |
774 | |
775 // High level summary of the read-write space: | |
776 | |
777 coc.reset(); | |
778 tty->cr(); tty->print_cr("ReadWrite space:"); | |
779 gen->rw_space()->object_iterate(&coc); | |
780 coc.print(); | |
781 | |
782 // Reset counters | |
783 | |
784 ClearAllocCountClosure cacc; | |
785 gen->ro_space()->object_iterate(&cacc); | |
786 gen->rw_space()->object_iterate(&cacc); | |
787 coc.reset(); | |
788 | |
789 // Lower level summary of the read-only space: | |
790 | |
791 gen->ro_space()->object_iterate(&coc); | |
792 tty->cr(); tty->print_cr("ReadOnly space:"); | |
793 ClassifyInstanceKlassClosure cikc; | |
794 gen->rw_space()->object_iterate(&cikc); | |
795 cikc.print(); | |
796 | |
797 // Reset counters | |
798 | |
799 gen->ro_space()->object_iterate(&cacc); | |
800 gen->rw_space()->object_iterate(&cacc); | |
801 coc.reset(); | |
802 | |
803 // Lower level summary of the read-write space: | |
804 | |
805 gen->rw_space()->object_iterate(&coc); | |
806 cikc.reset(); | |
807 tty->cr(); tty->print_cr("ReadWrite space:"); | |
808 gen->rw_space()->object_iterate(&cikc); | |
809 cikc.print(); | |
810 } | |
811 } | |
812 | |
813 | |
814 // Patch C++ vtable pointer in klass oops. | |
815 | |
816 // Klass objects contain references to c++ vtables in the JVM library. | |
817 // Fix them to point to our constructed vtables. However, don't iterate | |
818 // across the space while doing this, as that causes the vtables to be | |
819 // patched, undoing our useful work. Instead, iterate to make a list, | |
820 // then use the list to do the fixing. | |
408 | 821 // |
822 // Our constructed vtables: | |
823 // Dump time: | |
824 // 1. init_self_patching_vtbl_list: table of pointers to current virtual method addrs | |
825 // 2. generate_vtable_methods: create jump table, appended to above vtbl_list | |
826 // 3. PatchKlassVtables: for Klass list, patch the vtable entry to point to jump table | |
827 // rather than to current vtbl | |
828 // Table layout: NOTE FIXED SIZE | |
829 // 1. vtbl pointers | |
830 // 2. #Klass X #virtual methods per Klass | |
831 // 1 entry for each, in the order: | |
832 // Klass1:method1 entry, Klass1:method2 entry, ... Klass1:method<num_virtuals> entry | |
833 // Klass2:method1 entry, Klass2:method2 entry, ... Klass2:method<num_virtuals> entry | |
834 // ... | |
835 // Klass<vtbl_list_size>:method1 entry, Klass<vtbl_list_size>:method2 entry, | |
836 // ... Klass<vtbl_list_size>:method<num_virtuals> entry | |
837 // Sample entry: (Sparc): | |
838 // save(sp, -256, sp) | |
839 // ba,pt common_code | |
840 // mov XXX, %L0 %L0 gets: Klass index <<8 + method index (note: max method index 255) | |
841 // | |
842 // Restore time: | |
843 // 1. initialize_oops: reserve space for table | |
844 // 2. init_self_patching_vtbl_list: update pointers to NEW virtual method addrs in text | |
845 // | |
846 // Execution time: | |
847 // First virtual method call for any object of these Klass types: | |
848 // 1. object->klass->klass_part | |
849 // 2. vtable entry for that klass_part points to the jump table entries | |
850 // 3. branches to common_code with %O0/klass_part, %L0: Klass index <<8 + method index | |
851 // 4. common_code: | |
852 // Get address of new vtbl pointer for this Klass from updated table | |
853 // Update new vtbl pointer in the Klass: future virtual calls go direct | |
854 // Jump to method, using new vtbl pointer and method index | |
0 | 855 |
856 class PatchKlassVtables: public ObjectClosure { | |
857 private: | |
858 void* _vtbl_ptr; | |
859 VirtualSpace* _md_vs; | |
860 GrowableArray<klassOop>* _klass_objects; | |
861 | |
862 public: | |
863 | |
864 PatchKlassVtables(void* vtbl_ptr, VirtualSpace* md_vs) { | |
865 _vtbl_ptr = vtbl_ptr; | |
866 _md_vs = md_vs; | |
867 _klass_objects = new GrowableArray<klassOop>(); | |
868 } | |
869 | |
870 | |
871 void do_object(oop obj) { | |
872 if (obj->is_klass()) { | |
873 _klass_objects->append(klassOop(obj)); | |
874 } | |
875 } | |
876 | |
877 | |
878 void patch(void** vtbl_list, int vtbl_list_size) { | |
879 for (int i = 0; i < _klass_objects->length(); ++i) { | |
880 klassOop obj = (klassOop)_klass_objects->at(i); | |
881 Klass* k = obj->klass_part(); | |
882 void* v = *(void**)k; | |
883 | |
884 int n; | |
885 for (n = 0; n < vtbl_list_size; ++n) { | |
886 *(void**)k = NULL; | |
887 if (vtbl_list[n] == v) { | |
888 *(void**)k = (void**)_vtbl_ptr + | |
889 (n * CompactingPermGenGen::num_virtuals); | |
890 break; | |
891 } | |
892 } | |
893 guarantee(n < vtbl_list_size, "unable to find matching vtbl pointer"); | |
894 } | |
895 } | |
896 }; | |
897 | |
898 | |
899 // Populate the shared space. | |
900 | |
901 class VM_PopulateDumpSharedSpace: public VM_Operation { | |
902 private: | |
903 GrowableArray<oop> *_class_promote_order; | |
904 OffsetTableContigSpace* _ro_space; | |
905 OffsetTableContigSpace* _rw_space; | |
906 VirtualSpace* _md_vs; | |
907 VirtualSpace* _mc_vs; | |
908 | |
909 public: | |
910 VM_PopulateDumpSharedSpace(GrowableArray<oop> *class_promote_order, | |
911 OffsetTableContigSpace* ro_space, | |
912 OffsetTableContigSpace* rw_space, | |
913 VirtualSpace* md_vs, VirtualSpace* mc_vs) { | |
914 _class_promote_order = class_promote_order; | |
915 _ro_space = ro_space; | |
916 _rw_space = rw_space; | |
917 _md_vs = md_vs; | |
918 _mc_vs = mc_vs; | |
919 } | |
920 | |
921 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } | |
922 void doit() { | |
923 Thread* THREAD = VMThread::vm_thread(); | |
924 NOT_PRODUCT(SystemDictionary::verify();) | |
925 // The following guarantee is meant to ensure that no loader constraints | |
926 // exist yet, since the constraints table is not shared. This becomes | |
927 // more important now that we don't re-initialize vtables/itables for | |
928 // shared classes at runtime, where constraints were previously created. | |
929 guarantee(SystemDictionary::constraints()->number_of_entries() == 0, | |
930 "loader constraints are not saved"); | |
710 | 931 // Revisit and implement this if we prelink method handle call sites: |
714 | 932 guarantee(SystemDictionary::invoke_method_table() == NULL || |
933 SystemDictionary::invoke_method_table()->number_of_entries() == 0, | |
710 | 934 "invoke method table is not saved"); |
0 | 935 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
936 | |
937 // At this point, many classes have been loaded. | |
938 | |
939 // Update all the fingerprints in the shared methods. | |
940 | |
941 tty->print("Calculating fingerprints ... "); | |
942 FingerprintMethodsClosure fpmc; | |
943 gch->object_iterate(&fpmc); | |
944 tty->print_cr("done. "); | |
945 | |
946 // Remove all references outside the heap. | |
947 | |
948 tty->print("Removing unshareable information ... "); | |
949 RemoveUnshareableInfoClosure ruic; | |
950 gch->object_iterate(&ruic); | |
951 tty->print_cr("done. "); | |
952 | |
953 // Move the objects in three passes. | |
954 | |
955 MarkObjectsOopClosure mark_all; | |
956 MarkCommonReadOnly mark_common_ro; | |
957 MarkCommonSymbols mark_common_symbols; | |
958 MarkStringValues mark_string_values; | |
959 MarkReadWriteObjects mark_rw; | |
960 MarkStringObjects mark_strings; | |
961 MoveMarkedObjects move_ro(_ro_space, true); | |
962 MoveMarkedObjects move_rw(_rw_space, false); | |
963 | |
964 // The SharedOptimizeColdStart VM option governs the new layout | |
965 // algorithm for promoting classes into the shared archive. | |
966 // The general idea is to minimize cold start time by laying | |
967 // out the objects in the order they are accessed at startup time. | |
968 // By doing this we are trying to eliminate out-of-order accesses | |
969 // in the shared archive. This benefits cold startup time by making | |
970 // disk reads as sequential as possible during class loading and | |
971 // bootstrapping activities. There may also be a small secondary | |
972 // effect of better "packing" of more commonly used data on a smaller | |
973 // number of pages, although no direct benefit has been measured from | |
974 // this effect. | |
975 // | |
976 // At the class level of granularity, the promotion order is dictated | |
977 // by the classlist file whose generation is discussed elsewhere. | |
978 // | |
979 // At smaller granularity, optimal ordering was determined by an | |
980 // offline analysis of object access order in the shared archive. | |
981 // The dbx watchpoint facility, combined with SA post-processing, | |
982 // was used to observe common access patterns primarily during | |
983 // classloading. This information was used to craft the promotion | |
984 // order seen in the following closures. | |
985 // | |
986 // The observed access order is mostly governed by what happens | |
987 // in SystemDictionary::load_shared_class(). NOTE WELL - care | |
988 // should be taken when making changes to this method, because it | |
989 // may invalidate assumptions made about access order! | |
990 // | |
991 // (Ideally, there would be a better way to manage changes to | |
992 // the access order. Unfortunately a generic in-VM solution for | |
993 // dynamically observing access order and optimizing shared | |
994 // archive layout is pretty difficult. We go with the static | |
995 // analysis because the code is fairly mature at this point | |
996 // and we're betting that the access order won't change much.) | |
997 | |
998 MarkAndMoveOrderedReadOnly mark_and_move_ordered_ro(&move_ro); | |
999 MarkAndMoveOrderedReadWrite mark_and_move_ordered_rw(&move_rw); | |
1000 | |
1001 // Phase 1a: move commonly used read-only objects to the read-only space. | |
1002 | |
1003 if (SharedOptimizeColdStart) { | |
1004 tty->print("Moving pre-ordered read-only objects to shared space at " PTR_FORMAT " ... ", | |
1005 _ro_space->top()); | |
1006 for (int i = 0; i < _class_promote_order->length(); i++) { | |
1007 oop obj = _class_promote_order->at(i); | |
1008 mark_and_move_ordered_ro.do_object(obj); | |
1009 } | |
1010 tty->print_cr("done. "); | |
1011 } | |
1012 | |
1013 tty->print("Moving read-only objects to shared space at " PTR_FORMAT " ... ", | |
1014 _ro_space->top()); | |
1015 gch->object_iterate(&mark_common_ro); | |
1016 gch->object_iterate(&move_ro); | |
1017 tty->print_cr("done. "); | |
1018 | |
1019 // Phase 1b: move commonly used symbols to the read-only space. | |
1020 | |
1021 tty->print("Moving common symbols to shared space at " PTR_FORMAT " ... ", | |
1022 _ro_space->top()); | |
1023 gch->object_iterate(&mark_common_symbols); | |
1024 gch->object_iterate(&move_ro); | |
1025 tty->print_cr("done. "); | |
1026 | |
1027 // Phase 1c: move remaining symbols to the read-only space | |
1028 // (e.g. String initializers). | |
1029 | |
1030 tty->print("Moving remaining symbols to shared space at " PTR_FORMAT " ... ", | |
1031 _ro_space->top()); | |
1032 vmSymbols::oops_do(&mark_all, true); | |
1033 gch->object_iterate(&move_ro); | |
1034 tty->print_cr("done. "); | |
1035 | |
1036 // Phase 1d: move String character arrays to the read-only space. | |
1037 | |
1038 tty->print("Moving string char arrays to shared space at " PTR_FORMAT " ... ", | |
1039 _ro_space->top()); | |
1040 gch->object_iterate(&mark_string_values); | |
1041 gch->object_iterate(&move_ro); | |
1042 tty->print_cr("done. "); | |
1043 | |
1044 // Phase 2: move all remaining symbols to the read-only space. The | |
1045 // remaining symbols are assumed to be string initializers no longer | |
1046 // referenced. | |
1047 | |
1048 void* extra_symbols = _ro_space->top(); | |
1049 tty->print("Moving additional symbols to shared space at " PTR_FORMAT " ... ", | |
1050 _ro_space->top()); | |
1051 SymbolTable::oops_do(&mark_all); | |
1052 gch->object_iterate(&move_ro); | |
1053 tty->print_cr("done. "); | |
1054 tty->print_cr("Read-only space ends at " PTR_FORMAT ", %d bytes.", | |
1055 _ro_space->top(), _ro_space->used()); | |
1056 | |
1057 // Phase 3: move read-write objects to the read-write space, except | |
1058 // Strings. | |
1059 | |
1060 if (SharedOptimizeColdStart) { | |
1061 tty->print("Moving pre-ordered read-write objects to shared space at " PTR_FORMAT " ... ", | |
1062 _rw_space->top()); | |
1063 for (int i = 0; i < _class_promote_order->length(); i++) { | |
1064 oop obj = _class_promote_order->at(i); | |
1065 mark_and_move_ordered_rw.do_object(obj); | |
1066 } | |
1067 tty->print_cr("done. "); | |
1068 } | |
1069 tty->print("Moving read-write objects to shared space at " PTR_FORMAT " ... ", | |
1070 _rw_space->top()); | |
1071 Universe::oops_do(&mark_all, true); | |
1072 SystemDictionary::oops_do(&mark_all); | |
1073 oop tmp = Universe::arithmetic_exception_instance(); | |
1074 mark_object(java_lang_Throwable::message(tmp)); | |
1075 gch->object_iterate(&mark_rw); | |
1076 gch->object_iterate(&move_rw); | |
1077 tty->print_cr("done. "); | |
1078 | |
1079 // Phase 4: move String objects to the read-write space. | |
1080 | |
1081 tty->print("Moving String objects to shared space at " PTR_FORMAT " ... ", | |
1082 _rw_space->top()); | |
1083 StringTable::oops_do(&mark_all); | |
1084 gch->object_iterate(&mark_strings); | |
1085 gch->object_iterate(&move_rw); | |
1086 tty->print_cr("done. "); | |
1087 tty->print_cr("Read-write space ends at " PTR_FORMAT ", %d bytes.", | |
1088 _rw_space->top(), _rw_space->used()); | |
1089 | |
1090 #ifdef DEBUG | |
1091 // Check: scan for objects which were not moved. | |
1092 | |
1093 CheckRemainingObjects check_objects; | |
1094 gch->object_iterate(&check_objects); | |
1095 check_objects.status(); | |
1096 #endif | |
1097 | |
1098 // Resolve forwarding in objects and saved C++ structures | |
1099 tty->print("Updating references to shared objects ... "); | |
1100 ResolveForwardingClosure resolve; | |
1101 Universe::oops_do(&resolve); | |
1102 SystemDictionary::oops_do(&resolve); | |
1103 StringTable::oops_do(&resolve); | |
1104 SymbolTable::oops_do(&resolve); | |
1105 vmSymbols::oops_do(&resolve); | |
1106 | |
1107 // Set up the share data and shared code segments. | |
1108 | |
1109 char* md_top = _md_vs->low(); | |
1110 char* md_end = _md_vs->high(); | |
1111 char* mc_top = _mc_vs->low(); | |
1112 char* mc_end = _mc_vs->high(); | |
1113 | |
1114 // Reserve space for the list of klassOops whose vtables are used | |
1115 // for patching others as needed. | |
1116 | |
1117 void** vtbl_list = (void**)md_top; | |
1118 int vtbl_list_size = CompactingPermGenGen::vtbl_list_size; | |
1119 Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size); | |
1120 | |
1121 md_top += vtbl_list_size * sizeof(void*); | |
1122 void* vtable = md_top; | |
1123 | |
1124 // Reserve space for a new dummy vtable for klass objects in the | |
1125 // heap. Generate self-patching vtable entries. | |
1126 | |
1127 CompactingPermGenGen::generate_vtable_methods(vtbl_list, | |
1128 &vtable, | |
1129 &md_top, md_end, | |
1130 &mc_top, mc_end); | |
1131 | |
1132 // Fix (forward) all of the references in these shared objects (which | |
1133 // are required to point ONLY to objects in the shared spaces). | |
1134 // Also, create a list of all objects which might later contain a | |
1135 // reference to a younger generation object. | |
1136 | |
1137 CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen(); | |
1138 PatchOopsClosure patch(THREAD); | |
1139 gen->ro_space()->object_iterate(&patch); | |
1140 gen->rw_space()->object_iterate(&patch); | |
1141 | |
1142 // Previously method sorting was done concurrently with forwarding | |
1143 // pointer resolution in the shared spaces. This imposed an ordering | |
1144 // restriction in that methods were required to be promoted/patched | |
1145 // before their holder classes. (Because constant pool pointers in | |
1146 // methodKlasses are required to be resolved before their holder class | |
1147 // is visited for sorting, otherwise methods are sorted by incorrect, | |
1148 // pre-forwarding addresses.) | |
1149 // | |
1150 // Now, we reorder methods as a separate step after ALL forwarding | |
1151 // pointer resolution, so that methods can be promoted in any order | |
1152 // with respect to their holder classes. | |
1153 | |
1154 SortMethodsClosure sort(THREAD); | |
1155 gen->ro_space()->object_iterate(&sort); | |
1156 gen->rw_space()->object_iterate(&sort); | |
1157 tty->print_cr("done. "); | |
1158 tty->cr(); | |
1159 | |
1160 // Reorder the system dictionary. (Moving the symbols opps affects | |
1161 // how the hash table indices are calculated.) | |
1162 | |
1163 SystemDictionary::reorder_dictionary(); | |
1164 | |
1165 // Empty the non-shared heap (because most of the objects were | |
1166 // copied out, and the remainder cannot be considered valid oops). | |
1167 | |
1168 ClearSpaceClosure csc; | |
1169 for (int i = 0; i < gch->n_gens(); ++i) { | |
1170 gch->get_gen(i)->space_iterate(&csc); | |
1171 } | |
1172 csc.do_space(gen->the_space()); | |
1173 NOT_PRODUCT(SystemDictionary::verify();) | |
1174 | |
1175 // Copy the String table, the symbol table, and the system | |
1176 // dictionary to the shared space in usable form. Copy the hastable | |
1177 // buckets first [read-write], then copy the linked lists of entries | |
1178 // [read-only]. | |
1179 | |
1180 SymbolTable::reverse(extra_symbols); | |
1181 NOT_PRODUCT(SymbolTable::verify()); | |
1182 SymbolTable::copy_buckets(&md_top, md_end); | |
1183 | |
1184 StringTable::reverse(); | |
1185 NOT_PRODUCT(StringTable::verify()); | |
1186 StringTable::copy_buckets(&md_top, md_end); | |
1187 | |
1188 SystemDictionary::reverse(); | |
1189 SystemDictionary::copy_buckets(&md_top, md_end); | |
1190 | |
1191 ClassLoader::verify(); | |
1192 ClassLoader::copy_package_info_buckets(&md_top, md_end); | |
1193 ClassLoader::verify(); | |
1194 | |
1195 SymbolTable::copy_table(&md_top, md_end); | |
1196 StringTable::copy_table(&md_top, md_end); | |
1197 SystemDictionary::copy_table(&md_top, md_end); | |
1198 ClassLoader::verify(); | |
1199 ClassLoader::copy_package_info_table(&md_top, md_end); | |
1200 ClassLoader::verify(); | |
1201 | |
1202 // Print debug data. | |
1203 | |
1204 if (PrintSharedSpaces) { | |
1205 const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " bytes allocated at " PTR_FORMAT "."; | |
1206 tty->print_cr(fmt, "ro", _ro_space->used(), _ro_space->capacity(), | |
1207 _ro_space->bottom()); | |
1208 tty->print_cr(fmt, "rw", _rw_space->used(), _rw_space->capacity(), | |
1209 _rw_space->bottom()); | |
1210 } | |
1211 | |
1212 // Write the oop data to the output array. | |
1213 | |
1214 WriteClosure wc(md_top, md_end); | |
1215 CompactingPermGenGen::serialize_oops(&wc); | |
1216 md_top = wc.get_top(); | |
1217 | |
1218 // Update the vtable pointers in all of the Klass objects in the | |
1219 // heap. They should point to newly generated vtable. | |
1220 | |
1221 PatchKlassVtables pkvt(vtable, _md_vs); | |
1222 _rw_space->object_iterate(&pkvt); | |
1223 pkvt.patch(vtbl_list, vtbl_list_size); | |
1224 | |
1225 char* saved_vtbl = (char*)malloc(vtbl_list_size * sizeof(void*)); | |
1226 memmove(saved_vtbl, vtbl_list, vtbl_list_size * sizeof(void*)); | |
1227 memset(vtbl_list, 0, vtbl_list_size * sizeof(void*)); | |
1228 | |
1229 // Create and write the archive file that maps the shared spaces. | |
1230 | |
1231 FileMapInfo* mapinfo = new FileMapInfo(); | |
1232 mapinfo->populate_header(gch->gen_policy()->max_alignment()); | |
1233 | |
1234 // Pass 1 - update file offsets in header. | |
1235 mapinfo->write_header(); | |
1236 mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true); | |
1237 _ro_space->set_saved_mark(); | |
1238 mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false); | |
1239 _rw_space->set_saved_mark(); | |
1240 mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), | |
287
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1241 pointer_delta(md_top, _md_vs->low(), sizeof(char)), |
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1242 SharedMiscDataSize, |
0 | 1243 false, false); |
1244 mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), | |
287
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1245 pointer_delta(mc_top, _mc_vs->low(), sizeof(char)), |
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1246 SharedMiscCodeSize, |
0 | 1247 true, true); |
1248 | |
1249 // Pass 2 - write data. | |
1250 mapinfo->open_for_write(); | |
1251 mapinfo->write_header(); | |
1252 mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true); | |
1253 mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false); | |
1254 mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(), | |
287
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1255 pointer_delta(md_top, _md_vs->low(), sizeof(char)), |
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1256 SharedMiscDataSize, |
0 | 1257 false, false); |
1258 mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(), | |
287
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1259 pointer_delta(mc_top, _mc_vs->low(), sizeof(char)), |
6e76352f1f62
6459085: naked pointer subtractions in class data sharing code
xlu
parents:
196
diff
changeset
|
1260 SharedMiscCodeSize, |
0 | 1261 true, true); |
1262 mapinfo->close(); | |
1263 | |
1264 // Summarize heap. | |
1265 memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*)); | |
1266 print_contents(); | |
1267 } | |
1268 }; // class VM_PopulateDumpSharedSpace | |
1269 | |
1270 | |
1271 // Populate the shared spaces and dump to a file. | |
1272 | |
1273 jint CompactingPermGenGen::dump_shared(GrowableArray<oop>* class_promote_order, TRAPS) { | |
1274 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1275 | |
1276 // Calculate hash values for all of the (interned) strings to avoid | |
1277 // writes to shared pages in the future. | |
1278 | |
1279 tty->print("Calculating hash values for String objects .. "); | |
1280 StringHashCodeClosure shcc(THREAD); | |
1281 StringTable::oops_do(&shcc); | |
1282 tty->print_cr("done. "); | |
1283 | |
1284 CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen(); | |
1285 VM_PopulateDumpSharedSpace op(class_promote_order, | |
1286 gen->ro_space(), gen->rw_space(), | |
1287 gen->md_space(), gen->mc_space()); | |
1288 VMThread::execute(&op); | |
1289 return JNI_OK; | |
1290 } | |
1291 | |
1292 | |
1293 class LinkClassesClosure : public ObjectClosure { | |
1294 private: | |
1295 Thread* THREAD; | |
1296 | |
1297 public: | |
1298 LinkClassesClosure(Thread* thread) : THREAD(thread) {} | |
1299 | |
1300 void do_object(oop obj) { | |
1301 if (obj->is_klass()) { | |
1302 Klass* k = Klass::cast((klassOop) obj); | |
1303 if (k->oop_is_instance()) { | |
1304 instanceKlass* ik = (instanceKlass*) k; | |
1305 // Link the class to cause the bytecodes to be rewritten and the | |
1306 // cpcache to be created. | |
1307 if (ik->get_init_state() < instanceKlass::linked) { | |
1308 ik->link_class(THREAD); | |
1309 guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting"); | |
1310 } | |
1311 | |
1312 // Create String objects from string initializer symbols. | |
1313 ik->constants()->resolve_string_constants(THREAD); | |
1314 guarantee(!HAS_PENDING_EXCEPTION, "exception resolving string constants"); | |
1315 } | |
1316 } | |
1317 } | |
1318 }; | |
1319 | |
1320 | |
1321 // Support for a simple checksum of the contents of the class list | |
1322 // file to prevent trivial tampering. The algorithm matches that in | |
1323 // the MakeClassList program used by the J2SE build process. | |
1324 #define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe)) | |
1325 static jlong | |
1326 jsum(jlong start, const char *buf, const int len) | |
1327 { | |
1328 jlong h = start; | |
1329 char *p = (char *)buf, *e = p + len; | |
1330 while (p < e) { | |
1331 char c = *p++; | |
1332 if (c <= ' ') { | |
1333 /* Skip spaces and control characters */ | |
1334 continue; | |
1335 } | |
1336 h = 31 * h + c; | |
1337 } | |
1338 return h; | |
1339 } | |
1340 | |
1341 | |
1342 | |
1343 | |
1344 | |
1345 // Preload classes from a list, populate the shared spaces and dump to a | |
1346 // file. | |
1347 | |
1348 void GenCollectedHeap::preload_and_dump(TRAPS) { | |
1349 TraceTime timer("Dump Shared Spaces", TraceStartupTime); | |
1350 ResourceMark rm; | |
1351 | |
1352 // Preload classes to be shared. | |
1353 // Should use some hpi:: method rather than fopen() here. aB. | |
1354 // Construct the path to the class list (in jre/lib) | |
1355 // Walk up two directories from the location of the VM and | |
1356 // optionally tack on "lib" (depending on platform) | |
1357 char class_list_path[JVM_MAXPATHLEN]; | |
1358 os::jvm_path(class_list_path, sizeof(class_list_path)); | |
1359 for (int i = 0; i < 3; i++) { | |
1360 char *end = strrchr(class_list_path, *os::file_separator()); | |
1361 if (end != NULL) *end = '\0'; | |
1362 } | |
1363 int class_list_path_len = (int)strlen(class_list_path); | |
1364 if (class_list_path_len >= 3) { | |
1365 if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) { | |
1366 strcat(class_list_path, os::file_separator()); | |
1367 strcat(class_list_path, "lib"); | |
1368 } | |
1369 } | |
1370 strcat(class_list_path, os::file_separator()); | |
1371 strcat(class_list_path, "classlist"); | |
1372 | |
1373 FILE* file = fopen(class_list_path, "r"); | |
1374 if (file != NULL) { | |
1375 jlong computed_jsum = JSUM_SEED; | |
1376 jlong file_jsum = 0; | |
1377 | |
1378 char class_name[256]; | |
1379 int class_count = 0; | |
1380 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1381 gch->_preloading_shared_classes = true; | |
1382 GrowableArray<oop>* class_promote_order = new GrowableArray<oop>(); | |
1383 | |
1384 // Preload (and intern) strings which will be used later. | |
1385 | |
1386 StringTable::intern("main", THREAD); | |
1387 StringTable::intern("([Ljava/lang/String;)V", THREAD); | |
1388 StringTable::intern("Ljava/lang/Class;", THREAD); | |
1389 | |
1390 StringTable::intern("I", THREAD); // Needed for StringBuffer persistence? | |
1391 StringTable::intern("Z", THREAD); // Needed for StringBuffer persistence? | |
1392 | |
1393 // sun.io.Converters | |
1394 static const char obj_array_sig[] = "[[Ljava/lang/Object;"; | |
1395 SymbolTable::lookup(obj_array_sig, (int)strlen(obj_array_sig), THREAD); | |
1396 | |
1397 // java.util.HashMap | |
1398 static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;"; | |
1399 SymbolTable::lookup(map_entry_array_sig, (int)strlen(map_entry_array_sig), | |
1400 THREAD); | |
1401 | |
1402 tty->print("Loading classes to share ... "); | |
1403 while ((fgets(class_name, sizeof class_name, file)) != NULL) { | |
1404 if (*class_name == '#') { | |
1405 jint fsh, fsl; | |
1406 if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) { | |
1407 file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff); | |
1408 } | |
1409 | |
1410 continue; | |
1411 } | |
1412 // Remove trailing newline | |
1413 size_t name_len = strlen(class_name); | |
1414 class_name[name_len-1] = '\0'; | |
1415 | |
1416 computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1); | |
1417 | |
1418 // Got a class name - load it. | |
1419 symbolHandle class_name_symbol = oopFactory::new_symbol(class_name, | |
1420 THREAD); | |
1421 guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol."); | |
1422 klassOop klass = SystemDictionary::resolve_or_null(class_name_symbol, | |
1423 THREAD); | |
1424 guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class."); | |
1425 if (klass != NULL) { | |
1426 if (PrintSharedSpaces) { | |
1427 tty->print_cr("Shared spaces preloaded: %s", class_name); | |
1428 } | |
1429 | |
1430 | |
1431 instanceKlass* ik = instanceKlass::cast(klass); | |
1432 | |
1433 // Should be class load order as per -XX:+TraceClassLoadingPreorder | |
1434 class_promote_order->append(ik->as_klassOop()); | |
1435 | |
1436 // Link the class to cause the bytecodes to be rewritten and the | |
1437 // cpcache to be created. The linking is done as soon as classes | |
1438 // are loaded in order that the related data structures (klass, | |
1439 // cpCache, Sting constants) are located together. | |
1440 | |
1441 if (ik->get_init_state() < instanceKlass::linked) { | |
1442 ik->link_class(THREAD); | |
1443 guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting"); | |
1444 } | |
1445 | |
1446 // Create String objects from string initializer symbols. | |
1447 | |
1448 ik->constants()->resolve_string_constants(THREAD); | |
1449 | |
1450 class_count++; | |
1451 } else { | |
1452 if (PrintSharedSpaces) { | |
1453 tty->cr(); | |
1454 tty->print_cr(" Preload failed: %s", class_name); | |
1455 } | |
1456 } | |
1457 file_jsum = 0; // Checksum must be on last line of file | |
1458 } | |
1459 if (computed_jsum != file_jsum) { | |
1460 tty->cr(); | |
1461 tty->print_cr("Preload failed: checksum of class list was incorrect."); | |
1462 exit(1); | |
1463 } | |
1464 | |
1465 tty->print_cr("done. "); | |
1466 | |
1467 if (PrintSharedSpaces) { | |
1468 tty->print_cr("Shared spaces: preloaded %d classes", class_count); | |
1469 } | |
1470 | |
1471 // Rewrite and unlink classes. | |
1472 tty->print("Rewriting and unlinking classes ... "); | |
1473 // Make heap parsable | |
1474 ensure_parsability(false); // arg is actually don't care | |
1475 | |
1476 // Link any classes which got missed. (It's not quite clear why | |
1477 // they got missed.) This iteration would be unsafe if we weren't | |
1478 // single-threaded at this point; however we can't do it on the VM | |
1479 // thread because it requires object allocation. | |
1480 LinkClassesClosure lcc(Thread::current()); | |
1481 object_iterate(&lcc); | |
1482 tty->print_cr("done. "); | |
1483 | |
1484 // Create and dump the shared spaces. | |
1485 jint err = CompactingPermGenGen::dump_shared(class_promote_order, THREAD); | |
1486 if (err != JNI_OK) { | |
1487 fatal("Dumping shared spaces failed."); | |
1488 } | |
1489 | |
1490 } else { | |
1491 char errmsg[JVM_MAXPATHLEN]; | |
1492 hpi::lasterror(errmsg, JVM_MAXPATHLEN); | |
1493 tty->print_cr("Loading classlist failed: %s", errmsg); | |
1494 exit(1); | |
1495 } | |
1496 | |
1497 // Since various initialization steps have been undone by this process, | |
1498 // it is not reasonable to continue running a java process. | |
1499 exit(0); | |
1500 } |