Mercurial > hg > truffle
annotate src/share/vm/memory/universe.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author | ysr |
---|---|
date | Thu, 20 Nov 2008 16:56:09 -0800 |
parents | 1ee8caae33af |
children | 7d7a7c599c17 |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_universe.cpp.incl" | |
27 | |
// Known objects -- the canonical, VM-global instances of the metadata
// klasses, basic-type mirrors, pre-built arrays and preallocated error
// objects.  All are initialized during bootstrap (see genesis() and
// initialize_basic_type_mirrors() below) and scanned as GC roots by
// Universe::oops_do().

// Array klasses for the eight primitive types, plus the T_VOID+1-sized
// index table filled in by genesis().
klassOop Universe::_boolArrayKlassObj = NULL;
klassOop Universe::_byteArrayKlassObj = NULL;
klassOop Universe::_charArrayKlassObj = NULL;
klassOop Universe::_intArrayKlassObj = NULL;
klassOop Universe::_shortArrayKlassObj = NULL;
klassOop Universe::_longArrayKlassObj = NULL;
klassOop Universe::_singleArrayKlassObj = NULL;   // T_FLOAT ("single") arrays
klassOop Universe::_doubleArrayKlassObj = NULL;
klassOop Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
klassOop Universe::_objectArrayKlassObj = NULL;

// Klasses describing the VM's own metadata objects (methods, constant
// pools, etc.), created by the *KlassKlass/ *Klass create_klass() calls
// in genesis().
klassOop Universe::_symbolKlassObj = NULL;
klassOop Universe::_methodKlassObj = NULL;
klassOop Universe::_constMethodKlassObj = NULL;
klassOop Universe::_methodDataKlassObj = NULL;
klassOop Universe::_klassKlassObj = NULL;
klassOop Universe::_arrayKlassKlassObj = NULL;
klassOop Universe::_objArrayKlassKlassObj = NULL;
klassOop Universe::_typeArrayKlassKlassObj = NULL;
klassOop Universe::_instanceKlassKlassObj = NULL;
klassOop Universe::_constantPoolKlassObj = NULL;
klassOop Universe::_constantPoolCacheKlassObj = NULL;
klassOop Universe::_compiledICHolderKlassObj = NULL;
klassOop Universe::_systemObjArrayKlassObj = NULL;

// java.lang.Class mirrors for the primitive types, plus the mirror index
// table; both are populated by initialize_basic_type_mirrors().
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror = NULL;
oop Universe::_bool_mirror = NULL;
oop Universe::_char_mirror = NULL;
oop Universe::_long_mirror = NULL;
oop Universe::_short_mirror = NULL;
oop Universe::_void_mirror = NULL;
oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };

// Well-known thread groups.
oop Universe::_main_thread_group = NULL;
oop Universe::_system_thread_group = NULL;

// Shared empty arrays and the {Cloneable, Serializable} interface array
// used as the interface list of every array klass.
typeArrayOop Universe::_the_empty_byte_array = NULL;
typeArrayOop Universe::_the_empty_short_array = NULL;
typeArrayOop Universe::_the_empty_int_array = NULL;
objArrayOop Universe::_the_empty_system_obj_array = NULL;
objArrayOop Universe::_the_empty_class_klass_array = NULL;
objArrayOop Universe::_the_array_interfaces_array = NULL;

// Caches of frequently-called methodOops; their oops are reported to GC
// via oops_do() below.
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;

// Preallocated OutOfMemoryError instances, so an OOME can be thrown even
// when no heap is left to allocate one.  The array and avail-count are
// consumed by gen_out_of_memory_error().
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_perm_gen = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;

bool Universe::_verify_in_progress = false;

// Preallocated instances of other always-throwable exceptions.
oop Universe::_null_ptr_exception_instance = NULL;
oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
oop Universe::_emptySymbol = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)


// Heap
int Universe::_verify_count = 0;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_fully_initialized = false;

// Heap occupancy bookkeeping from the previous collection.
// NOTE(review): _heap_capacity_at_last_gc has no initializer here while
// _heap_used_at_last_gc is zero-initialized explicitly; as file-scope
// statics both are zero-initialized anyway.
size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

CollectedHeap* Universe::_collectedHeap = NULL;
address Universe::_heap_base = NULL;   // base for compressed-oop decoding
104 | |
105 void Universe::basic_type_classes_do(void f(klassOop)) { | |
106 f(boolArrayKlassObj()); | |
107 f(byteArrayKlassObj()); | |
108 f(charArrayKlassObj()); | |
109 f(intArrayKlassObj()); | |
110 f(shortArrayKlassObj()); | |
111 f(longArrayKlassObj()); | |
112 f(singleArrayKlassObj()); | |
113 f(doubleArrayKlassObj()); | |
114 } | |
115 | |
116 | |
117 void Universe::system_classes_do(void f(klassOop)) { | |
118 f(symbolKlassObj()); | |
119 f(methodKlassObj()); | |
120 f(constMethodKlassObj()); | |
121 f(methodDataKlassObj()); | |
122 f(klassKlassObj()); | |
123 f(arrayKlassKlassObj()); | |
124 f(objArrayKlassKlassObj()); | |
125 f(typeArrayKlassKlassObj()); | |
126 f(instanceKlassKlassObj()); | |
127 f(constantPoolKlassObj()); | |
128 f(systemObjArrayKlassObj()); | |
129 } | |
130 | |
// Report every oop root held by Universe to the closure f.  Called by the
// collectors during root scanning.  When do_all is true even NULL slots of
// _typeArrayKlassObjs are reported (used when every slot must be visited,
// e.g. for restoring/dumping shared state -- see the comment on _mirrors
// below for the same reasoning).
void Universe::oops_do(OopClosure* f, bool do_all) {

  // Basic-type mirrors.
  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  // It's important to iterate over these guys even if they are null,
  // since that's how shared heaps are restored.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  // Slots below T_BOOLEAN are never used.
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  // %%% Consider moving those "shared oops" over here with the others.
  f->do_oop((oop*)&_boolArrayKlassObj);
  f->do_oop((oop*)&_byteArrayKlassObj);
  f->do_oop((oop*)&_charArrayKlassObj);
  f->do_oop((oop*)&_intArrayKlassObj);
  f->do_oop((oop*)&_shortArrayKlassObj);
  f->do_oop((oop*)&_longArrayKlassObj);
  f->do_oop((oop*)&_singleArrayKlassObj);
  f->do_oop((oop*)&_doubleArrayKlassObj);
  f->do_oop((oop*)&_objectArrayKlassObj);
  {
    // The index table aliases the klasses above; non-NULL entries must sit
    // at or above T_BOOLEAN.  NULL entries are visited only when do_all.
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_oop((oop*)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_oop((oop*)&_typeArrayKlassObjs[i]);
      }
    }
  }
  // VM-internal metadata klasses.
  f->do_oop((oop*)&_symbolKlassObj);
  f->do_oop((oop*)&_methodKlassObj);
  f->do_oop((oop*)&_constMethodKlassObj);
  f->do_oop((oop*)&_methodDataKlassObj);
  f->do_oop((oop*)&_klassKlassObj);
  f->do_oop((oop*)&_arrayKlassKlassObj);
  f->do_oop((oop*)&_objArrayKlassKlassObj);
  f->do_oop((oop*)&_typeArrayKlassKlassObj);
  f->do_oop((oop*)&_instanceKlassKlassObj);
  f->do_oop((oop*)&_constantPoolKlassObj);
  f->do_oop((oop*)&_constantPoolCacheKlassObj);
  f->do_oop((oop*)&_compiledICHolderKlassObj);
  f->do_oop((oop*)&_systemObjArrayKlassObj);
  // Shared empty arrays and the array-interfaces array.
  f->do_oop((oop*)&_the_empty_byte_array);
  f->do_oop((oop*)&_the_empty_short_array);
  f->do_oop((oop*)&_the_empty_int_array);
  f->do_oop((oop*)&_the_empty_system_obj_array);
  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_array_interfaces_array);
  // Method caches hold methodOops; delegate to their own oops_do.
  _finalizer_register_cache->oops_do(f);
  _loader_addClass_cache->oops_do(f);
  _reflect_invoke_cache->oops_do(f);
  // Preallocated errors and exception instances.
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_perm_gen);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  if (_preallocated_out_of_memory_error_array != (oop)NULL) {   // NULL when DumpSharedSpaces
    f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  }
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  f->do_oop((oop*)&_emptySymbol);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}
208 | |
209 | |
210 void Universe::check_alignment(uintx size, uintx alignment, const char* name) { | |
211 if (size < alignment || size % alignment != 0) { | |
212 ResourceMark rm; | |
213 stringStream st; | |
214 st.print("Size of %s (%ld bytes) must be aligned to %ld bytes", name, size, alignment); | |
215 char* error = st.as_string(); | |
216 vm_exit_during_initialization(error); | |
217 } | |
218 } | |
219 | |
220 | |
// Bootstrap the core of the object system: create (or map from the shared
// archive) the metadata klasses, primitive array klasses and well-known
// arrays, load the initial classes through the SystemDictionary, wire up
// the array class hierarchy, and detect the JDK version of the bootclasspath.
// The ordering of steps in here is load-bearing; do not reorder.
void Universe::genesis(TRAPS) {
  ResourceMark rm;
  { FlagSetting fs(_bootstrapping, true);    // _bootstrapping is true only within this scope

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        // Create all metadata klasses from scratch.  The meta-klasses
        // (klassKlass and friends) must exist before anything else.
        _klassKlassObj = klassKlass::create_klass(CHECK);
        _arrayKlassKlassObj = arrayKlassKlass::create_klass(CHECK);

        _objArrayKlassKlassObj = objArrayKlassKlass::create_klass(CHECK);
        _instanceKlassKlassObj = instanceKlassKlass::create_klass(CHECK);
        _typeArrayKlassKlassObj = typeArrayKlassKlass::create_klass(CHECK);

        _symbolKlassObj = symbolKlass::create_klass(CHECK);

        _emptySymbol = oopFactory::new_symbol("", CHECK);

        // Primitive array klasses.
        _boolArrayKlassObj = typeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj = typeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
        _singleArrayKlassObj = typeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
        _doubleArrayKlassObj = typeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
        _byteArrayKlassObj = typeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
        _shortArrayKlassObj = typeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
        _intArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
        _longArrayKlassObj = typeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);

        // Index the primitive array klasses by BasicType.
        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;

        // Klasses describing methods, constant pools and related metadata.
        _methodKlassObj = methodKlass::create_klass(CHECK);
        _constMethodKlassObj = constMethodKlass::create_klass(CHECK);
        _methodDataKlassObj = methodDataKlass::create_klass(CHECK);
        _constantPoolKlassObj = constantPoolKlass::create_klass(CHECK);
        _constantPoolCacheKlassObj = constantPoolCacheKlass::create_klass(CHECK);

        _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
        _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);

        // Shared empty arrays.
        _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
        _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
        _the_empty_int_array = oopFactory::new_permanent_intArray(0, CHECK);
        _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK);

        // Filled with {Cloneable, Serializable} later, once those are loaded.
        _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
        _vm_exception = oopFactory::new_symbol("vm exception holder", CHECK);
      } else {
        // Shared archive in use: the klasses come pre-built from the mapped
        // file; only the C++ vtable pointers in the md region need patching
        // to this process's addresses.
        FileMapInfo *mapinfo = FileMapInfo::current_info();
        char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
        void** vtbl_list = (void**)buffer;
        init_self_patching_vtbl_list(vtbl_list,
                                     CompactingPermGenGen::vtbl_list_size);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    klassOop ok = SystemDictionary::object_klass();

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->obj_at(0) ==
             SystemDictionary::cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->obj_at(1) ==
             SystemDictionary::serializable_klass(), "u3");

      // Verify element klass for system obj array klass
      assert(objArrayKlass::cast(_systemObjArrayKlassObj)->element_klass() == ok, "u1");
      assert(objArrayKlass::cast(_systemObjArrayKlassObj)->bottom_klass() == ok, "u2");

      // Verify super class for the classes created above
      assert(Klass::cast(boolArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(charArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(singleArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(doubleArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(byteArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(shortArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(intArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(longArrayKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(constantPoolKlassObj() )->super() == ok, "u3");
      assert(Klass::cast(systemObjArrayKlassObj())->super() == ok, "u3");
    } else {
      // Set up shared interfaces array. (Do this before supers are set up.)
      _the_array_interfaces_array->obj_at_put(0, SystemDictionary::cloneable_klass());
      _the_array_interfaces_array->obj_at_put(1, SystemDictionary::serializable_klass());

      // Set element klass for system obj array klass
      objArrayKlass::cast(_systemObjArrayKlassObj)->set_element_klass(ok);
      objArrayKlass::cast(_systemObjArrayKlassObj)->set_bottom_klass(ok);

      // Set super class for the classes created above
      Klass::cast(boolArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(charArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(singleArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(doubleArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(byteArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(shortArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(intArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(longArrayKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(constantPoolKlassObj() )->initialize_supers(ok, CHECK);
      Klass::cast(systemObjArrayKlassObj())->initialize_supers(ok, CHECK);
      Klass::cast(boolArrayKlassObj() )->set_super(ok);
      Klass::cast(charArrayKlassObj() )->set_super(ok);
      Klass::cast(singleArrayKlassObj() )->set_super(ok);
      Klass::cast(doubleArrayKlassObj() )->set_super(ok);
      Klass::cast(byteArrayKlassObj() )->set_super(ok);
      Klass::cast(shortArrayKlassObj() )->set_super(ok);
      Klass::cast(intArrayKlassObj() )->set_super(ok);
      Klass::cast(longArrayKlassObj() )->set_super(ok);
      Klass::cast(constantPoolKlassObj() )->set_super(ok);
      Klass::cast(systemObjArrayKlassObj())->set_super(ok);
    }

    // Link the array klasses into java.lang.Object's subclass list.
    Klass::cast(boolArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(charArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(singleArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(doubleArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(byteArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(shortArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(intArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(longArrayKlassObj() )->append_to_sibling_list();
    Klass::cast(constantPoolKlassObj() )->append_to_sibling_list();
    Klass::cast(systemObjArrayKlassObj())->append_to_sibling_list();
  } // end of core bootstrapping

  // Initialize _objectArrayKlass after core bootstraping to make
  // sure the super class is set up properly for _objectArrayKlass.
  _objectArrayKlassObj = instanceKlass::
    cast(SystemDictionary::object_klass())->array_klass(1, CHECK);
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  // Probe for the marker classes newest-first; the first one present
  // determines the version.  Resolution failures are expected and ignored.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    klassOop k = SystemDictionary::resolve_or_null(
        vmSymbolHandles::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbolHandles::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbolHandles::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

#ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_system_objArray(size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      if (!UseConcMarkSweepGC) {
        // Allocate dummy in old generation
        oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_instance(CHECK);
        dummy_array->obj_at_put(i++, dummy);
      }
      // Allocate dummy in permanent generation
      oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_permanent_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
#endif
}
442 | |
443 | |
444 static inline void add_vtable(void** list, int* n, Klass* o, int count) { | |
445 list[(*n)++] = *(void**)&o->vtbl_value(); | |
446 guarantee((*n) <= count, "vtable list too small."); | |
447 } | |
448 | |
449 | |
450 void Universe::init_self_patching_vtbl_list(void** list, int count) { | |
451 int n = 0; | |
452 { klassKlass o; add_vtable(list, &n, &o, count); } | |
453 { arrayKlassKlass o; add_vtable(list, &n, &o, count); } | |
454 { objArrayKlassKlass o; add_vtable(list, &n, &o, count); } | |
455 { instanceKlassKlass o; add_vtable(list, &n, &o, count); } | |
456 { instanceKlass o; add_vtable(list, &n, &o, count); } | |
457 { instanceRefKlass o; add_vtable(list, &n, &o, count); } | |
458 { typeArrayKlassKlass o; add_vtable(list, &n, &o, count); } | |
459 { symbolKlass o; add_vtable(list, &n, &o, count); } | |
460 { typeArrayKlass o; add_vtable(list, &n, &o, count); } | |
461 { methodKlass o; add_vtable(list, &n, &o, count); } | |
462 { constMethodKlass o; add_vtable(list, &n, &o, count); } | |
463 { constantPoolKlass o; add_vtable(list, &n, &o, count); } | |
464 { constantPoolCacheKlass o; add_vtable(list, &n, &o, count); } | |
465 { objArrayKlass o; add_vtable(list, &n, &o, count); } | |
466 { methodDataKlass o; add_vtable(list, &n, &o, count); } | |
467 { compiledICHolderKlass o; add_vtable(list, &n, &o, count); } | |
468 } | |
469 | |
470 | |
471 class FixupMirrorClosure: public ObjectClosure { | |
472 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
473 virtual void do_object(oop obj) { |
0 | 474 if (obj->is_klass()) { |
475 EXCEPTION_MARK; | |
476 KlassHandle k(THREAD, klassOop(obj)); | |
477 // We will never reach the CATCH below since Exceptions::_throw will cause | |
478 // the VM to exit if an exception is thrown during initialization | |
479 java_lang_Class::create_mirror(k, CATCH); | |
480 // This call unconditionally creates a new mirror for k, | |
481 // and links in k's component_mirror field if k is an array. | |
482 // If k is an objArray, k's element type must already have | |
483 // a mirror. In other words, this closure must process | |
484 // the component type of an objArray k before it processes k. | |
485 // This works because the permgen iterator presents arrays | |
486 // and their component types in order of creation. | |
487 } | |
488 } | |
489 }; | |
490 | |
// Create (or, with shared spaces, verify) the java.lang.Class mirrors for
// the primitive types and record them both in the named fields and in the
// BasicType-indexed _mirrors table.
void Universe::initialize_basic_type_mirrors(TRAPS) {
  if (UseSharedSpaces) {
    // Mirrors were restored from the shared archive; just sanity-check.
    assert(_int_mirror != NULL, "already loaded");
    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
  } else {

    assert(_int_mirror==NULL, "basic type mirrors already initialized");
    _int_mirror =
      java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
    _float_mirror =
      java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
    _double_mirror =
      java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
    _byte_mirror =
      java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
    _bool_mirror =
      java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
    _char_mirror =
      java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
    _long_mirror =
      java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
    _short_mirror =
      java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
    _void_mirror =
      java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);

    // Populate the BasicType-indexed table from the named fields.
    _mirrors[T_INT] = _int_mirror;
    _mirrors[T_FLOAT] = _float_mirror;
    _mirrors[T_DOUBLE] = _double_mirror;
    _mirrors[T_BYTE] = _byte_mirror;
    _mirrors[T_BOOLEAN] = _bool_mirror;
    _mirrors[T_CHAR] = _char_mirror;
    _mirrors[T_LONG] = _long_mirror;
    _mirrors[T_SHORT] = _short_mirror;
    _mirrors[T_VOID] = _void_mirror;
    //_mirrors[T_OBJECT] = instanceKlass::cast(_object_klass)->java_mirror();
    //_mirrors[T_ARRAY] = instanceKlass::cast(_object_klass)->java_mirror();
  }
}
530 | |
531 void Universe::fixup_mirrors(TRAPS) { | |
532 // Bootstrap problem: all classes gets a mirror (java.lang.Class instance) assigned eagerly, | |
533 // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply | |
534 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note | |
535 // that the number of objects allocated at this point is very small. | |
536 assert(SystemDictionary::class_klass_loaded(), "java.lang.Class should be loaded"); | |
537 FixupMirrorClosure blk; | |
538 Universe::heap()->permanent_object_iterate(&blk); | |
539 } | |
540 | |
541 | |
542 static bool has_run_finalizers_on_exit = false; | |
543 | |
544 void Universe::run_finalizers_on_exit() { | |
545 if (has_run_finalizers_on_exit) return; | |
546 has_run_finalizers_on_exit = true; | |
547 | |
548 // Called on VM exit. This ought to be run in a separate thread. | |
549 if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit"); | |
550 { | |
551 PRESERVE_EXCEPTION_MARK; | |
552 KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass()); | |
553 JavaValue result(T_VOID); | |
554 JavaCalls::call_static( | |
555 &result, | |
556 finalizer_klass, | |
557 vmSymbolHandles::run_finalizers_on_exit_name(), | |
558 vmSymbolHandles::void_method_signature(), | |
559 THREAD | |
560 ); | |
561 // Ignore any pending exceptions | |
562 CLEAR_PENDING_EXCEPTION; | |
563 } | |
564 } | |
565 | |
566 | |
// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change we use handles for oops
//
// Recursively re-initialize the vtable of k_h and (for instance klasses)
// all of its subclasses, walking the subklass/next_sibling chain.
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h()->klass_part();
  klassVtable* vt = ko->vtable();
  // 'false' => do not checkconstraints (same flag as the itable pass below).
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    instanceKlass* ik = (instanceKlass*)ko;
    // The comma expression re-seats the handle on the next sibling each
    // iteration; the handle keeps the klassOop alive across a possible GC.
    for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->klass_part()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}
583 | |
584 | |
585 void initialize_itable_for_klass(klassOop k, TRAPS) { | |
586 instanceKlass::cast(k)->itable()->initialize_itable(false, CHECK); | |
587 } | |
588 | |
589 | |
590 void Universe::reinitialize_itables(TRAPS) { | |
591 SystemDictionary::classes_do(initialize_itable_for_klass, CHECK); | |
592 | |
593 } | |
594 | |
595 | |
596 bool Universe::on_page_boundary(void* addr) { | |
597 return ((uintptr_t) addr) % os::vm_page_size() == 0; | |
598 } | |
599 | |
600 | |
601 bool Universe::should_fill_in_stack_trace(Handle throwable) { | |
602 // never attempt to fill in the stack trace of preallocated errors that do not have | |
603 // backtrace. These errors are kept alive forever and may be "re-used" when all | |
604 // preallocated errors with backtrace have been consumed. Also need to avoid | |
605 // a potential loop which could happen if an out of memory occurs when attempting | |
606 // to allocate the backtrace. | |
607 return ((throwable() != Universe::_out_of_memory_error_java_heap) && | |
608 (throwable() != Universe::_out_of_memory_error_perm_gen) && | |
609 (throwable() != Universe::_out_of_memory_error_array_size) && | |
610 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit)); | |
611 } | |
612 | |
613 | |
614 oop Universe::gen_out_of_memory_error(oop default_err) { | |
615 // generate an out of memory error: | |
616 // - if there is a preallocated error with backtrace available then return it wth | |
617 // a filled in stack trace. | |
618 // - if there are no preallocated errors with backtrace available then return | |
619 // an error without backtrace. | |
620 int next; | |
621 if (_preallocated_out_of_memory_error_avail_count > 0) { | |
622 next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count); | |
623 assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt"); | |
624 } else { | |
625 next = -1; | |
626 } | |
627 if (next < 0) { | |
628 // all preallocated errors have been used. | |
629 // return default | |
630 return default_err; | |
631 } else { | |
632 // get the error object at the slot and set set it to NULL so that the | |
633 // array isn't keeping it alive anymore. | |
634 oop exc = preallocated_out_of_memory_errors()->obj_at(next); | |
635 assert(exc != NULL, "slot has been used already"); | |
636 preallocated_out_of_memory_errors()->obj_at_put(next, NULL); | |
637 | |
638 // use the message from the default error | |
639 oop msg = java_lang_Throwable::message(default_err); | |
640 assert(msg != NULL, "no message"); | |
641 java_lang_Throwable::set_message(exc, msg); | |
642 | |
643 // populate the stack trace and return it. | |
644 java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc); | |
645 return exc; | |
646 } | |
647 } | |
648 | |
649 static intptr_t non_oop_bits = 0; | |
650 | |
651 void* Universe::non_oop_word() { | |
652 // Neither the high bits nor the low bits of this value is allowed | |
653 // to look like (respectively) the high or low bits of a real oop. | |
654 // | |
655 // High and low are CPU-specific notions, but low always includes | |
656 // the low-order bit. Since oops are always aligned at least mod 4, | |
657 // setting the low-order bit will ensure that the low half of the | |
658 // word will never look like that of a real oop. | |
659 // | |
660 // Using the OS-supplied non-memory-address word (usually 0 or -1) | |
661 // will take care of the high bits, however many there are. | |
662 | |
663 if (non_oop_bits == 0) { | |
664 non_oop_bits = (intptr_t)os::non_memory_address_word() | 1; | |
665 } | |
666 | |
667 return (void*)non_oop_bits; | |
668 } | |
669 | |
670 jint universe_init() { | |
671 assert(!Universe::_fully_initialized, "called after initialize_vtables"); | |
672 guarantee(1 << LogHeapWordSize == sizeof(HeapWord), | |
673 "LogHeapWordSize is incorrect."); | |
674 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?"); | |
675 guarantee(sizeof(oop) % sizeof(HeapWord) == 0, | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
676 "oop size is not not a multiple of HeapWord size"); |
0 | 677 TraceTime timer("Genesis", TraceStartupTime); |
678 GC_locker::lock(); // do not allow gc during bootstrapping | |
679 JavaClasses::compute_hard_coded_offsets(); | |
680 | |
681 // Get map info from shared archive file. | |
682 if (DumpSharedSpaces) | |
683 UseSharedSpaces = false; | |
684 | |
685 FileMapInfo* mapinfo = NULL; | |
686 if (UseSharedSpaces) { | |
687 mapinfo = NEW_C_HEAP_OBJ(FileMapInfo); | |
688 memset(mapinfo, 0, sizeof(FileMapInfo)); | |
689 | |
690 // Open the shared archive file, read and validate the header. If | |
691 // initialization files, shared spaces [UseSharedSpaces] are | |
692 // disabled and the file is closed. | |
693 | |
694 if (mapinfo->initialize()) { | |
695 FileMapInfo::set_current_info(mapinfo); | |
696 } else { | |
697 assert(!mapinfo->is_open() && !UseSharedSpaces, | |
698 "archive file not closed or shared spaces not disabled."); | |
699 } | |
700 } | |
701 | |
702 jint status = Universe::initialize_heap(); | |
703 if (status != JNI_OK) { | |
704 return status; | |
705 } | |
706 | |
707 // We have a heap so create the methodOop caches before | |
708 // CompactingPermGenGen::initialize_oops() tries to populate them. | |
709 Universe::_finalizer_register_cache = new LatestMethodOopCache(); | |
710 Universe::_loader_addClass_cache = new LatestMethodOopCache(); | |
711 Universe::_reflect_invoke_cache = new ActiveMethodOopsCache(); | |
712 | |
713 if (UseSharedSpaces) { | |
714 | |
715 // Read the data structures supporting the shared spaces (shared | |
716 // system dictionary, symbol table, etc.). After that, access to | |
717 // the file (other than the mapped regions) is no longer needed, and | |
718 // the file is closed. Closing the file does not affect the | |
719 // currently mapped regions. | |
720 | |
721 CompactingPermGenGen::initialize_oops(); | |
722 mapinfo->close(); | |
723 | |
724 } else { | |
725 SymbolTable::create_table(); | |
726 StringTable::create_table(); | |
727 ClassLoader::create_package_info_table(); | |
728 } | |
729 | |
730 return JNI_OK; | |
731 } | |
732 | |
733 jint Universe::initialize_heap() { | |
734 | |
735 if (UseParallelGC) { | |
736 #ifndef SERIALGC | |
737 Universe::_collectedHeap = new ParallelScavengeHeap(); | |
738 #else // SERIALGC | |
739 fatal("UseParallelGC not supported in java kernel vm."); | |
740 #endif // SERIALGC | |
741 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
742 } else if (UseG1GC) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
743 #ifndef SERIALGC |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
744 G1CollectorPolicy* g1p = new G1CollectorPolicy_BestRegionsFirst(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
745 G1CollectedHeap* g1h = new G1CollectedHeap(g1p); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
746 Universe::_collectedHeap = g1h; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
747 #else // SERIALGC |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
748 fatal("UseG1GC not supported in java kernel vm."); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
749 #endif // SERIALGC |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
750 |
0 | 751 } else { |
752 GenCollectorPolicy *gc_policy; | |
753 | |
754 if (UseSerialGC) { | |
755 gc_policy = new MarkSweepPolicy(); | |
756 } else if (UseConcMarkSweepGC) { | |
757 #ifndef SERIALGC | |
758 if (UseAdaptiveSizePolicy) { | |
759 gc_policy = new ASConcurrentMarkSweepPolicy(); | |
760 } else { | |
761 gc_policy = new ConcurrentMarkSweepPolicy(); | |
762 } | |
763 #else // SERIALGC | |
764 fatal("UseConcMarkSweepGC not supported in java kernel vm."); | |
765 #endif // SERIALGC | |
766 } else { // default old generation | |
767 gc_policy = new MarkSweepPolicy(); | |
768 } | |
769 | |
770 Universe::_collectedHeap = new GenCollectedHeap(gc_policy); | |
771 } | |
772 | |
773 jint status = Universe::heap()->initialize(); | |
774 if (status != JNI_OK) { | |
775 return status; | |
776 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
777 if (UseCompressedOops) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
778 // Subtract a page because something can get allocated at heap base. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
779 // This also makes implicit null checking work, because the |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
780 // memory+1 page below heap_base needs to cause a signal. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
781 // See needs_explicit_null_check. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
782 // Only set the heap base for compressed oops because it indicates |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
783 // compressed oops for pstack code. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
784 Universe::_heap_base = Universe::heap()->base() - os::vm_page_size(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
785 } |
0 | 786 |
787 // We will never reach the CATCH below since Exceptions::_throw will cause | |
788 // the VM to exit if an exception is thrown during initialization | |
789 | |
790 if (UseTLAB) { | |
791 assert(Universe::heap()->supports_tlab_allocation(), | |
792 "Should support thread-local allocation buffers"); | |
793 ThreadLocalAllocBuffer::startup_initialization(); | |
794 } | |
795 return JNI_OK; | |
796 } | |
797 | |
798 // It's the caller's repsonsibility to ensure glitch-freedom | |
799 // (if required). | |
800 void Universe::update_heap_info_at_gc() { | |
801 _heap_capacity_at_last_gc = heap()->capacity(); | |
802 _heap_used_at_last_gc = heap()->used(); | |
803 } | |
804 | |
805 | |
806 | |
807 void universe2_init() { | |
808 EXCEPTION_MARK; | |
809 Universe::genesis(CATCH); | |
810 // Although we'd like to verify here that the state of the heap | |
811 // is good, we can't because the main thread has not yet added | |
812 // itself to the threads list (so, using current interfaces | |
813 // we can't "fill" its TLAB), unless TLABs are disabled. | |
814 if (VerifyBeforeGC && !UseTLAB && | |
815 Universe::heap()->total_collections() >= VerifyGCStartAt) { | |
816 Universe::heap()->prepare_for_verify(); | |
817 Universe::verify(); // make sure we're starting with a clean slate | |
818 } | |
819 } | |
820 | |
821 | |
822 // This function is defined in JVM.cpp | |
823 extern void initialize_converter_functions(); | |
824 | |
825 bool universe_post_init() { | |
826 Universe::_fully_initialized = true; | |
827 EXCEPTION_MARK; | |
828 { ResourceMark rm; | |
829 Interpreter::initialize(); // needed for interpreter entry points | |
830 if (!UseSharedSpaces) { | |
831 KlassHandle ok_h(THREAD, SystemDictionary::object_klass()); | |
832 Universe::reinitialize_vtable_of(ok_h, CHECK_false); | |
833 Universe::reinitialize_itables(CHECK_false); | |
834 } | |
835 } | |
836 | |
837 klassOop k; | |
838 instanceKlassHandle k_h; | |
839 if (!UseSharedSpaces) { | |
840 // Setup preallocated empty java.lang.Class array | |
841 Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_false); | |
842 // Setup preallocated OutOfMemoryError errors | |
843 k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK_false); | |
844 k_h = instanceKlassHandle(THREAD, k); | |
845 Universe::_out_of_memory_error_java_heap = k_h->allocate_permanent_instance(CHECK_false); | |
846 Universe::_out_of_memory_error_perm_gen = k_h->allocate_permanent_instance(CHECK_false); | |
847 Universe::_out_of_memory_error_array_size = k_h->allocate_permanent_instance(CHECK_false); | |
848 Universe::_out_of_memory_error_gc_overhead_limit = | |
849 k_h->allocate_permanent_instance(CHECK_false); | |
850 | |
851 // Setup preallocated NullPointerException | |
852 // (this is currently used for a cheap & dirty solution in compiler exception handling) | |
853 k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_NullPointerException(), true, CHECK_false); | |
854 Universe::_null_ptr_exception_instance = instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false); | |
855 // Setup preallocated ArithmeticException | |
856 // (this is currently used for a cheap & dirty solution in compiler exception handling) | |
857 k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_ArithmeticException(), true, CHECK_false); | |
858 Universe::_arithmetic_exception_instance = instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false); | |
859 // Virtual Machine Error for when we get into a situation we can't resolve | |
860 k = SystemDictionary::resolve_or_fail( | |
861 vmSymbolHandles::java_lang_VirtualMachineError(), true, CHECK_false); | |
862 bool linked = instanceKlass::cast(k)->link_class_or_fail(CHECK_false); | |
863 if (!linked) { | |
864 tty->print_cr("Unable to link/verify VirtualMachineError class"); | |
865 return false; // initialization failed | |
866 } | |
867 Universe::_virtual_machine_error_instance = | |
868 instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false); | |
869 } | |
870 if (!DumpSharedSpaces) { | |
871 // These are the only Java fields that are currently set during shared space dumping. | |
872 // We prefer to not handle this generally, so we always reinitialize these detail messages. | |
873 Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false); | |
874 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg()); | |
875 | |
876 msg = java_lang_String::create_from_str("PermGen space", CHECK_false); | |
877 java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg()); | |
878 | |
879 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false); | |
880 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg()); | |
881 | |
882 msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false); | |
883 java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg()); | |
884 | |
885 msg = java_lang_String::create_from_str("/ by zero", CHECK_false); | |
886 java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg()); | |
887 | |
888 // Setup the array of errors that have preallocated backtrace | |
889 k = Universe::_out_of_memory_error_java_heap->klass(); | |
890 assert(k->klass_part()->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error"); | |
891 k_h = instanceKlassHandle(THREAD, k); | |
892 | |
893 int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0; | |
894 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false); | |
895 for (int i=0; i<len; i++) { | |
896 oop err = k_h->allocate_permanent_instance(CHECK_false); | |
897 Handle err_h = Handle(THREAD, err); | |
898 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false); | |
899 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h()); | |
900 } | |
901 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len; | |
902 } | |
903 | |
904 | |
905 // Setup static method for registering finalizers | |
906 // The finalizer klass must be linked before looking up the method, in | |
907 // case it needs to get rewritten. | |
908 instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false); | |
909 methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method( | |
910 vmSymbols::register_method_name(), | |
911 vmSymbols::register_method_signature()); | |
912 if (m == NULL || !m->is_static()) { | |
913 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), | |
914 "java.lang.ref.Finalizer.register", false); | |
915 } | |
916 Universe::_finalizer_register_cache->init( | |
917 SystemDictionary::finalizer_klass(), m, CHECK_false); | |
918 | |
919 // Resolve on first use and initialize class. | |
920 // Note: No race-condition here, since a resolve will always return the same result | |
921 | |
922 // Setup method for security checks | |
923 k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false); | |
924 k_h = instanceKlassHandle(THREAD, k); | |
925 k_h->link_class(CHECK_false); | |
926 m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_array_object_object_signature()); | |
927 if (m == NULL || m->is_static()) { | |
928 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), | |
929 "java.lang.reflect.Method.invoke", false); | |
930 } | |
931 Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false); | |
932 | |
933 // Setup method for registering loaded classes in class loader vector | |
934 instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false); | |
935 m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature()); | |
936 if (m == NULL || m->is_static()) { | |
937 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), | |
938 "java.lang.ClassLoader.addClass", false); | |
939 } | |
940 Universe::_loader_addClass_cache->init( | |
941 SystemDictionary::classloader_klass(), m, CHECK_false); | |
942 | |
943 // The folowing is initializing converter functions for serialization in | |
944 // JVM.cpp. If we clean up the StrictMath code above we may want to find | |
945 // a better solution for this as well. | |
946 initialize_converter_functions(); | |
947 | |
948 // This needs to be done before the first scavenge/gc, since | |
949 // it's an input to soft ref clearing policy. | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
950 { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
951 MutexLocker x(Heap_lock); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
952 Universe::update_heap_info_at_gc(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
953 } |
0 | 954 |
955 // ("weak") refs processing infrastructure initialization | |
956 Universe::heap()->post_initialize(); | |
957 | |
958 GC_locker::unlock(); // allow gc after bootstrapping | |
959 | |
960 MemoryService::set_universe_heap(Universe::_collectedHeap); | |
961 return true; | |
962 } | |
963 | |
964 | |
// Cache the vtable length of java.lang.Object (computed by the class
// loader); every other class's vtable is at least this long.
void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}
968 | |
969 | |
970 // %%% The Universe::flush_foo methods belong in CodeCache. | |
971 | |
972 // Flushes compiled methods dependent on dependee. | |
973 void Universe::flush_dependents_on(instanceKlassHandle dependee) { | |
974 assert_lock_strong(Compile_lock); | |
975 | |
976 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return; | |
977 | |
978 // CodeCache can only be updated by a thread_in_VM and they will all be | |
979 // stopped dring the safepoint so CodeCache will be safe to update without | |
980 // holding the CodeCache_lock. | |
981 | |
982 DepChange changes(dependee); | |
983 | |
984 // Compute the dependent nmethods | |
985 if (CodeCache::mark_for_deoptimization(changes) > 0) { | |
986 // At least one nmethod has been marked for deoptimization | |
987 VM_Deoptimize op; | |
988 VMThread::execute(&op); | |
989 } | |
990 } | |
991 | |
#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Mark the dependent nmethods; bail out if none were marked.
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) <= 0) {
    return;
  }

  // All this already happens inside a VM_Operation, so we'll do all the work here.
  // Stuff copied from VM_Deoptimize and modified slightly.

  // We do not want any GCs to happen while we are in the middle of this VM operation
  ResourceMark rm;
  DeoptimizationMarker dm;

  // Deoptimize all activations depending on marked nmethods
  Deoptimization::deoptimize_dependents();

  // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
  CodeCache::make_marked_nmethods_not_entrant();
}
#endif // HOTSWAP
1022 | |
1023 | |
1024 // Flushes compiled methods dependent on dependee | |
1025 void Universe::flush_dependents_on_method(methodHandle m_h) { | |
1026 // --- Compile_lock is not held. However we are at a safepoint. | |
1027 assert_locked_or_safepoint(Compile_lock); | |
1028 | |
1029 // CodeCache can only be updated by a thread_in_VM and they will all be | |
1030 // stopped dring the safepoint so CodeCache will be safe to update without | |
1031 // holding the CodeCache_lock. | |
1032 | |
1033 // Compute the dependent nmethods | |
1034 if (CodeCache::mark_for_deoptimization(m_h()) > 0) { | |
1035 // At least one nmethod has been marked for deoptimization | |
1036 | |
1037 // All this already happens inside a VM_Operation, so we'll do all the work here. | |
1038 // Stuff copied from VM_Deoptimize and modified slightly. | |
1039 | |
1040 // We do not want any GCs to happen while we are in the middle of this VM operation | |
1041 ResourceMark rm; | |
1042 DeoptimizationMarker dm; | |
1043 | |
1044 // Deoptimize all activations depending on marked nmethods | |
1045 Deoptimization::deoptimize_dependents(); | |
1046 | |
1047 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies) | |
1048 CodeCache::make_marked_nmethods_not_entrant(); | |
1049 } | |
1050 } | |
1051 | |
1052 void Universe::print() { print_on(gclog_or_tty); } | |
1053 | |
// Print a "Heap" header followed by the heap's own summary to st.
void Universe::print_on(outputStream* st) {
  st->print_cr("Heap");
  heap()->print_on(st);
}
1058 | |
1059 void Universe::print_heap_at_SIGBREAK() { | |
1060 if (PrintHeapAtSIGBREAK) { | |
1061 MutexLocker hl(Heap_lock); | |
1062 print_on(tty); | |
1063 tty->cr(); | |
1064 tty->flush(); | |
1065 } | |
1066 } | |
1067 | |
// Log heap state ahead of a collection; the opening '{' is matched by
// the '}' printed in print_heap_after_gc().
void Universe::print_heap_before_gc(outputStream* st) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  heap()->print_on(st);
}
1074 | |
// Log heap state after a collection and close the '{' opened by
// print_heap_before_gc().
void Universe::print_heap_after_gc(outputStream* st) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  heap()->print_on(st);
  st->print_cr("}");
}
1082 | |
// Verify the consistency of the major VM data structures: threads, heap,
// symbol and string tables, code cache, system dictionary, JNI handles
// and the C heap.  Progress tags are printed to the GC log unless
// 'silent' is set; 'allow_dirty' is forwarded to the heap's verifier.
void Universe::verify(bool allow_dirty, bool silent) {
  if (SharedSkipVerify) {
    return;
  }

  // The use of _verify_in_progress is a temporary work around for
  // 6320749.  Don't bother with a creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straight forward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
         "DPT should not be active during verification "
         "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  heap()->verify(allow_dirty, silent);

  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    // CodeCache must be walked under its lock (no safepoint check: we
    // may already be at a safepoint).
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}
1128 | |
1129 // Oop verification (see MacroAssembler::verify_oop) | |
1130 | |
// Cached (mask, bits) pairs used by MacroAssembler::verify_oop.  The
// initial {0, -1} state means "not yet computed"; see calculate_verify_data.
static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1133 | |
1134 | |
// Compute a (mask, bits) pair for the interval [low_boundary,
// high_boundary): every pointer p that can address an object in the
// interval satisfies (p & mask) == bits, and the mask additionally
// enforces object alignment.  Results are stored into verify_data[0]
// (mask) and verify_data[1] (bits) and must be stable across calls.
static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = oopDesc::header_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  // (after the loop, mask keeps exactly the high-order bits on which
  // min and max agree)
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  // If the pair was computed before, it must come out the same.
  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}
1168 | |
1169 | |
1170 // Oop verification (see MacroAssembler::verify_oop) | |
1171 #ifndef PRODUCT | |
1172 | |
// Recompute the (mask, bits) pair covering the heap's reserved region and
// return the mask half; see calculate_verify_data for the encoding.
uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}
1180 | |
1181 | |
1182 | |
// Return the bits half of the oop verification pair; the pair is
// refreshed as a side effect of verify_oop_mask().
uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}
1187 | |
1188 | |
1189 uintptr_t Universe::verify_klass_mask() { | |
1190 /* $$$ | |
1191 // A klass can never live in the new space. Since the new and old | |
1192 // spaces can change size, we must settle for bounds-checking against | |
1193 // the bottom of the world, plus the smallest possible new and old | |
1194 // space sizes that may arise during execution. | |
1195 size_t min_new_size = Universe::new_size(); // in bytes | |
1196 size_t min_old_size = Universe::old_size(); // in bytes | |
1197 calculate_verify_data(_verify_klass_data, | |
1198 (HeapWord*)((uintptr_t)_new_gen->low_boundary + min_new_size + min_old_size), | |
1199 _perm_gen->high_boundary); | |
1200 */ | |
1201 // Why doesn't the above just say that klass's always live in the perm | |
1202 // gen? I'll see if that seems to work... | |
1203 MemRegion permanent_reserved; | |
1204 switch (Universe::heap()->kind()) { | |
1205 default: | |
1206 // ???: What if a CollectedHeap doesn't have a permanent generation? | |
1207 ShouldNotReachHere(); | |
1208 break; | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
1209 case CollectedHeap::GenCollectedHeap: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
1210 case CollectedHeap::G1CollectedHeap: { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
1211 SharedHeap* sh = (SharedHeap*) Universe::heap(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
1212 permanent_reserved = sh->perm_gen()->reserved(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
1213 break; |
0 | 1214 } |
1215 #ifndef SERIALGC | |
1216 case CollectedHeap::ParallelScavengeHeap: { | |
1217 ParallelScavengeHeap* psh = (ParallelScavengeHeap*) Universe::heap(); | |
1218 permanent_reserved = psh->perm_gen()->reserved(); | |
1219 break; | |
1220 } | |
1221 #endif // SERIALGC | |
1222 } | |
1223 calculate_verify_data(_verify_klass_data, | |
1224 permanent_reserved.start(), | |
1225 permanent_reserved.end()); | |
1226 | |
1227 return _verify_klass_data[0]; | |
1228 } | |
1229 | |
1230 | |
1231 | |
// Return the bits half of the klass verification pair; the pair is
// refreshed as a side effect of verify_klass_mask().
uintptr_t Universe::verify_klass_bits() {
  verify_klass_mask();
  return _verify_klass_data[1];
}
1236 | |
1237 | |
// Mask selecting the mark-word bits that verification checks: only the
// lock bits are constrained.
uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}
1241 | |
1242 | |
1243 | |
// Expected value of the mark-word bits selected by verify_mark_mask():
// the prototype mark, which must not carry bits outside the mask.
uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
1250 #endif // PRODUCT | |
1251 | |
1252 | |
// Force computation of all cached verification masks/bits so that later
// queries (e.g. from generated code) observe stable values.
void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
  verify_klass_mask();
  verify_klass_bits();
}
1261 | |
1262 | |
// Record the klass and the idnum of the method to cache.  The idnum is
// stored instead of the methodOop itself so the cache stays valid across
// class redefinition (see get_methodOop / is_same_method).
void CommonMethodOopCache::init(klassOop k, methodOop m, TRAPS) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}
1277 | |
1278 | |
1279 ActiveMethodOopsCache::~ActiveMethodOopsCache() { | |
1280 if (_prev_methods != NULL) { | |
1281 for (int i = _prev_methods->length() - 1; i >= 0; i--) { | |
1282 jweak method_ref = _prev_methods->at(i); | |
1283 if (method_ref != NULL) { | |
1284 JNIHandles::destroy_weak_global(method_ref); | |
1285 } | |
1286 } | |
1287 delete _prev_methods; | |
1288 _prev_methods = NULL; | |
1289 } | |
1290 } | |
1291 | |
1292 | |
1293 void ActiveMethodOopsCache::add_previous_version(const methodOop method) { | |
1294 assert(Thread::current()->is_VM_thread(), | |
1295 "only VMThread can add previous versions"); | |
1296 | |
1297 if (_prev_methods == NULL) { | |
1298 // This is the first previous version so make some space. | |
1299 // Start with 2 elements under the assumption that the class | |
1300 // won't be redefined much. | |
1301 _prev_methods = new (ResourceObj::C_HEAP) GrowableArray<jweak>(2, true); | |
1302 } | |
1303 | |
1304 // RC_TRACE macro has an embedded ResourceMark | |
1305 RC_TRACE(0x00000100, | |
1306 ("add: %s(%s): adding prev version ref for cached method @%d", | |
1307 method->name()->as_C_string(), method->signature()->as_C_string(), | |
1308 _prev_methods->length())); | |
1309 | |
1310 methodHandle method_h(method); | |
1311 jweak method_ref = JNIHandles::make_weak_global(method_h); | |
1312 _prev_methods->append(method_ref); | |
1313 | |
1314 // Using weak references allows previous versions of the cached | |
1315 // method to be GC'ed when they are no longer needed. Since the | |
1316 // caller is the VMThread and we are at a safepoint, this is a good | |
1317 // time to clear out unused weak references. | |
1318 | |
1319 for (int i = _prev_methods->length() - 1; i >= 0; i--) { | |
1320 jweak method_ref = _prev_methods->at(i); | |
1321 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); | |
1322 if (method_ref == NULL) { | |
1323 _prev_methods->remove_at(i); | |
1324 // Since we are traversing the array backwards, we don't have to | |
1325 // do anything special with the index. | |
1326 continue; // robustness | |
1327 } | |
1328 | |
1329 methodOop m = (methodOop)JNIHandles::resolve(method_ref); | |
1330 if (m == NULL) { | |
1331 // this method entry has been GC'ed so remove it | |
1332 JNIHandles::destroy_weak_global(method_ref); | |
1333 _prev_methods->remove_at(i); | |
1334 } else { | |
1335 // RC_TRACE macro has an embedded ResourceMark | |
1336 RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive", | |
1337 m->name()->as_C_string(), m->signature()->as_C_string(), i)); | |
1338 } | |
1339 } | |
1340 } // end add_previous_version() | |
1341 | |
1342 | |
1343 bool ActiveMethodOopsCache::is_same_method(const methodOop method) const { | |
1344 instanceKlass* ik = instanceKlass::cast(klass()); | |
1345 methodOop check_method = ik->method_with_idnum(method_idnum()); | |
1346 assert(check_method != NULL, "sanity check"); | |
1347 if (check_method == method) { | |
1348 // done with the easy case | |
1349 return true; | |
1350 } | |
1351 | |
1352 if (_prev_methods != NULL) { | |
1353 // The cached method has been redefined at least once so search | |
1354 // the previous versions for a match. | |
1355 for (int i = 0; i < _prev_methods->length(); i++) { | |
1356 jweak method_ref = _prev_methods->at(i); | |
1357 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); | |
1358 if (method_ref == NULL) { | |
1359 continue; // robustness | |
1360 } | |
1361 | |
1362 check_method = (methodOop)JNIHandles::resolve(method_ref); | |
1363 if (check_method == method) { | |
1364 // a previous version matches | |
1365 return true; | |
1366 } | |
1367 } | |
1368 } | |
1369 | |
1370 // either no previous versions or no previous version matched | |
1371 return false; | |
1372 } | |
1373 | |
1374 | |
// Return the current (latest) version of the cached method; the lookup
// goes through the method idnum so it survives class redefinition.
methodOop LatestMethodOopCache::get_methodOop() {
  instanceKlass* ik = instanceKlass::cast(klass());
  methodOop m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}
1381 | |
1382 | |
1383 #ifdef ASSERT | |
1384 // Release dummy object(s) at bottom of heap | |
1385 bool Universe::release_fullgc_alot_dummy() { | |
1386 MutexLocker ml(FullGCALot_lock); | |
1387 if (_fullgc_alot_dummy_array != NULL) { | |
1388 if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) { | |
1389 // No more dummies to release, release entire array instead | |
1390 _fullgc_alot_dummy_array = NULL; | |
1391 return false; | |
1392 } | |
1393 if (!UseConcMarkSweepGC) { | |
1394 // Release dummy at bottom of old generation | |
1395 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL); | |
1396 } | |
1397 // Release dummy at bottom of permanent generation | |
1398 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL); | |
1399 } | |
1400 return true; | |
1401 } | |
1402 | |
1403 #endif // ASSERT |