Mercurial > hg > graal-jvmci-8
annotate src/share/vm/runtime/jniHandles.cpp @ 3979:4dfb2df418f2
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
author | johnc |
---|---|
date | Thu, 22 Sep 2011 10:57:37 -0700 |
parents | 1d1603768966 |
children | f08d439fab8c |
rev | line source |
---|---|
0 | 1 /* |
2426
1d1603768966
7010070: Update all 2010 Oracle-changed OpenJDK files to have the proper copyright dates - second pass
trims
parents:
2147
diff
changeset
|
2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/systemDictionary.hpp" | |
27 #include "oops/oop.inline.hpp" | |
2147
9afee0b9fc1d
7012505: BreakpointWithFullGC.sh fails with Internal Error (src/share/vm/oops/methodOop.cpp:220)
kamg
parents:
2125
diff
changeset
|
28 #include "prims/jvmtiExport.hpp" |
1972 | 29 #include "runtime/jniHandles.hpp" |
30 #include "runtime/mutexLocker.hpp" | |
31 #ifdef TARGET_OS_FAMILY_linux | |
32 # include "thread_linux.inline.hpp" | |
33 #endif | |
34 #ifdef TARGET_OS_FAMILY_solaris | |
35 # include "thread_solaris.inline.hpp" | |
36 #endif | |
37 #ifdef TARGET_OS_FAMILY_windows | |
38 # include "thread_windows.inline.hpp" | |
39 #endif | |
0 | 40 |
41 | |
// Block chain holding all strong global JNI handles.
JNIHandleBlock* JNIHandles::_global_handles = NULL;
// Block chain holding all weak global JNI handles (cleared by GC when dead).
JNIHandleBlock* JNIHandles::_weak_global_handles = NULL;
// Sentinel object stored into a handle slot when the handle is deleted;
// allocate_handle() recognizes it and reuses the slot.
oop JNIHandles::_deleted_handle = NULL;
45 | |
46 | |
47 jobject JNIHandles::make_local(oop obj) { | |
48 if (obj == NULL) { | |
49 return NULL; // ignore null handles | |
50 } else { | |
51 Thread* thread = Thread::current(); | |
52 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
53 return thread->active_handles()->allocate_handle(obj); | |
54 } | |
55 } | |
56 | |
57 | |
58 // optimized versions | |
59 | |
60 jobject JNIHandles::make_local(Thread* thread, oop obj) { | |
61 if (obj == NULL) { | |
62 return NULL; // ignore null handles | |
63 } else { | |
64 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
65 return thread->active_handles()->allocate_handle(obj); | |
66 } | |
67 } | |
68 | |
69 | |
70 jobject JNIHandles::make_local(JNIEnv* env, oop obj) { | |
71 if (obj == NULL) { | |
72 return NULL; // ignore null handles | |
73 } else { | |
74 JavaThread* thread = JavaThread::thread_from_jni_environment(env); | |
75 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
76 return thread->active_handles()->allocate_handle(obj); | |
77 } | |
78 } | |
79 | |
80 | |
81 jobject JNIHandles::make_global(Handle obj) { | |
1616
38e8278318ca
6656830: assert((*p)->is_oop(),"expected an oop while scanning weak refs")
never
parents:
1552
diff
changeset
|
82 assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); |
0 | 83 jobject res = NULL; |
84 if (!obj.is_null()) { | |
85 // ignore null handles | |
86 MutexLocker ml(JNIGlobalHandle_lock); | |
87 assert(Universe::heap()->is_in_reserved(obj()), "sanity check"); | |
88 res = _global_handles->allocate_handle(obj()); | |
89 } else { | |
90 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); | |
91 } | |
92 | |
93 return res; | |
94 } | |
95 | |
96 | |
97 jobject JNIHandles::make_weak_global(Handle obj) { | |
1616
38e8278318ca
6656830: assert((*p)->is_oop(),"expected an oop while scanning weak refs")
never
parents:
1552
diff
changeset
|
98 assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); |
0 | 99 jobject res = NULL; |
100 if (!obj.is_null()) { | |
101 // ignore null handles | |
102 MutexLocker ml(JNIGlobalHandle_lock); | |
103 assert(Universe::heap()->is_in_reserved(obj()), "sanity check"); | |
104 res = _weak_global_handles->allocate_handle(obj()); | |
105 } else { | |
106 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); | |
107 } | |
108 return res; | |
109 } | |
110 | |
111 jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { | |
112 return (jmethodID) make_weak_global(mh); | |
113 } | |
114 | |
115 | |
116 | |
117 void JNIHandles::change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh) { | |
118 MutexLocker ml(JNIGlobalHandle_lock); // Is this necessary? | |
119 Handle obj = (Handle)mh; | |
120 oop* jobj = (oop*)jmid; | |
121 *jobj = obj(); | |
122 } | |
123 | |
124 | |
125 void JNIHandles::destroy_global(jobject handle) { | |
126 if (handle != NULL) { | |
127 assert(is_global_handle(handle), "Invalid delete of global JNI handle"); | |
128 *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it | |
129 } | |
130 } | |
131 | |
132 | |
133 void JNIHandles::destroy_weak_global(jobject handle) { | |
134 if (handle != NULL) { | |
135 assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle"); | |
136 *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it | |
137 } | |
138 } | |
139 | |
140 void JNIHandles::destroy_jmethod_id(jmethodID mid) { | |
141 destroy_weak_global((jobject)mid); | |
142 } | |
143 | |
144 | |
// Apply f to all strong JNI-handle roots: the deleted-handle sentinel
// (which must be kept alive since live slots may point at it) and every
// handle in the global block chain.
void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}
149 | |
150 | |
// Apply f to weak global handles whose referents is_alive reports live;
// dead referents are cleared (see JNIHandleBlock::weak_oops_do).
void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}
154 | |
155 | |
// One-time VM-startup setup: allocate the global and weak-global handle
// blocks and the sentinel object used to mark deleted handle slots.
void JNIHandles::initialize() {
  _global_handles = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
  klassOop k = SystemDictionary::Object_klass();
  // The sentinel is a plain java.lang.Object in permanent space.
  _deleted_handle = instanceKlass::cast(k)->allocate_permanent_instance(CATCH);
}
165 | |
166 | |
167 bool JNIHandles::is_local_handle(Thread* thread, jobject handle) { | |
168 JNIHandleBlock* block = thread->active_handles(); | |
169 | |
170 // Look back past possible native calls to jni_PushLocalFrame. | |
171 while (block != NULL) { | |
172 if (block->chain_contains(handle)) { | |
173 return true; | |
174 } | |
175 block = block->pop_frame_link(); | |
176 } | |
177 return false; | |
178 } | |
179 | |
180 | |
181 // Determine if the handle is somewhere in the current thread's stack. | |
182 // We easily can't isolate any particular stack frame the handle might | |
183 // come from, so we'll check the whole stack. | |
184 | |
185 bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) { | |
186 // If there is no java frame, then this must be top level code, such | |
187 // as the java command executable, in which case, this type of handle | |
188 // is not permitted. | |
189 return (thr->has_last_Java_frame() && | |
190 (void*)obj < (void*)thr->stack_base() && | |
191 (void*)obj >= (void*)thr->last_Java_sp()); | |
192 } | |
193 | |
194 | |
195 bool JNIHandles::is_global_handle(jobject handle) { | |
196 return _global_handles->chain_contains(handle); | |
197 } | |
198 | |
199 | |
200 bool JNIHandles::is_weak_global_handle(jobject handle) { | |
201 return _weak_global_handles->chain_contains(handle); | |
202 } | |
203 | |
204 long JNIHandles::global_handle_memory_usage() { | |
205 return _global_handles->memory_usage(); | |
206 } | |
207 | |
208 long JNIHandles::weak_global_handle_memory_usage() { | |
209 return _weak_global_handles->memory_usage(); | |
210 } | |
211 | |
212 | |
213 class AlwaysAliveClosure: public BoolObjectClosure { | |
214 public: | |
215 bool do_object_b(oop obj) { return true; } | |
216 void do_object(oop obj) { assert(false, "Don't call"); } | |
217 }; | |
218 | |
219 class CountHandleClosure: public OopClosure { | |
220 private: | |
221 int _count; | |
222 public: | |
223 CountHandleClosure(): _count(0) {} | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
224 virtual void do_oop(oop* unused) { |
0 | 225 _count++; |
226 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
227 virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); } |
0 | 228 int count() { return _count; } |
229 }; | |
230 | |
231 // We assume this is called at a safepoint: no lock is needed. | |
232 void JNIHandles::print_on(outputStream* st) { | |
233 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); | |
234 assert(_global_handles != NULL && _weak_global_handles != NULL, | |
235 "JNIHandles not initialized"); | |
236 | |
237 CountHandleClosure global_handle_count; | |
238 AlwaysAliveClosure always_alive; | |
239 oops_do(&global_handle_count); | |
240 weak_oops_do(&always_alive, &global_handle_count); | |
241 | |
242 st->print_cr("JNI global references: %d", global_handle_count.count()); | |
243 st->cr(); | |
244 st->flush(); | |
245 } | |
246 | |
247 class VerifyHandleClosure: public OopClosure { | |
248 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
249 virtual void do_oop(oop* root) { |
0 | 250 (*root)->verify(); |
251 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
252 virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); } |
0 | 253 }; |
254 | |
255 void JNIHandles::verify() { | |
256 VerifyHandleClosure verify_handle; | |
257 AlwaysAliveClosure always_alive; | |
258 | |
259 oops_do(&verify_handle); | |
260 weak_oops_do(&always_alive, &verify_handle); | |
261 } | |
262 | |
263 | |
264 | |
265 void jni_handles_init() { | |
266 JNIHandles::initialize(); | |
267 } | |
268 | |
269 | |
// Total number of blocks ever allocated (tracing/statistics only).
int JNIHandleBlock::_blocks_allocated = 0;
// Global free list of recycled blocks, guarded by JNIHandleBlockFreeList_lock.
JNIHandleBlock* JNIHandleBlock::_block_free_list = NULL;
#ifndef PRODUCT
// Debug-only list of every block ever allocated, for any_contains() and
// print_statistics().
JNIHandleBlock* JNIHandleBlock::_block_list = NULL;
#endif
275 | |
276 | |
277 void JNIHandleBlock::zap() { | |
278 // Zap block values | |
279 _top = 0; | |
280 for (int index = 0; index < block_size_in_oops; index++) { | |
281 _handles[index] = badJNIHandle; | |
282 } | |
283 } | |
284 | |
// Allocate a handle block, preferring the thread-local free list (which
// needs no locking) and falling back to the global free list or a fresh
// heap allocation. 'thread' may be NULL, e.g. when called for the
// global handle chains; then only the global list is consulted.
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      if (TraceJNIHandleAllocation) {
        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
                      block, _blocks_allocated);
      }
      if (ZapJNIHandleArea) block->zap();
#ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
#endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  // Reinitialize the block regardless of where it came from.
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}
330 | |
331 | |
// Return a chain of blocks either to the thread-local free list (when
// 'thread' is non-NULL) or to the global free list. Any chain hanging
// off the pop_frame_link is released recursively as a sanity measure.
void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == NULL, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != NULL ) {
    if (ZapJNIHandleArea) block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if ( freelist != NULL ) {
      while ( block->_next != NULL ) block = block->_next;
      block->_next = freelist;
    }
    // Everything handed to the thread-local list; skip the global path.
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      if (ZapJNIHandleArea) block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}
375 | |
376 | |
// Apply f to every live handle slot in this chain of blocks, and then in
// each chain reachable through a pop_frame_link (pushed local frames).
void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}
403 | |
404 | |
// Visit every weak handle in this chain: if the referent is live
// (per is_alive), apply f to the slot (possibly updating the pointer);
// otherwise clear the slot to NULL. Also iterates JVMTI's weak oops.
void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          if (TraceReferenceGC) {
            tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", root);
          }
          *root = NULL;
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_oops) {
      break;
    }
  }

  /*
   * JVMTI data structures may also contain weak oops.  The iteration of them
   * is placed here so that we don't need to add it to each of the collectors.
   */
  JvmtiExport::weak_oops_do(is_alive, f);
}
439 | |
440 | |
// Allocate a handle slot for obj in this block chain. Tries, in order:
// the last partially-filled block, the free list of deleted slots, any
// unused trailing block, and finally either rebuilds the free list or
// appends a new block, then retries recursively.
jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      current->_top = 0;
      if (ZapJNIHandleArea) current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    if (ZapJNIHandleArea) zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list (slots reclaimed from deleted handles)
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if unused block follow last
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
      rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}
498 | |
499 | |
// Rebuild the free list by threading it through every slot that holds
// the deleted-handle sentinel, and decide (heuristically) how many new
// blocks to append before the next rebuild attempt.
void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle ==  JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
  if (TraceJNIHandleAllocation) {
    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
      this, blocks, total-free, free, _allocate_before_rebuild);
  }
}
532 | |
533 | |
534 bool JNIHandleBlock::contains(jobject handle) const { | |
535 return ((jobject)&_handles[0] <= handle && handle<(jobject)&_handles[_top]); | |
536 } | |
537 | |
538 | |
539 bool JNIHandleBlock::chain_contains(jobject handle) const { | |
540 for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) { | |
541 if (current->contains(handle)) { | |
542 return true; | |
543 } | |
544 } | |
545 return false; | |
546 } | |
547 | |
548 | |
549 int JNIHandleBlock::length() const { | |
550 int result = 1; | |
551 for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) { | |
552 result++; | |
553 } | |
554 return result; | |
555 } | |
556 | |
// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
long JNIHandleBlock::memory_usage() const {
  // Block storage only; does not include objects referenced by handles.
  return length() * sizeof(JNIHandleBlock);
}
562 | |
563 | |
564 #ifndef PRODUCT | |
565 | |
566 bool JNIHandleBlock::any_contains(jobject handle) { | |
567 for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) { | |
568 if (current->contains(handle)) { | |
569 return true; | |
570 } | |
571 } | |
572 return false; | |
573 } | |
574 | |
575 void JNIHandleBlock::print_statistics() { | |
576 int used_blocks = 0; | |
577 int free_blocks = 0; | |
578 int used_handles = 0; | |
579 int free_handles = 0; | |
580 JNIHandleBlock* block = _block_list; | |
581 while (block != NULL) { | |
582 if (block->_top > 0) { | |
583 used_blocks++; | |
584 } else { | |
585 free_blocks++; | |
586 } | |
587 used_handles += block->_top; | |
588 free_handles += (block_size_in_oops - block->_top); | |
589 block = block->_block_list_link; | |
590 } | |
591 tty->print_cr("JNIHandleBlocks statistics"); | |
592 tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks); | |
593 tty->print_cr("- blocks in use: %d", used_blocks); | |
594 tty->print_cr("- blocks free: %d", free_blocks); | |
595 tty->print_cr("- handles in use: %d", used_handles); | |
596 tty->print_cr("- handles free: %d", free_handles); | |
597 } | |
598 | |
599 #endif |