Mercurial > hg > graal-jvmci-8
annotate src/share/vm/runtime/jniHandles.cpp @ 2001:f2da85a9b08e
7001363: java/dyn/InvokeDynamic should not be a well-known class in the JVM
Summary: Because of the removal of language support, the JDK 7 API for JSR 292 no longer includes a public class named java/dyn/InvokeDynamic.
Reviewed-by: jrose, kvn
author | twisti |
---|---|
date | Tue, 30 Nov 2010 09:53:04 -0800 |
parents | f95d63e2154a |
children | 7246a374a9f2 |
rev | line source |
---|---|
0 | 1 /* |
1972 | 2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1142
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/systemDictionary.hpp" | |
27 #include "oops/oop.inline.hpp" | |
28 #include "runtime/jniHandles.hpp" | |
29 #include "runtime/mutexLocker.hpp" | |
30 #ifdef TARGET_OS_FAMILY_linux | |
31 # include "thread_linux.inline.hpp" | |
32 #endif | |
33 #ifdef TARGET_OS_FAMILY_solaris | |
34 # include "thread_solaris.inline.hpp" | |
35 #endif | |
36 #ifdef TARGET_OS_FAMILY_windows | |
37 # include "thread_windows.inline.hpp" | |
38 #endif | |
0 | 39 |
40 | |
// Root storage for JNI global and weak-global handles, plus the shared
// sentinel oop written into handle slots on deletion so that
// allocate_handle can recognize and reuse them.
JNIHandleBlock* JNIHandles::_global_handles      = NULL;
JNIHandleBlock* JNIHandles::_weak_global_handles = NULL;
oop             JNIHandles::_deleted_handle      = NULL;
44 | |
45 | |
46 jobject JNIHandles::make_local(oop obj) { | |
47 if (obj == NULL) { | |
48 return NULL; // ignore null handles | |
49 } else { | |
50 Thread* thread = Thread::current(); | |
51 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
52 return thread->active_handles()->allocate_handle(obj); | |
53 } | |
54 } | |
55 | |
56 | |
57 // optimized versions | |
58 | |
59 jobject JNIHandles::make_local(Thread* thread, oop obj) { | |
60 if (obj == NULL) { | |
61 return NULL; // ignore null handles | |
62 } else { | |
63 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
64 return thread->active_handles()->allocate_handle(obj); | |
65 } | |
66 } | |
67 | |
68 | |
69 jobject JNIHandles::make_local(JNIEnv* env, oop obj) { | |
70 if (obj == NULL) { | |
71 return NULL; // ignore null handles | |
72 } else { | |
73 JavaThread* thread = JavaThread::thread_from_jni_environment(env); | |
74 assert(Universe::heap()->is_in_reserved(obj), "sanity check"); | |
75 return thread->active_handles()->allocate_handle(obj); | |
76 } | |
77 } | |
78 | |
79 | |
80 jobject JNIHandles::make_global(Handle obj) { | |
1616
38e8278318ca
6656830: assert((*p)->is_oop(),"expected an oop while scanning weak refs")
never
parents:
1552
diff
changeset
|
81 assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); |
0 | 82 jobject res = NULL; |
83 if (!obj.is_null()) { | |
84 // ignore null handles | |
85 MutexLocker ml(JNIGlobalHandle_lock); | |
86 assert(Universe::heap()->is_in_reserved(obj()), "sanity check"); | |
87 res = _global_handles->allocate_handle(obj()); | |
88 } else { | |
89 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); | |
90 } | |
91 | |
92 return res; | |
93 } | |
94 | |
95 | |
96 jobject JNIHandles::make_weak_global(Handle obj) { | |
1616
38e8278318ca
6656830: assert((*p)->is_oop(),"expected an oop while scanning weak refs")
never
parents:
1552
diff
changeset
|
97 assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); |
0 | 98 jobject res = NULL; |
99 if (!obj.is_null()) { | |
100 // ignore null handles | |
101 MutexLocker ml(JNIGlobalHandle_lock); | |
102 assert(Universe::heap()->is_in_reserved(obj()), "sanity check"); | |
103 res = _weak_global_handles->allocate_handle(obj()); | |
104 } else { | |
105 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); | |
106 } | |
107 return res; | |
108 } | |
109 | |
// A jmethodID is represented as a weak global handle to the method, so
// it does not by itself keep the method alive.
jmethodID JNIHandles::make_jmethod_id(methodHandle mh) {
  return (jmethodID) make_weak_global(mh);
}



// Re-point an existing jmethodID at a different method by overwriting
// the handle's oop slot in place — presumably used when the associated
// method is replaced (confirm against callers).
void JNIHandles::change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh) {
  MutexLocker ml(JNIGlobalHandle_lock); // Is this necessary?
  Handle obj = (Handle)mh;
  oop* jobj = (oop*)jmid;
  *jobj = obj();
}
122 | |
123 | |
124 void JNIHandles::destroy_global(jobject handle) { | |
125 if (handle != NULL) { | |
126 assert(is_global_handle(handle), "Invalid delete of global JNI handle"); | |
127 *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it | |
128 } | |
129 } | |
130 | |
131 | |
132 void JNIHandles::destroy_weak_global(jobject handle) { | |
133 if (handle != NULL) { | |
134 assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle"); | |
135 *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it | |
136 } | |
137 } | |
138 | |
// jmethodIDs are weak global handles (see make_jmethod_id), so tearing
// one down is just a weak-global delete.
void JNIHandles::destroy_jmethod_id(jmethodID mid) {
  destroy_weak_global((jobject)mid);
}
142 | |
143 | |
// Apply f to all strong JNI roots: the deleted-handle sentinel plus
// every oop in the global handle blocks. Weak globals are visited
// separately via weak_oops_do.
void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}


// Apply f to each weak-global handle whose referent is_alive reports
// live; dead referents are cleared inside JNIHandleBlock::weak_oops_do.
void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}
153 | |
154 | |
// One-time setup (called from jni_handles_init during VM startup):
// allocate the global and weak-global handle blocks and create the
// java.lang.Object instance used as the deleted-handle sentinel.
void JNIHandles::initialize() {
  _global_handles      = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
  klassOop k = SystemDictionary::Object_klass();
  _deleted_handle = instanceKlass::cast(k)->allocate_permanent_instance(CATCH);
}
164 | |
165 | |
166 bool JNIHandles::is_local_handle(Thread* thread, jobject handle) { | |
167 JNIHandleBlock* block = thread->active_handles(); | |
168 | |
169 // Look back past possible native calls to jni_PushLocalFrame. | |
170 while (block != NULL) { | |
171 if (block->chain_contains(handle)) { | |
172 return true; | |
173 } | |
174 block = block->pop_frame_link(); | |
175 } | |
176 return false; | |
177 } | |
178 | |
179 | |
// Determine if the handle is somewhere in the current thread's stack.
// We easily can't isolate any particular stack frame the handle might
// come from, so we'll check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
  // If there is no java frame, then this must be top level code, such
  // as the java command executable, in which case, this type of handle
  // is not permitted.
  // The range check assumes the stack grows toward lower addresses:
  // a stack-resident handle lies below stack_base() and at or above
  // last_Java_sp().
  return (thr->has_last_Java_frame() &&
         (void*)obj < (void*)thr->stack_base() &&
         (void*)obj >= (void*)thr->last_Java_sp());
}
192 | |
193 | |
// True if handle was allocated from the global handle blocks.
bool JNIHandles::is_global_handle(jobject handle) {
  return _global_handles->chain_contains(handle);
}


// True if handle was allocated from the weak-global handle blocks.
bool JNIHandles::is_weak_global_handle(jobject handle) {
  return _weak_global_handles->chain_contains(handle);
}

// Footprint of the global handle block chain, in bytes (block structs
// only; see JNIHandleBlock::memory_usage).
long JNIHandles::global_handle_memory_usage() {
  return _global_handles->memory_usage();
}

// Footprint of the weak-global handle block chain, in bytes.
long JNIHandles::weak_global_handle_memory_usage() {
  return _weak_global_handles->memory_usage();
}
210 | |
211 | |
// BoolObjectClosure that reports every object as live. Used to visit
// all weak handles regardless of actual liveness (see print_on and
// verify below).
class AlwaysAliveClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop obj) { return true; }
  void do_object(oop obj) { assert(false, "Don't call"); }
};
217 | |
// OopClosure that simply counts how many oop slots it is applied to.
class CountHandleClosure: public OopClosure {
 private:
  int _count;
 public:
  CountHandleClosure(): _count(0) {}
  virtual void do_oop(oop* unused) {
    _count++;
  }
  // Handle blocks store full-width oops only, so the narrowOop overload
  // must never be invoked.
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};
229 | |
// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert(_global_handles != NULL && _weak_global_handles != NULL,
         "JNIHandles not initialized");

  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  // Count strong globals (including the deleted-handle sentinel root)
  // and all weak globals — AlwaysAliveClosure makes every weak handle
  // count regardless of liveness.
  oops_do(&global_handle_count);
  weak_oops_do(&always_alive, &global_handle_count);

  st->print_cr("JNI global references: %d", global_handle_count.count());
  st->cr();
  st->flush();
}
245 | |
// OopClosure that verifies each visited oop (delegates to oop::verify).
class VerifyHandleClosure: public OopClosure {
 public:
  virtual void do_oop(oop* root) {
    (*root)->verify();
  }
  // Handle blocks store full-width oops only; narrowOop form is unused.
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};
253 | |
// Verify every oop reachable from the strong and weak global handles;
// AlwaysAliveClosure makes all weak referents get visited.
void JNIHandles::verify() {
  VerifyHandleClosure verify_handle;
  AlwaysAliveClosure always_alive;

  oops_do(&verify_handle);
  weak_oops_do(&always_alive, &verify_handle);
}
261 | |
262 | |
263 | |
// VM-startup entry point; forwards to JNIHandles::initialize().
void jni_handles_init() {
  JNIHandles::initialize();
}
267 | |
268 | |
// Global bookkeeping: total blocks ever allocated, the shared free list
// of recycled blocks, and (non-product builds only) a list of every
// block for statistics and any_contains.
int             JNIHandleBlock::_blocks_allocated = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list  = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list       = NULL;
#endif
274 | |
275 | |
276 void JNIHandleBlock::zap() { | |
277 // Zap block values | |
278 _top = 0; | |
279 for (int index = 0; index < block_size_in_oops; index++) { | |
280 _handles[index] = badJNIHandle; | |
281 } | |
282 } | |
283 | |
// Allocate a handle block, preferring in order:
//  1. the requesting thread's local free list (no locking),
//  2. the global free list (under JNIHandleBlockFreeList_lock),
//  3. a brand-new block from the C heap.
// The returned block is reset to empty; thread may be NULL.
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      if (TraceJNIHandleAllocation) {
        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
                      block, _blocks_allocated);
      }
      if (ZapJNIHandleArea) block->zap();
#ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
#endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top  = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}
329 | |
330 | |
// Return block (and the blocks chained after it) to a free list: with a
// non-NULL thread they go onto that thread's local free list, otherwise
// onto the global free list. Any blocks dangling from the pop-frame
// link are released recursively as a sanity measure.
void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == NULL, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != NULL ) {
    if (ZapJNIHandleArea) block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if ( freelist != NULL ) {
      while ( block->_next != NULL ) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;  // fully consumed; skip the global-list path below
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      if (ZapJNIHandleArea) block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}
374 | |
375 | |
// Apply f to every live handle slot in this chain of blocks, then in
// any chains reachable through pop-frame links. Deleted-handle
// sentinels and free-list link values are skipped via the
// is_in_reserved heap check.
void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}
402 | |
403 | |
// Walk all weak handle slots in this chain: live referents (per
// is_alive) are passed to f, dead referents are cleared to NULL.
void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          if (TraceReferenceGC) {
            tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", root);
          }
          *root = NULL;
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_oops) {
      break;
    }
  }
}
432 | |
433 | |
// Allocate one handle slot for obj from this block chain. Tries, in
// order: the last partially-filled block, the free list of deleted
// slots, any unused trailing block, and finally either a free-list
// rebuild or a newly appended block (per the _allocate_before_rebuild
// heuristic), then retries recursively.
jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
        "only first block should have _free_list set");
      current->_top = 0;
      if (ZapJNIHandleArea) current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    if (ZapJNIHandleArea) zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if unused block follow last
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
      rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}
491 | |
492 | |
// Scan the full chain for slots holding the deleted-handle sentinel and
// thread them onto _free_list for reuse. Also sets
// _allocate_before_rebuild: if fewer than half the slots were free, we
// append that many new blocks before attempting another rebuild.
void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle ==  JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
  if (TraceJNIHandleAllocation) {
    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
      this, blocks, total-free, free, _allocate_before_rebuild);
  }
}
525 | |
526 | |
527 bool JNIHandleBlock::contains(jobject handle) const { | |
528 return ((jobject)&_handles[0] <= handle && handle<(jobject)&_handles[_top]); | |
529 } | |
530 | |
531 | |
532 bool JNIHandleBlock::chain_contains(jobject handle) const { | |
533 for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) { | |
534 if (current->contains(handle)) { | |
535 return true; | |
536 } | |
537 } | |
538 return false; | |
539 } | |
540 | |
541 | |
542 int JNIHandleBlock::length() const { | |
543 int result = 1; | |
544 for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) { | |
545 result++; | |
546 } | |
547 return result; | |
548 } | |
549 | |
// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure. Counts only the block structures themselves (sizeof per
// block), not any allocator overhead.
long JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}
555 | |
556 | |
557 #ifndef PRODUCT | |
558 | |
559 bool JNIHandleBlock::any_contains(jobject handle) { | |
560 for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) { | |
561 if (current->contains(handle)) { | |
562 return true; | |
563 } | |
564 } | |
565 return false; | |
566 } | |
567 | |
// Non-product only: report how many blocks/handles are in use versus
// free across every block ever allocated. A block counts as "used" if
// it holds at least one handle (_top > 0).
void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}
591 | |
592 #endif |