Mercurial > hg > truffle
annotate src/share/vm/prims/jvmtiEnvBase.cpp @ 1615:ff38d05ea86f
6956958: assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted() || is_optimized() || is_megamorphic())
Reviewed-by: kvn
author | never |
---|---|
date | Fri, 18 Jun 2010 16:51:54 -0700 |
parents | c18cbe5936b8 |
children | ce6848d0666d |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1144
diff
changeset
|
2 * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1144
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1144
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1144
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 # include "incls/_precompiled.incl" | |
25 # include "incls/_jvmtiEnvBase.cpp.incl" | |
26 | |
27 | |
28 /////////////////////////////////////////////////////////////// | |
29 // | |
30 // JvmtiEnvBase | |
31 // | |
32 | |
33 JvmtiEnvBase* JvmtiEnvBase::_head_environment = NULL; | |
34 | |
35 bool JvmtiEnvBase::_globally_initialized = false; | |
36 volatile bool JvmtiEnvBase::_needs_clean_up = false; | |
37 | |
38 jvmtiPhase JvmtiEnvBase::_phase = JVMTI_PHASE_PRIMORDIAL; | |
39 | |
40 volatile int JvmtiEnvBase::_dying_thread_env_iteration_count = 0; | |
41 | |
42 extern jvmtiInterface_1_ jvmti_Interface; | |
43 extern jvmtiInterface_1_ jvmtiTrace_Interface; | |
44 | |
45 | |
46 // perform initializations that must occur before any JVMTI environments | |
47 // are released but which should only be initialized once (no matter | |
48 // how many environments are created). | |
49 void | |
50 JvmtiEnvBase::globally_initialize() { | |
51 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); | |
52 assert(_globally_initialized == false, "bad call"); | |
53 | |
54 JvmtiManageCapabilities::initialize(); | |
55 | |
56 #ifndef JVMTI_KERNEL | |
57 // register extension functions and events | |
58 JvmtiExtensions::register_extensions(); | |
59 #endif // !JVMTI_KERNEL | |
60 | |
61 #ifdef JVMTI_TRACE | |
62 JvmtiTrace::initialize(); | |
63 #endif | |
64 | |
65 _globally_initialized = true; | |
66 } | |
67 | |
68 | |
69 void | |
70 JvmtiEnvBase::initialize() { | |
71 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); | |
72 | |
73 // Add this environment to the end of the environment list (order is important) | |
74 { | |
75 // This block of code must not contain any safepoints, as list deallocation | |
76 // (which occurs at a safepoint) cannot occur simultaneously with this list | |
77 // addition. Note: No_Safepoint_Verifier cannot, currently, be used before | |
78 // threads exist. | |
79 JvmtiEnvIterator it; | |
80 JvmtiEnvBase *previous_env = NULL; | |
81 for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) { | |
82 previous_env = env; | |
83 } | |
84 if (previous_env == NULL) { | |
85 _head_environment = this; | |
86 } else { | |
87 previous_env->set_next_environment(this); | |
88 } | |
89 } | |
90 | |
91 if (_globally_initialized == false) { | |
92 globally_initialize(); | |
93 } | |
94 } | |
95 | |
96 | |
611
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
97 bool |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
98 JvmtiEnvBase::is_valid() { |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
99 jint value = 0; |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
100 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
101 // This object might not be a JvmtiEnvBase so we can't assume |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
102 // the _magic field is properly aligned. Get the value in a safe |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
103 // way and then check against JVMTI_MAGIC. |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
104 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
105 switch (sizeof(_magic)) { |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
106 case 2: |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
107 value = Bytes::get_native_u2((address)&_magic); |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
108 break; |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
109 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
110 case 4: |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
111 value = Bytes::get_native_u4((address)&_magic); |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
112 break; |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
113 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
114 case 8: |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
115 value = Bytes::get_native_u8((address)&_magic); |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
116 break; |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
117 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
118 default: |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
119 guarantee(false, "_magic field is an unexpected size"); |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
120 } |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
121 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
122 return value == JVMTI_MAGIC; |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
123 } |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
124 |
2f716c0acb64
6567360: 3/4 SIGBUS in jvmti RawMonitor magic check for unaligned bad monitor pointer
dcubed
parents:
609
diff
changeset
|
125 |
1122 | 126 bool |
1121 | 127 JvmtiEnvBase::use_version_1_0_semantics() { |
128 int major, minor, micro; | |
129 | |
130 JvmtiExport::decode_version_values(_version, &major, &minor, µ); | |
131 return major == 1 && minor == 0; // micro version doesn't matter here | |
132 } | |
133 | |
134 | |
135 bool | |
136 JvmtiEnvBase::use_version_1_1_semantics() { | |
137 int major, minor, micro; | |
138 | |
139 JvmtiExport::decode_version_values(_version, &major, &minor, µ); | |
140 return major == 1 && minor == 1; // micro version doesn't matter here | |
141 } | |
142 | |
143 | |
144 JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() { | |
145 _version = version; | |
0 | 146 _env_local_storage = NULL; |
147 _tag_map = NULL; | |
148 _native_method_prefix_count = 0; | |
149 _native_method_prefixes = NULL; | |
150 _next = NULL; | |
151 _class_file_load_hook_ever_enabled = false; | |
152 | |
153 // Moot since ClassFileLoadHook not yet enabled. | |
154 // But "true" will give a more predictable ClassFileLoadHook behavior | |
155 // for environment creation during ClassFileLoadHook. | |
156 _is_retransformable = true; | |
157 | |
158 // all callbacks initially NULL | |
159 memset(&_event_callbacks,0,sizeof(jvmtiEventCallbacks)); | |
160 | |
161 // all capabilities initially off | |
162 memset(&_current_capabilities, 0, sizeof(_current_capabilities)); | |
163 | |
164 // all prohibited capabilities initially off | |
165 memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities)); | |
166 | |
167 _magic = JVMTI_MAGIC; | |
168 | |
169 JvmtiEventController::env_initialize((JvmtiEnv*)this); | |
170 | |
171 #ifdef JVMTI_TRACE | |
371 | 172 _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface; |
0 | 173 #else |
174 _jvmti_external.functions = &jvmti_Interface; | |
175 #endif | |
176 } | |
177 | |
178 | |
179 void | |
180 JvmtiEnvBase::dispose() { | |
181 | |
182 #ifdef JVMTI_TRACE | |
183 JvmtiTrace::shutdown(); | |
184 #endif | |
185 | |
186 // Dispose of event info and let the event controller call us back | |
187 // in a locked state (env_dispose, below) | |
188 JvmtiEventController::env_dispose(this); | |
189 } | |
190 | |
191 void | |
192 JvmtiEnvBase::env_dispose() { | |
193 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); | |
194 | |
195 // We have been entered with all events disabled on this environment. | |
196 // A race to re-enable events (by setting callbacks) is prevented by | |
197 // checking for a valid environment when setting callbacks (while | |
198 // holding the JvmtiThreadState_lock). | |
199 | |
200 // Mark as invalid. | |
201 _magic = DISPOSED_MAGIC; | |
202 | |
203 // Relinquish all capabilities. | |
204 jvmtiCapabilities *caps = get_capabilities(); | |
205 JvmtiManageCapabilities::relinquish_capabilities(caps, caps, caps); | |
206 | |
207 // Same situation as with events (see above) | |
208 set_native_method_prefixes(0, NULL); | |
209 | |
210 #ifndef JVMTI_KERNEL | |
211 JvmtiTagMap* tag_map_to_deallocate = _tag_map; | |
212 set_tag_map(NULL); | |
213 // A tag map can be big, deallocate it now | |
214 if (tag_map_to_deallocate != NULL) { | |
215 delete tag_map_to_deallocate; | |
216 } | |
217 #endif // !JVMTI_KERNEL | |
218 | |
219 _needs_clean_up = true; | |
220 } | |
221 | |
222 | |
223 JvmtiEnvBase::~JvmtiEnvBase() { | |
224 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); | |
225 | |
226 // There is a small window of time during which the tag map of a | |
227 // disposed environment could have been reallocated. | |
228 // Make sure it is gone. | |
229 #ifndef JVMTI_KERNEL | |
230 JvmtiTagMap* tag_map_to_deallocate = _tag_map; | |
231 set_tag_map(NULL); | |
232 // A tag map can be big, deallocate it now | |
233 if (tag_map_to_deallocate != NULL) { | |
234 delete tag_map_to_deallocate; | |
235 } | |
236 #endif // !JVMTI_KERNEL | |
237 | |
238 _magic = BAD_MAGIC; | |
239 } | |
240 | |
241 | |
242 void | |
243 JvmtiEnvBase::periodic_clean_up() { | |
244 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); | |
245 | |
246 // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So | |
247 // clean up JvmtiThreadState before deleting JvmtiEnv pointer. | |
248 JvmtiThreadState::periodic_clean_up(); | |
249 | |
250 // Unlink all invalid environments from the list of environments | |
251 // and deallocate them | |
252 JvmtiEnvIterator it; | |
253 JvmtiEnvBase* previous_env = NULL; | |
254 JvmtiEnvBase* env = it.first(); | |
255 while (env != NULL) { | |
256 if (env->is_valid()) { | |
257 previous_env = env; | |
258 env = it.next(env); | |
259 } else { | |
260 // This one isn't valid, remove it from the list and deallocate it | |
261 JvmtiEnvBase* defunct_env = env; | |
262 env = it.next(env); | |
263 if (previous_env == NULL) { | |
264 _head_environment = env; | |
265 } else { | |
266 previous_env->set_next_environment(env); | |
267 } | |
268 delete defunct_env; | |
269 } | |
270 } | |
271 | |
272 } | |
273 | |
274 | |
275 void | |
276 JvmtiEnvBase::check_for_periodic_clean_up() { | |
277 assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); | |
278 | |
279 class ThreadInsideIterationClosure: public ThreadClosure { | |
280 private: | |
281 bool _inside; | |
282 public: | |
283 ThreadInsideIterationClosure() : _inside(false) {}; | |
284 | |
285 void do_thread(Thread* thread) { | |
286 _inside |= thread->is_inside_jvmti_env_iteration(); | |
287 } | |
288 | |
289 bool is_inside_jvmti_env_iteration() { | |
290 return _inside; | |
291 } | |
292 }; | |
293 | |
294 if (_needs_clean_up) { | |
295 // Check if we are currently iterating environment, | |
296 // deallocation should not occur if we are | |
297 ThreadInsideIterationClosure tiic; | |
298 Threads::threads_do(&tiic); | |
299 if (!tiic.is_inside_jvmti_env_iteration() && | |
300 !is_inside_dying_thread_env_iteration()) { | |
301 _needs_clean_up = false; | |
302 JvmtiEnvBase::periodic_clean_up(); | |
303 } | |
304 } | |
305 } | |
306 | |
307 | |
308 void | |
309 JvmtiEnvBase::record_first_time_class_file_load_hook_enabled() { | |
310 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), | |
311 "sanity check"); | |
312 | |
313 if (!_class_file_load_hook_ever_enabled) { | |
314 _class_file_load_hook_ever_enabled = true; | |
315 | |
316 if (get_capabilities()->can_retransform_classes) { | |
317 _is_retransformable = true; | |
318 } else { | |
319 _is_retransformable = false; | |
320 | |
321 // cannot add retransform capability after ClassFileLoadHook has been enabled | |
322 get_prohibited_capabilities()->can_retransform_classes = 1; | |
323 } | |
324 } | |
325 } | |
326 | |
327 | |
328 void | |
329 JvmtiEnvBase::record_class_file_load_hook_enabled() { | |
330 if (!_class_file_load_hook_ever_enabled) { | |
331 if (Threads::number_of_threads() == 0) { | |
332 record_first_time_class_file_load_hook_enabled(); | |
333 } else { | |
334 MutexLocker mu(JvmtiThreadState_lock); | |
335 record_first_time_class_file_load_hook_enabled(); | |
336 } | |
337 } | |
338 } | |
339 | |
340 | |
341 jvmtiError | |
342 JvmtiEnvBase::set_native_method_prefixes(jint prefix_count, char** prefixes) { | |
343 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), | |
344 "sanity check"); | |
345 | |
346 int old_prefix_count = get_native_method_prefix_count(); | |
347 char **old_prefixes = get_native_method_prefixes(); | |
348 | |
349 // allocate and install the new prefixex | |
350 if (prefix_count == 0 || !is_valid()) { | |
351 _native_method_prefix_count = 0; | |
352 _native_method_prefixes = NULL; | |
353 } else { | |
354 // there are prefixes, allocate an array to hold them, and fill it | |
355 char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*)); | |
356 if (new_prefixes == NULL) { | |
357 return JVMTI_ERROR_OUT_OF_MEMORY; | |
358 } | |
359 for (int i = 0; i < prefix_count; i++) { | |
360 char* prefix = prefixes[i]; | |
361 if (prefix == NULL) { | |
362 for (int j = 0; j < (i-1); j++) { | |
363 os::free(new_prefixes[j]); | |
364 } | |
365 os::free(new_prefixes); | |
366 return JVMTI_ERROR_NULL_POINTER; | |
367 } | |
368 prefix = os::strdup(prefixes[i]); | |
369 if (prefix == NULL) { | |
370 for (int j = 0; j < (i-1); j++) { | |
371 os::free(new_prefixes[j]); | |
372 } | |
373 os::free(new_prefixes); | |
374 return JVMTI_ERROR_OUT_OF_MEMORY; | |
375 } | |
376 new_prefixes[i] = prefix; | |
377 } | |
378 _native_method_prefix_count = prefix_count; | |
379 _native_method_prefixes = new_prefixes; | |
380 } | |
381 | |
382 // now that we know the new prefixes have been successfully installed we can | |
383 // safely remove the old ones | |
384 if (old_prefix_count != 0) { | |
385 for (int i = 0; i < old_prefix_count; i++) { | |
386 os::free(old_prefixes[i]); | |
387 } | |
388 os::free(old_prefixes); | |
389 } | |
390 | |
391 return JVMTI_ERROR_NONE; | |
392 } | |
393 | |
394 | |
395 // Collect all the prefixes which have been set in any JVM TI environments | |
396 // by the SetNativeMethodPrefix(es) functions. Be sure to maintain the | |
397 // order of environments and the order of prefixes within each environment. | |
398 // Return in a resource allocated array. | |
399 char** | |
400 JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) { | |
401 assert(Threads::number_of_threads() == 0 || | |
402 SafepointSynchronize::is_at_safepoint() || | |
403 JvmtiThreadState_lock->is_locked(), | |
404 "sanity check"); | |
405 | |
406 int total_count = 0; | |
407 GrowableArray<char*>* prefix_array =new GrowableArray<char*>(5); | |
408 | |
409 JvmtiEnvIterator it; | |
410 for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) { | |
411 int prefix_count = env->get_native_method_prefix_count(); | |
412 char** prefixes = env->get_native_method_prefixes(); | |
413 for (int j = 0; j < prefix_count; j++) { | |
414 // retrieve a prefix and so that it is safe against asynchronous changes | |
415 // copy it into the resource area | |
416 char* prefix = prefixes[j]; | |
417 char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1); | |
418 strcpy(prefix_copy, prefix); | |
419 prefix_array->at_put_grow(total_count++, prefix_copy); | |
420 } | |
421 } | |
422 | |
423 char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count); | |
424 char** p = all_prefixes; | |
425 for (int i = 0; i < total_count; ++i) { | |
426 *p++ = prefix_array->at(i); | |
427 } | |
428 *count_ptr = total_count; | |
429 return all_prefixes; | |
430 } | |
431 | |
432 void | |
433 JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks, | |
434 jint size_of_callbacks) { | |
435 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); | |
436 | |
437 size_t byte_cnt = sizeof(jvmtiEventCallbacks); | |
438 | |
439 // clear in either case to be sure we got any gap between sizes | |
440 memset(&_event_callbacks, 0, byte_cnt); | |
441 | |
442 // Now that JvmtiThreadState_lock is held, prevent a possible race condition where events | |
443 // are re-enabled by a call to set event callbacks where the DisposeEnvironment | |
444 // occurs after the boiler-plate environment check and before the lock is acquired. | |
445 if (callbacks != NULL && is_valid()) { | |
446 if (size_of_callbacks < (jint)byte_cnt) { | |
447 byte_cnt = size_of_callbacks; | |
448 } | |
449 memcpy(&_event_callbacks, callbacks, byte_cnt); | |
450 } | |
451 } | |
452 | |
453 // Called from JVMTI entry points which perform stack walking. If the | |
454 // associated JavaThread is the current thread, then wait_for_suspend | |
455 // is not used. Otherwise, it determines if we should wait for the | |
456 // "other" thread to complete external suspension. (NOTE: in future | |
457 // releases the suspension mechanism should be reimplemented so this | |
458 // is not necessary.) | |
459 // | |
460 bool | |
461 JvmtiEnvBase::is_thread_fully_suspended(JavaThread* thr, bool wait_for_suspend, uint32_t *bits) { | |
462 // "other" threads require special handling | |
463 if (thr != JavaThread::current()) { | |
464 if (wait_for_suspend) { | |
465 // We are allowed to wait for the external suspend to complete | |
466 // so give the other thread a chance to get suspended. | |
467 if (!thr->wait_for_ext_suspend_completion(SuspendRetryCount, | |
468 SuspendRetryDelay, bits)) { | |
469 // didn't make it so let the caller know | |
470 return false; | |
471 } | |
472 } | |
473 // We aren't allowed to wait for the external suspend to complete | |
474 // so if the other thread isn't externally suspended we need to | |
475 // let the caller know. | |
476 else if (!thr->is_ext_suspend_completed_with_lock(bits)) { | |
477 return false; | |
478 } | |
479 } | |
480 | |
481 return true; | |
482 } | |
483 | |
484 | |
485 // In the fullness of time, all users of the method should instead | |
486 // directly use allocate, besides being cleaner and faster, this will | |
487 // mean much better out of memory handling | |
488 unsigned char * | |
489 JvmtiEnvBase::jvmtiMalloc(jlong size) { | |
490 unsigned char* mem; | |
491 jvmtiError result = allocate(size, &mem); | |
492 assert(result == JVMTI_ERROR_NONE, "Allocate failed"); | |
493 return mem; | |
494 } | |
495 | |
496 | |
497 // | |
498 // Threads | |
499 // | |
500 | |
501 jobject * | |
502 JvmtiEnvBase::new_jobjectArray(int length, Handle *handles) { | |
503 if (length == 0) { | |
504 return NULL; | |
505 } | |
506 | |
507 jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length); | |
508 NULL_CHECK(objArray, NULL); | |
509 | |
510 for (int i=0; i<length; i++) { | |
511 objArray[i] = jni_reference(handles[i]); | |
512 } | |
513 return objArray; | |
514 } | |
515 | |
516 jthread * | |
517 JvmtiEnvBase::new_jthreadArray(int length, Handle *handles) { | |
518 return (jthread *) new_jobjectArray(length,handles); | |
519 } | |
520 | |
521 jthreadGroup * | |
522 JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) { | |
523 return (jthreadGroup *) new_jobjectArray(length,handles); | |
524 } | |
525 | |
526 | |
527 JavaThread * | |
528 JvmtiEnvBase::get_JavaThread(jthread jni_thread) { | |
529 oop t = JNIHandles::resolve_external_guard(jni_thread); | |
1142 | 530 if (t == NULL || !t->is_a(SystemDictionary::Thread_klass())) { |
0 | 531 return NULL; |
532 } | |
533 // The following returns NULL if the thread has not yet run or is in | |
534 // process of exiting | |
535 return java_lang_Thread::thread(t); | |
536 } | |
537 | |
538 | |
539 // update the access_flags for the field in the klass | |
540 void | |
541 JvmtiEnvBase::update_klass_field_access_flag(fieldDescriptor *fd) { | |
542 instanceKlass* ik = instanceKlass::cast(fd->field_holder()); | |
543 typeArrayOop fields = ik->fields(); | |
544 fields->ushort_at_put(fd->index(), (jushort)fd->access_flags().as_short()); | |
545 } | |
546 | |
547 | |
548 // return the vframe on the specified thread and depth, NULL if no such frame | |
549 vframe* | |
550 JvmtiEnvBase::vframeFor(JavaThread* java_thread, jint depth) { | |
551 if (!java_thread->has_last_Java_frame()) { | |
552 return NULL; | |
553 } | |
554 RegisterMap reg_map(java_thread); | |
555 vframe *vf = java_thread->last_java_vframe(®_map); | |
556 int d = 0; | |
557 while ((vf != NULL) && (d < depth)) { | |
558 vf = vf->java_sender(); | |
559 d++; | |
560 } | |
561 return vf; | |
562 } | |
563 | |
564 | |
565 // | |
566 // utilities: JNI objects | |
567 // | |
568 | |
569 | |
570 jclass | |
571 JvmtiEnvBase::get_jni_class_non_null(klassOop k) { | |
572 assert(k != NULL, "k != NULL"); | |
573 return (jclass)jni_reference(Klass::cast(k)->java_mirror()); | |
574 } | |
575 | |
576 #ifndef JVMTI_KERNEL | |
577 | |
578 // | |
579 // Field Information | |
580 // | |
581 | |
582 bool | |
583 JvmtiEnvBase::get_field_descriptor(klassOop k, jfieldID field, fieldDescriptor* fd) { | |
584 if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) { | |
585 return false; | |
586 } | |
587 bool found = false; | |
588 if (jfieldIDWorkaround::is_static_jfieldID(field)) { | |
589 JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field); | |
590 int offset = id->offset(); | |
591 klassOop holder = id->holder(); | |
592 found = instanceKlass::cast(holder)->find_local_field_from_offset(offset, true, fd); | |
593 } else { | |
594 // Non-static field. The fieldID is really the offset of the field within the object. | |
595 int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field); | |
596 found = instanceKlass::cast(k)->find_field_from_offset(offset, false, fd); | |
597 } | |
598 return found; | |
599 } | |
600 | |
601 // | |
602 // Object Monitor Information | |
603 // | |
604 | |
605 // | |
606 // Count the number of objects for a lightweight monitor. The hobj | |
607 // parameter is object that owns the monitor so this routine will | |
608 // count the number of times the same object was locked by frames | |
609 // in java_thread. | |
610 // | |
611 jint | |
612 JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) { | |
613 jint ret = 0; | |
614 if (!java_thread->has_last_Java_frame()) { | |
615 return ret; // no Java frames so no monitors | |
616 } | |
617 | |
618 ResourceMark rm; | |
619 HandleMark hm; | |
620 RegisterMap reg_map(java_thread); | |
621 | |
622 for(javaVFrame *jvf=java_thread->last_java_vframe(®_map); jvf != NULL; | |
623 jvf = jvf->java_sender()) { | |
624 GrowableArray<MonitorInfo*>* mons = jvf->monitors(); | |
625 if (!mons->is_empty()) { | |
626 for (int i = 0; i < mons->length(); i++) { | |
627 MonitorInfo *mi = mons->at(i); | |
818
b109e761e927
6837472: com/sun/jdi/MonitorFrameInfo.java fails with AggressiveOpts in 6u14
kvn
parents:
611
diff
changeset
|
628 if (mi->owner_is_scalar_replaced()) continue; |
0 | 629 |
630 // see if owner of the monitor is our object | |
631 if (mi->owner() != NULL && mi->owner() == hobj()) { | |
632 ret++; | |
633 } | |
634 } | |
635 } | |
636 } | |
637 return ret; | |
638 } | |
639 | |
640 | |
641 | |
642 jvmtiError | |
643 JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread, jobject *monitor_ptr) { | |
644 #ifdef ASSERT | |
645 uint32_t debug_bits = 0; | |
646 #endif | |
647 assert((SafepointSynchronize::is_at_safepoint() || | |
648 is_thread_fully_suspended(java_thread, false, &debug_bits)), | |
649 "at safepoint or target thread is suspended"); | |
650 oop obj = NULL; | |
651 ObjectMonitor *mon = java_thread->current_waiting_monitor(); | |
652 if (mon == NULL) { | |
653 // thread is not doing an Object.wait() call | |
654 mon = java_thread->current_pending_monitor(); | |
655 if (mon != NULL) { | |
656 // The thread is trying to enter() or raw_enter() an ObjectMonitor. | |
657 obj = (oop)mon->object(); | |
658 // If obj == NULL, then ObjectMonitor is raw which doesn't count | |
659 // as contended for this API | |
660 } | |
661 // implied else: no contended ObjectMonitor | |
662 } else { | |
663 // thread is doing an Object.wait() call | |
664 obj = (oop)mon->object(); | |
665 assert(obj != NULL, "Object.wait() should have an object"); | |
666 } | |
667 | |
668 if (obj == NULL) { | |
669 *monitor_ptr = NULL; | |
670 } else { | |
671 HandleMark hm; | |
672 Handle hobj(obj); | |
673 *monitor_ptr = jni_reference(calling_thread, hobj); | |
674 } | |
675 return JVMTI_ERROR_NONE; | |
676 } | |
677 | |
678 | |
679 jvmtiError | |
680 JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread, | |
681 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) { | |
682 jvmtiError err = JVMTI_ERROR_NONE; | |
683 #ifdef ASSERT | |
684 uint32_t debug_bits = 0; | |
685 #endif | |
686 assert((SafepointSynchronize::is_at_safepoint() || | |
687 is_thread_fully_suspended(java_thread, false, &debug_bits)), | |
688 "at safepoint or target thread is suspended"); | |
689 | |
690 if (java_thread->has_last_Java_frame()) { | |
691 ResourceMark rm; | |
692 HandleMark hm; | |
693 RegisterMap reg_map(java_thread); | |
694 | |
695 int depth = 0; | |
696 for (javaVFrame *jvf = java_thread->last_java_vframe(®_map); jvf != NULL; | |
697 jvf = jvf->java_sender()) { | |
698 if (depth++ < MaxJavaStackTraceDepth) { // check for stack too deep | |
699 // add locked objects for this frame into list | |
700 err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1); | |
701 if (err != JVMTI_ERROR_NONE) { | |
702 return err; | |
703 } | |
704 } | |
705 } | |
706 } | |
707 | |
708 // Get off stack monitors. (e.g. acquired via jni MonitorEnter). | |
709 JvmtiMonitorClosure jmc(java_thread, calling_thread, owned_monitors_list, this); | |
710 ObjectSynchronizer::monitors_iterate(&jmc); | |
711 err = jmc.error(); | |
712 | |
713 return err; | |
714 } | |
715 | |
716 // Save JNI local handles for any objects that this frame owns. | |
717 jvmtiError | |
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
                 javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, int stack_depth) {
  // Collect the monitors locked by this single javaVFrame and append them to
  // owned_monitors_list, skipping monitors that are merely waited-on/pended-on
  // and monitors already recorded (recursive locking).
  jvmtiError err = JVMTI_ERROR_NONE;
  ResourceMark rm;

  GrowableArray<MonitorInfo*>* mons = jvf->monitors();
  if (mons->is_empty()) {
    return err;  // this javaVFrame holds no monitors
  }

  HandleMark hm;
  oop wait_obj = NULL;
  {
    // save object of current wait() call (if any) for later comparison
    ObjectMonitor *mon = java_thread->current_waiting_monitor();
    if (mon != NULL) {
      wait_obj = (oop)mon->object();
    }
  }
  oop pending_obj = NULL;
  {
    // save object of current enter() call (if any) for later comparison
    ObjectMonitor *mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      pending_obj = (oop)mon->object();
    }
  }

  for (int i = 0; i < mons->length(); i++) {
    MonitorInfo *mi = mons->at(i);

    // A monitor whose owner was scalar-replaced by the compiler has no
    // real object to report (see 6837472).
    if (mi->owner_is_scalar_replaced()) continue;

    oop obj = mi->owner();
    if (obj == NULL) {
      // this monitor doesn't have an owning object so skip it
      continue;
    }

    if (wait_obj == obj) {
      // the thread is waiting on this monitor so it isn't really owned
      continue;
    }

    if (pending_obj == obj) {
      // the thread is pending on this monitor so it isn't really owned
      continue;
    }

    if (owned_monitors_list->length() > 0) {
      // Our list has at least one object on it so we have to check
      // for recursive object locking
      bool found = false;
      for (int j = 0; j < owned_monitors_list->length(); j++) {
        jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor;
        oop check = JNIHandles::resolve(jobj);
        if (check == obj) {
          found = true;  // we found the object
          break;
        }
      }

      if (found) {
        // already have this object so don't include it
        continue;
      }
    }

    // add the owning object to our list
    jvmtiMonitorStackDepthInfo *jmsdi;
    err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
    if (err != JVMTI_ERROR_NONE) {
      return err;
    }
    Handle hobj(obj);
    // jni_reference() creates a JNI local ref in calling_thread so the
    // reported monitor object survives after this walk returns.
    jmsdi->monitor = jni_reference(calling_thread, hobj);
    jmsdi->stack_depth = stack_depth;
    owned_monitors_list->append(jmsdi);
  }

  return err;
}
800 | |
// Fill frame_buffer with up to max_count jvmtiFrameInfo records starting at
// start_depth. start_depth >= 0 counts from the top (innermost) frame;
// start_depth < 0 counts from the oldest frame. *count_ptr receives the
// number of frames actually written. Caller must be at a safepoint or have
// the target thread suspended.
jvmtiError
JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
                              jint start_depth, jint max_count,
                              jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  assert((SafepointSynchronize::is_at_safepoint() ||
          is_thread_fully_suspended(java_thread, false, &debug_bits)),
         "at safepoint or target thread is suspended");
  int count = 0;
  if (java_thread->has_last_Java_frame()) {
    RegisterMap reg_map(java_thread);
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    javaVFrame *jvf = java_thread->last_java_vframe(&reg_map);
    HandleMark hm(current_thread);
    if (start_depth != 0) {
      if (start_depth > 0) {
        // walk down to the requested starting frame
        for (int j = 0; j < start_depth && jvf != NULL; j++) {
          jvf = jvf->java_sender();
        }
        if (jvf == NULL) {
          // start_depth is deeper than the stack depth
          return JVMTI_ERROR_ILLEGAL_ARGUMENT;
        }
      } else { // start_depth < 0
        // we are referencing the starting depth based on the oldest
        // part of the stack.
        // optimize to limit the number of times that java_sender() is called
        javaVFrame *jvf_cursor = jvf;
        javaVFrame *jvf_prev = NULL;
        javaVFrame *jvf_prev_prev;
        // NOTE(review): jvf_prev_prev is only assigned inside the while loop;
        // if last_java_vframe() returned NULL it would be read uninitialized
        // below — presumably unreachable since has_last_Java_frame() is true.
        int j = 0;
        while (jvf_cursor != NULL) {
          jvf_prev_prev = jvf_prev;
          jvf_prev = jvf_cursor;
          // advance the cursor by -start_depth frames (or until it runs out);
          // j counts how far the last partial hop got
          for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
            jvf_cursor = jvf_cursor->java_sender();
          }
        }
        if (j == start_depth) {
          // previous pointer is exactly where we want to start
          jvf = jvf_prev;
        } else {
          // we need to back up further to get to the right place
          if (jvf_prev_prev == NULL) {
            // the -start_depth is greater than the stack depth
            return JVMTI_ERROR_ILLEGAL_ARGUMENT;
          }
          // j now is the number of frames on the stack starting with
          // jvf_prev, we start from jvf_prev_prev and move older on
          // the stack that many, the result is -start_depth frames
          // remaining.
          jvf = jvf_prev_prev;
          for (; j < 0; j++) {
            jvf = jvf->java_sender();
          }
        }
      }
    }
    // copy up to max_count frames into the caller-supplied buffer
    for (; count < max_count && jvf != NULL; count++) {
      frame_buffer[count].method = jvf->method()->jmethod_id();
      // JVMTI reports -1 as the location of a native method
      frame_buffer[count].location = (jvf->method()->is_native() ? -1 : jvf->bci());
      jvf = jvf->java_sender();
    }
  } else {
    if (start_depth != 0) {
      // no frames and there is a starting depth
      return JVMTI_ERROR_ILLEGAL_ARGUMENT;
    }
  }
  *count_ptr = count;
  return JVMTI_ERROR_NONE;
}
876 | |
877 jvmtiError | |
878 JvmtiEnvBase::get_frame_count(JvmtiThreadState *state, jint *count_ptr) { | |
879 assert((state != NULL), | |
880 "JavaThread should create JvmtiThreadState before calling this method"); | |
881 *count_ptr = state->count_frames(); | |
882 return JVMTI_ERROR_NONE; | |
883 } | |
884 | |
// Report the method and bytecode index of the frame at the given depth.
// *location_ptr is -1 for a native method. Caller must be at a safepoint
// or have the target thread suspended.
jvmtiError
JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
                                 jmethodID* method_ptr, jlocation* location_ptr) {
#ifdef ASSERT
  uint32_t debug_bits = 0;
#endif
  assert((SafepointSynchronize::is_at_safepoint() ||
          is_thread_fully_suspended(java_thread, false, &debug_bits)),
         "at safepoint or target thread is suspended");
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);

  vframe *vf = vframeFor(java_thread, depth);
  if (vf == NULL) {
    return JVMTI_ERROR_NO_MORE_FRAMES;
  }

  // vframeFor should return a java frame. If it doesn't
  // it means we've got an internal error and we return the
  // error in product mode. In debug mode we will instead
  // attempt to cast the vframe to a javaVFrame and will
  // cause an assertion/crash to allow further diagnosis.
#ifdef PRODUCT
  if (!vf->is_java_frame()) {
    return JVMTI_ERROR_INTERNAL;
  }
#endif

  HandleMark hm(current_thread);
  // in debug builds javaVFrame::cast() asserts on a non-java frame
  javaVFrame *jvf = javaVFrame::cast(vf);
  methodOop method = jvf->method();
  if (method->is_native()) {
    *location_ptr = -1;
  } else {
    *location_ptr = jvf->bci();
  }
  *method_ptr = method->jmethod_id();

  return JVMTI_ERROR_NONE;
}
925 | |
926 | |
927 jvmtiError | |
928 JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject object, jvmtiMonitorUsage* info_ptr) { | |
929 HandleMark hm; | |
930 Handle hobj; | |
931 | |
932 bool at_safepoint = SafepointSynchronize::is_at_safepoint(); | |
933 | |
934 // Check arguments | |
935 { | |
936 oop mirror = JNIHandles::resolve_external_guard(object); | |
937 NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT); | |
938 NULL_CHECK(info_ptr, JVMTI_ERROR_NULL_POINTER); | |
939 | |
940 hobj = Handle(mirror); | |
941 } | |
942 | |
943 JavaThread *owning_thread = NULL; | |
944 ObjectMonitor *mon = NULL; | |
945 jvmtiMonitorUsage ret = { | |
946 NULL, 0, 0, NULL, 0, NULL | |
947 }; | |
948 | |
949 uint32_t debug_bits = 0; | |
950 // first derive the object's owner and entry_count (if any) | |
951 { | |
952 // Revoke any biases before querying the mark word | |
953 if (SafepointSynchronize::is_at_safepoint()) { | |
954 BiasedLocking::revoke_at_safepoint(hobj); | |
955 } else { | |
956 BiasedLocking::revoke_and_rebias(hobj, false, calling_thread); | |
957 } | |
958 | |
959 address owner = NULL; | |
960 { | |
961 markOop mark = hobj()->mark(); | |
962 | |
963 if (!mark->has_monitor()) { | |
964 // this object has a lightweight monitor | |
965 | |
966 if (mark->has_locker()) { | |
967 owner = (address)mark->locker(); // save the address of the Lock word | |
968 } | |
969 // implied else: no owner | |
970 } else { | |
971 // this object has a heavyweight monitor | |
972 mon = mark->monitor(); | |
973 | |
974 // The owner field of a heavyweight monitor may be NULL for no | |
975 // owner, a JavaThread * or it may still be the address of the | |
976 // Lock word in a JavaThread's stack. A monitor can be inflated | |
977 // by a non-owning JavaThread, but only the owning JavaThread | |
978 // can change the owner field from the Lock word to the | |
979 // JavaThread * and it may not have done that yet. | |
980 owner = (address)mon->owner(); | |
981 } | |
982 } | |
983 | |
984 if (owner != NULL) { | |
985 // This monitor is owned so we have to find the owning JavaThread. | |
986 // Since owning_thread_from_monitor_owner() grabs a lock, GC can | |
987 // move our object at this point. However, our owner value is safe | |
988 // since it is either the Lock word on a stack or a JavaThread *. | |
989 owning_thread = Threads::owning_thread_from_monitor_owner(owner, !at_safepoint); | |
990 assert(owning_thread != NULL, "sanity check"); | |
991 if (owning_thread != NULL) { // robustness | |
992 // The monitor's owner either has to be the current thread, at safepoint | |
993 // or it has to be suspended. Any of these conditions will prevent both | |
994 // contending and waiting threads from modifying the state of | |
995 // the monitor. | |
996 if (!at_safepoint && !JvmtiEnv::is_thread_fully_suspended(owning_thread, true, &debug_bits)) { | |
997 return JVMTI_ERROR_THREAD_NOT_SUSPENDED; | |
998 } | |
999 HandleMark hm; | |
1000 Handle th(owning_thread->threadObj()); | |
1001 ret.owner = (jthread)jni_reference(calling_thread, th); | |
1002 } | |
1003 // implied else: no owner | |
1004 } | |
1005 | |
1006 if (owning_thread != NULL) { // monitor is owned | |
1007 if ((address)owning_thread == owner) { | |
1008 // the owner field is the JavaThread * | |
1009 assert(mon != NULL, | |
1010 "must have heavyweight monitor with JavaThread * owner"); | |
1011 ret.entry_count = mon->recursions() + 1; | |
1012 } else { | |
1013 // The owner field is the Lock word on the JavaThread's stack | |
1014 // so the recursions field is not valid. We have to count the | |
1015 // number of recursive monitor entries the hard way. We pass | |
1016 // a handle to survive any GCs along the way. | |
1017 ResourceMark rm; | |
1018 ret.entry_count = count_locked_objects(owning_thread, hobj); | |
1019 } | |
1020 } | |
1021 // implied else: entry_count == 0 | |
1022 } | |
1023 | |
1024 int nWant,nWait; | |
1025 if (mon != NULL) { | |
1026 // this object has a heavyweight monitor | |
1027 nWant = mon->contentions(); // # of threads contending for monitor | |
1028 nWait = mon->waiters(); // # of threads in Object.wait() | |
1029 ret.waiter_count = nWant + nWait; | |
1030 ret.notify_waiter_count = nWait; | |
1031 } else { | |
1032 // this object has a lightweight monitor | |
1033 ret.waiter_count = 0; | |
1034 ret.notify_waiter_count = 0; | |
1035 } | |
1036 | |
1037 // Allocate memory for heavyweight and lightweight monitor. | |
1038 jvmtiError err; | |
1039 err = allocate(ret.waiter_count * sizeof(jthread *), (unsigned char**)&ret.waiters); | |
1040 if (err != JVMTI_ERROR_NONE) { | |
1041 return err; | |
1042 } | |
1043 err = allocate(ret.notify_waiter_count * sizeof(jthread *), | |
1044 (unsigned char**)&ret.notify_waiters); | |
1045 if (err != JVMTI_ERROR_NONE) { | |
1046 deallocate((unsigned char*)ret.waiters); | |
1047 return err; | |
1048 } | |
1049 | |
1050 // now derive the rest of the fields | |
1051 if (mon != NULL) { | |
1052 // this object has a heavyweight monitor | |
1053 | |
1054 // Number of waiters may actually be less than the waiter count. | |
1055 // So NULL out memory so that unused memory will be NULL. | |
1056 memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *)); | |
1057 memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *)); | |
1058 | |
1059 if (ret.waiter_count > 0) { | |
1060 // we have contending and/or waiting threads | |
1061 HandleMark hm; | |
1062 if (nWant > 0) { | |
1063 // we have contending threads | |
1064 ResourceMark rm; | |
1065 // get_pending_threads returns only java thread so we do not need to | |
1066 // check for non java threads. | |
1067 GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads( | |
1068 nWant, (address)mon, !at_safepoint); | |
1069 if (wantList->length() < nWant) { | |
1070 // robustness: the pending list has gotten smaller | |
1071 nWant = wantList->length(); | |
1072 } | |
1073 for (int i = 0; i < nWant; i++) { | |
1074 JavaThread *pending_thread = wantList->at(i); | |
1075 // If the monitor has no owner, then a non-suspended contending | |
1076 // thread could potentially change the state of the monitor by | |
1077 // entering it. The JVM/TI spec doesn't allow this. | |
1078 if (owning_thread == NULL && !at_safepoint & | |
1079 !JvmtiEnv::is_thread_fully_suspended(pending_thread, true, &debug_bits)) { | |
1080 if (ret.owner != NULL) { | |
1081 destroy_jni_reference(calling_thread, ret.owner); | |
1082 } | |
1083 for (int j = 0; j < i; j++) { | |
1084 destroy_jni_reference(calling_thread, ret.waiters[j]); | |
1085 } | |
1086 deallocate((unsigned char*)ret.waiters); | |
1087 deallocate((unsigned char*)ret.notify_waiters); | |
1088 return JVMTI_ERROR_THREAD_NOT_SUSPENDED; | |
1089 } | |
1090 Handle th(pending_thread->threadObj()); | |
1091 ret.waiters[i] = (jthread)jni_reference(calling_thread, th); | |
1092 } | |
1093 } | |
1094 if (nWait > 0) { | |
1095 // we have threads in Object.wait() | |
1096 int offset = nWant; // add after any contending threads | |
1097 ObjectWaiter *waiter = mon->first_waiter(); | |
1098 for (int i = 0, j = 0; i < nWait; i++) { | |
1099 if (waiter == NULL) { | |
1100 // robustness: the waiting list has gotten smaller | |
1101 nWait = j; | |
1102 break; | |
1103 } | |
1104 Thread *t = mon->thread_of_waiter(waiter); | |
1105 if (t != NULL && t->is_Java_thread()) { | |
1106 JavaThread *wjava_thread = (JavaThread *)t; | |
1107 // If the thread was found on the ObjectWaiter list, then | |
1108 // it has not been notified. This thread can't change the | |
1109 // state of the monitor so it doesn't need to be suspended. | |
1110 Handle th(wjava_thread->threadObj()); | |
1111 ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th); | |
1112 ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th); | |
1113 } | |
1114 waiter = mon->next_waiter(waiter); | |
1115 } | |
1116 } | |
1117 } | |
1118 | |
1119 // Adjust count. nWant and nWait count values may be less than original. | |
1120 ret.waiter_count = nWant + nWait; | |
1121 ret.notify_waiter_count = nWait; | |
1122 } else { | |
1123 // this object has a lightweight monitor and we have nothing more | |
1124 // to do here because the defaults are just fine. | |
1125 } | |
1126 | |
1127 // we don't update return parameter unless everything worked | |
1128 *info_ptr = ret; | |
1129 | |
1130 return JVMTI_ERROR_NONE; | |
1131 } | |
1132 | |
1133 ResourceTracker::ResourceTracker(JvmtiEnv* env) { | |
1134 _env = env; | |
1135 _allocations = new (ResourceObj::C_HEAP) GrowableArray<unsigned char*>(20, true); | |
1136 _failed = false; | |
1137 } | |
1138 ResourceTracker::~ResourceTracker() { | |
1139 if (_failed) { | |
1140 for (int i=0; i<_allocations->length(); i++) { | |
1141 _env->deallocate(_allocations->at(i)); | |
1142 } | |
1143 } | |
1144 delete _allocations; | |
1145 } | |
1146 | |
1147 jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) { | |
1148 unsigned char *ptr; | |
1149 jvmtiError err = _env->allocate(size, &ptr); | |
1150 if (err == JVMTI_ERROR_NONE) { | |
1151 _allocations->append(ptr); | |
1152 *mem_ptr = ptr; | |
1153 } else { | |
1154 *mem_ptr = NULL; | |
1155 _failed = true; | |
1156 } | |
1157 return err; | |
1158 } | |
1159 | |
1160 unsigned char* ResourceTracker::allocate(jlong size) { | |
1161 unsigned char* ptr; | |
1162 allocate(size, &ptr); | |
1163 return ptr; | |
1164 } | |
1165 | |
1166 char* ResourceTracker::strdup(const char* str) { | |
1167 char *dup_str = (char*)allocate(strlen(str)+1); | |
1168 if (dup_str != NULL) { | |
1169 strcpy(dup_str, str); | |
1170 } | |
1171 return dup_str; | |
1172 } | |
1173 | |
// Singly-linked-list node used by VM_GetMultipleStackTraces to accumulate
// one jvmtiStackInfo per thread while walking stacks at a safepoint.
struct StackInfoNode {
  struct StackInfoNode *next;  // next (previously filled) node, or NULL
  jvmtiStackInfo info;         // stack info for one thread
};
1178 | |
1179 // Create a jvmtiStackInfo inside a linked list node and create a | |
1180 // buffer for the frame information, both allocated as resource objects. | |
1181 // Fill in both the jvmtiStackInfo and the jvmtiFrameInfo. | |
1182 // Note that either or both of thr and thread_oop | |
1183 // may be null if the thread is new or has exited. | |
1184 void | |
1185 VM_GetMultipleStackTraces::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) { | |
1186 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); | |
1187 | |
1188 jint state = 0; | |
1189 struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode); | |
1190 jvmtiStackInfo *infop = &(node->info); | |
1191 node->next = head(); | |
1192 set_head(node); | |
1193 infop->frame_count = 0; | |
1194 infop->thread = jt; | |
1195 | |
1196 if (thread_oop != NULL) { | |
1197 // get most state bits | |
1198 state = (jint)java_lang_Thread::get_thread_status(thread_oop); | |
1199 } | |
1200 | |
1201 if (thr != NULL) { // add more state bits if there is a JavaThead to query | |
1202 // same as is_being_ext_suspended() but without locking | |
1203 if (thr->is_ext_suspended() || thr->is_external_suspend()) { | |
1204 state |= JVMTI_THREAD_STATE_SUSPENDED; | |
1205 } | |
1206 JavaThreadState jts = thr->thread_state(); | |
1207 if (jts == _thread_in_native) { | |
1208 state |= JVMTI_THREAD_STATE_IN_NATIVE; | |
1209 } | |
1210 OSThread* osThread = thr->osthread(); | |
1211 if (osThread != NULL && osThread->interrupted()) { | |
1212 state |= JVMTI_THREAD_STATE_INTERRUPTED; | |
1213 } | |
1214 } | |
1215 infop->state = state; | |
1216 | |
1217 if (thr != NULL || (state & JVMTI_THREAD_STATE_ALIVE) != 0) { | |
1218 infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count()); | |
1219 env()->get_stack_trace(thr, 0, max_frame_count(), | |
1220 infop->frame_buffer, &(infop->frame_count)); | |
1221 } else { | |
1222 infop->frame_buffer = NULL; | |
1223 infop->frame_count = 0; | |
1224 } | |
1225 _frame_count_total += infop->frame_count; | |
1226 } | |
1227 | |
// Based on the stack information in the linked list, allocate memory
// block to return and fill it from the info in the linked list.
void
VM_GetMultipleStackTraces::allocate_and_fill_stacks(jint thread_count) {
  // do I need to worry about alignment issues?
  jlong alloc_size =  thread_count       * sizeof(jvmtiStackInfo)
                    + _frame_count_total * sizeof(jvmtiFrameInfo);
  // NOTE(review): the allocate() result is not checked here; on failure
  // _stack_info would be left unusable — presumably callers rely on
  // allocate() succeeding at a safepoint. TODO confirm.
  env()->allocate(alloc_size, (unsigned char **)&_stack_info);

  // pointers to move through the newly allocated space as it is filled in
  jvmtiStackInfo *si = _stack_info + thread_count;      // bottom of stack info
  jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si;            // is the top of frame info

  // copy information in resource area into allocated buffer
  // insert stack info backwards since linked list is backwards
  // insert frame info forwards
  // walk the StackInfoNodes
  for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
    jint frame_count = sin->info.frame_count;
    size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
    --si;
    memcpy(si, &(sin->info), sizeof(jvmtiStackInfo));
    if (frames_size == 0) {
      si->frame_buffer = NULL;
    } else {
      memcpy(fi, sin->info.frame_buffer, frames_size);
      si->frame_buffer = fi;  // point to the new allocated copy of the frames
      fi += frame_count;
    }
  }
  // stack infos must end exactly at the start of the buffer, frame infos
  // must end exactly at the end of it
  assert(si == _stack_info, "the last copied stack info must be the first record");
  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
         "the last copied frame info must be the last record");
}
1262 | |
1263 | |
// Safepoint operation: collect stack traces for an explicit list of
// threads. Fails with JVMTI_ERROR_INVALID_THREAD if any element does
// not resolve to a java.lang.Thread instance.
void
VM_GetThreadListStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  for (int i = 0; i < _thread_count; ++i) {
    jthread jt = _thread_list[i];
    oop thread_oop = JNIHandles::resolve_external_guard(jt);
    if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
      set_result(JVMTI_ERROR_INVALID_THREAD);
      return;
    }
    // java_lang_Thread::thread() may be NULL for a new/exited thread;
    // fill_frames() handles that case
    fill_frames(jt, java_lang_Thread::thread(thread_oop), thread_oop);
  }
  allocate_and_fill_stacks(_thread_count);
}
1280 | |
// Safepoint operation: collect stack traces for every live, visible
// JavaThread. Exiting and hidden threads are skipped.
void
VM_GetAllStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  _final_thread_count = 0;
  for (JavaThread *jt = Threads::first(); jt != NULL; jt = jt->next()) {
    oop thread_oop = jt->threadObj();
    if (thread_oop != NULL &&
        !jt->is_exiting() &&
        java_lang_Thread::is_alive(thread_oop) &&
        !jt->is_hidden_from_external_view()) {
      ++_final_thread_count;
      // Handle block of the calling thread is used to create local refs.
      fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
                  jt, thread_oop);
    }
  }
  allocate_and_fill_stacks(_final_thread_count);
}
1301 | |
// Verifies that the top frame is a java frame in an expected state.
// Deoptimizes frame if needed.
// Checks that the frame method signature matches the return type (tos).
// HandleMark must be defined in the caller only.
// It is to keep a ret_ob_h handle alive after return to the caller.
jvmtiError
JvmtiEnvBase::check_top_frame(JavaThread* current_thread, JavaThread* java_thread,
                              jvalue value, TosState tos, Handle* ret_ob_h) {
  ResourceMark rm(current_thread);

  vframe *vf = vframeFor(java_thread, 0);
  NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES);

  // Cast happens before the is_java_frame() check, but jvf is only
  // dereferenced after the short-circuiting '||' confirms a java frame.
  javaVFrame *jvf = (javaVFrame*) vf;
  if (!vf->is_java_frame() || jvf->method()->is_native()) {
    return JVMTI_ERROR_OPAQUE_FRAME;
  }

  // If the frame is a compiled one, need to deoptimize it.
  if (vf->is_compiled_frame()) {
    if (!vf->fr().can_be_deoptimized()) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
    VM_DeoptimizeFrame deopt(java_thread, jvf->fr().id());
    VMThread::execute(&deopt);
  }

  // Get information about method return type
  symbolHandle signature(current_thread, jvf->method()->signature());

  ResultTypeFinder rtf(signature);
  TosState fr_tos = as_TosState(rtf.type());
  if (fr_tos != tos) {
    // byte/char/short results are delivered through the int slot, so itos
    // is accepted for btos/ctos/stos frames
    if (tos != itos || (fr_tos != btos && fr_tos != ctos && fr_tos != stos)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
  }

  // Check that the jobject class matches the return type signature.
  jobject jobj = value.l;
  if (tos == atos && jobj != NULL) { // NULL reference is allowed
    Handle ob_h = Handle(current_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT);
    KlassHandle ob_kh = KlassHandle(current_thread, ob_h()->klass());
    NULL_CHECK(ob_kh, JVMTI_ERROR_INVALID_OBJECT);

    // Method return type signature.
    char* ty_sign = 1 + strchr(signature->as_C_string(), ')');

    if (!VM_GetOrSetLocal::is_assignable(ty_sign, Klass::cast(ob_kh()), current_thread)) {
      return JVMTI_ERROR_TYPE_MISMATCH;
    }
    // hand the resolved object back to the caller via its HandleMark
    *ret_ob_h = ob_h;
  }
  return JVMTI_ERROR_NONE;
} /* end check_top_frame */
1358 | |
1359 | |
// ForceEarlyReturn<type> follows the PopFrame approach in many aspects.
// Main difference is on the last stage in the interpreter.
// The PopFrame stops method execution to continue execution
// from the same method call instruction.
// The ForceEarlyReturn forces return from method so the execution
// continues at the bytecode following the method call.

// Threads_lock NOT held, java_thread not protected by lock
// java_thread - pre-checked

// Validate the request (thread suspended, top frame matches tos, no
// early-return already pending) and record the pending early return in
// the thread's JvmtiThreadState; the actual return happens when the
// target thread resumes.
jvmtiError
JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
  JavaThread* current_thread = JavaThread::current();
  HandleMark   hm(current_thread);
  uint32_t debug_bits = 0;

  // retrieve or create the state
  JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
  if (state == NULL) {
    return JVMTI_ERROR_THREAD_NOT_ALIVE;
  }

  // Check if java_thread is fully suspended
  if (!is_thread_fully_suspended(java_thread,
                                 true /* wait for suspend completion */,
                                 &debug_bits)) {
    return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
  }

  // Check to see if a ForceEarlyReturn was already in progress
  if (state->is_earlyret_pending()) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    return JVMTI_ERROR_INTERNAL;
  }
  {
    // The same as for PopFrame. Workaround bug:
    //  4812902: popFrame hangs if the method is waiting at a synchronize
    // Catch this condition and return an error to avoid hanging.
    // Now JVMTI spec allows an implementation to bail out with an opaque
    // frame error.
    OSThread* osThread = java_thread->osthread();
    if (osThread->get_state() == MONITOR_WAIT) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
  }
  // ret_ob_h keeps the (optional) atos return object alive; the caller's
  // HandleMark scope is what check_top_frame() relies on
  Handle ret_ob_h = Handle();
  jvmtiError err = check_top_frame(current_thread, java_thread, value, tos, &ret_ob_h);
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  assert(tos != atos || value.l == NULL || ret_ob_h() != NULL,
         "return object oop must not be NULL if jobject is not NULL");

  // Update the thread state to reflect that the top frame must be
  // forced to return.
  // The current frame will be returned later when the suspended
  // thread is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  state->set_earlyret_pending();
  state->set_earlyret_oop(ret_ob_h());
  state->set_earlyret_value(value, tos);

  // Set pending step flag for this early return.
  // It is cleared when next step event is posted.
  state->set_pending_step_for_earlyret();

  return JVMTI_ERROR_NONE;
} /* end force_early_return */
1430 | |
// Monitor-iteration callback: record monitors owned by _java_thread that
// were NOT already collected during the stack walk (i.e. monitors entered
// off-stack, e.g. via JNI MonitorEnter). Errors latch into _error and stop
// further collection.
void
JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
  if ( _error != JVMTI_ERROR_NONE) {
    // Error occurred in previous iteration so no need to add
    // to the list.
    return;
  }
  if (mon->owner() == _java_thread ) {
    // Filter out on stack monitors collected during stack walk.
    oop obj = (oop)mon->object();
    bool found = false;
    for (int j = 0; j < _owned_monitors_list->length(); j++) {
      jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
      oop check = JNIHandles::resolve(jobj);
      if (check == obj) {
        // On stack monitor already collected during the stack walk.
        found = true;
        break;
      }
    }
    if (found == false) {
      // This is off stack monitor (e.g. acquired via jni MonitorEnter).
      jvmtiError err;
      jvmtiMonitorStackDepthInfo *jmsdi;
      err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
      if (err != JVMTI_ERROR_NONE) {
        _error = err;
        return;
      }
      Handle hobj(obj);
      jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
      // stack depth is unknown for this monitor.
      jmsdi->stack_depth = -1;
      _owned_monitors_list->append(jmsdi);
    }
  }
}
1468 | |
1469 #endif // !JVMTI_KERNEL |