comparison src/share/vm/runtime/sharedRuntime.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 93c71eb28866
children 2cb2f30450c7
--- a/src/share/vm/runtime/sharedRuntime.cpp  6724:36d1d483d5d6
+++ b/src/share/vm/runtime/sharedRuntime.cpp  6725:da91efe96a93
@@ -588,17 +588,17 @@
 // called/generated when TraceRedefineClasses has the right bits
 // set. Since obsolete methods are never compiled, we don't have
 // to modify the compilers to generate calls to this function.
 //
 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
-    JavaThread* thread, methodOopDesc* method))
+    JavaThread* thread, Method* method))
   assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");

   if (method->is_obsolete()) {
     // We are calling an obsolete method, but this is not necessarily
     // an error. Our method could have been redefined just after we
-    // fetched the methodOop from the constant pool.
+    // fetched the Method* from the constant pool.

     // RC_TRACE macro has an embedded ResourceMark
     RC_TRACE_WITH_THREAD(0x00001000, thread,
       ("calling obsolete method '%s'",
        method->name_and_sig_as_C_string()));
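The hunk above is the core rename of the metadata rewrite: methodOopDesc, formerly a GC-managed oop subtype living in PermGen, becomes Method, an ordinary C++ class allocated in metaspace with a lifetime tied to its class loader. A minimal standalone sketch of the before/after shape (illustrative types only, not HotSpot source):

  #include <cstdio>
  #include <string>

  // old world (sketch): metadata lived behind oops in the GC-managed heap
  struct oopDesc { /* mark word, klass field, ... */ };
  struct methodOopDesc : oopDesc {            // collected with the Java heap
    std::string name;
  };
  typedef methodOopDesc* methodOop;

  // new world (sketch): metadata is a native C++ object in metaspace
  struct Method {                             // lifetime tied to class loader
    std::string name;
    const char* name_and_sig_as_C_string() const { return name.c_str(); }
  };

  static void rc_trace_method_entry(Method* method) {
    std::printf("calling method '%s'\n", method->name_and_sig_as_C_string());
  }

  int main() {
    Method m{"pkg.Foo.bar()V"};
    rc_trace_method_entry(&m);
    return 0;
  }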
@@ -723,12 +723,12 @@
 JRT_END

 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
   // We avoid using the normal exception construction in this case because
   // it performs an upcall to Java, and we're already out of stack space.
-  klassOop k = SystemDictionary::StackOverflowError_klass();
-  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
+  Klass* k = SystemDictionary::StackOverflowError_klass();
+  oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
   Handle exception (thread, exception_oop);
   if (StackTraceInThrowable) {
     java_lang_Throwable::fill_in_stack_trace(exception);
   }
   throw_and_post_jvmti_exception(thread, exception);
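Same pattern for classes: SystemDictionary now hands back a plain Klass* and InstanceKlass::cast becomes a checked downcast between C++ classes rather than a view over a klassOop. A self-contained sketch of that cast idiom (simplified, assumed shapes):

  #include <cassert>

  struct Klass {
    virtual bool oop_is_instance() const { return false; }
    virtual ~Klass() {}
  };

  struct InstanceKlass : Klass {
    bool oop_is_instance() const override { return true; }
    void* allocate_instance() { return new char[16]; }  // stand-in for a heap allocation
    static InstanceKlass* cast(Klass* k) {
      assert(k->oop_is_instance() && "cast to InstanceKlass");
      return static_cast<InstanceKlass*>(k);
    }
  };

  int main() {
    Klass* k = new InstanceKlass();  // e.g. SystemDictionary::StackOverflowError_klass()
    void* exception_oop = InstanceKlass::cast(k)->allocate_instance();
    delete[] static_cast<char*>(exception_oop);
    delete k;
    return 0;
  }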
@@ -907,12 +907,12 @@
 JRT_END


 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   assert(obj->is_oop(), "must be a valid oop");
-  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
-  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
+  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
+  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
 JRT_END


 jlong SharedRuntime::get_java_tid(Thread* thread) {
   if (thread != NULL) {
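The dropped ->klass_part() hop is a direct consequence of removing the klassOop wrapper: a class used to be an object header with the real Klass embedded behind it, while obj->klass() now returns the Klass* itself. A toy model of the old versus new layout (not HotSpot source):

  struct Klass {
    bool _has_finalizer;
    bool has_finalizer() const { return _has_finalizer; }
  };

  // old world (sketch): header words plus an embedded Klass, reached via klass_part()
  struct klassOopDesc {
    // ... object header words ...
    Klass _klass_part;
    Klass* klass_part() { return &_klass_part; }
  };

  struct oopDesc {
    Klass* _klass;                      // new world: direct metaspace pointer
    Klass* klass() const { return _klass; }
  };

  int main() {
    Klass k{true};
    oopDesc obj{&k};
    return obj.klass()->has_finalizer() ? 0 : 1;  // no ->klass_part() hop
  }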
@@ -933,11 +933,11 @@
   return dtrace_object_alloc_base(Thread::current(), o);
 }

 int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
   assert(DTraceAllocProbes, "wrong call");
-  Klass* klass = o->blueprint();
+  Klass* klass = o->klass();
   int size = o->size();
   Symbol* name = klass->name();
 #ifndef USDT2
   HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                    name->bytes(), name->utf8_length(), size * HeapWordSize);
@@ -948,11 +948,11 @@
 #endif /* USDT2 */
   return 0;
 }

 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
-    JavaThread* thread, methodOopDesc* method))
+    JavaThread* thread, Method* method))
   assert(DTraceMethodProbes, "wrong call");
   Symbol* kname = method->klass_name();
   Symbol* name = method->name();
   Symbol* sig = method->signature();
 #ifndef USDT2
@@ -969,11 +969,11 @@
 #endif /* USDT2 */
   return 0;
 JRT_END

 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
-    JavaThread* thread, methodOopDesc* method))
+    JavaThread* thread, Method* method))
   assert(DTraceMethodProbes, "wrong call");
   Symbol* kname = method->klass_name();
   Symbol* name = method->name();
   Symbol* sig = method->signature();
 #ifndef USDT2
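The DTrace entry/exit probes are otherwise untouched: class and method names were already native Symbol* objects before this change, so only the method parameter type moves. A runnable stand-in for what the probe sites pass along (printf in place of the real HS_DTRACE_PROBE/USDT macros, all types simplified):

  #include <cstdio>
  #include <cstring>

  struct Symbol {
    const char* _body;
    const unsigned char* bytes() const { return (const unsigned char*)_body; }
    int utf8_length() const { return (int)std::strlen(_body); }
  };

  struct Method {
    Symbol* _klass_name; Symbol* _name; Symbol* _sig;
    Symbol* klass_name() const { return _klass_name; }
    Symbol* name() const { return _name; }
    Symbol* signature() const { return _sig; }
  };

  // the probes take counted strings: (bytes, length) pairs per Symbol
  static int dtrace_method_entry(Method* m) {
    std::printf("entry %.*s.%.*s%.*s\n",
                m->klass_name()->utf8_length(), (const char*)m->klass_name()->bytes(),
                m->name()->utf8_length(),       (const char*)m->name()->bytes(),
                m->signature()->utf8_length(),  (const char*)m->signature()->bytes());
    return 0;
  }

  int main() {
    Symbol k{"pkg/Foo"}, n{"bar"}, s{"()V"};
    Method m{&k, &n, &s};
    return dtrace_method_entry(&m);
  }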
@@ -1057,11 +1057,11 @@
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
   if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
-    klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
+    Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
                             // klass is already loaded
     KlassHandle static_receiver_klass(THREAD, rk);
     // Method handle invokes might have been optimized to a direct call
     // so don't check for the receiver class.
     // FIXME this weakens the assert too much
@@ -1069,15 +1069,15 @@
     assert(receiver_klass->is_subtype_of(static_receiver_klass()) ||
            callee->is_method_handle_intrinsic() ||
            callee->is_compiled_lambda_form(),
            "actual receiver must be subclass of static receiver klass");
     if (receiver_klass->oop_is_instance()) {
-      if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
+      if (InstanceKlass::cast(receiver_klass())->is_not_initialized()) {
         tty->print_cr("ERROR: Klass not yet initialized!!");
-        receiver_klass.print();
+        receiver_klass()->print();
       }
-      assert(!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
+      assert(!InstanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
     }
   }
 #endif

   return receiver;
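The assert block encodes the usual receiver invariant: for non-static, non-invokedynamic sends the dynamic receiver class must be a subtype of the class named at the call site, with method-handle intrinsics and compiled lambda forms exempted. A compact model of that subtype walk (simplified to a single-inheritance chain, assumed shapes):

  #include <cassert>

  struct Klass {
    Klass* _super;
    explicit Klass(Klass* super = nullptr) : _super(super) {}
    bool is_subtype_of(const Klass* k) const {
      for (const Klass* c = this; c != nullptr; c = c->_super)
        if (c == k) return true;
      return false;
    }
  };

  int main() {
    Klass object_k;                    // java.lang.Object stand-in
    Klass static_receiver(&object_k);  // class named in the constant-pool entry
    Klass actual_receiver(&static_receiver);
    assert(actual_receiver.is_subtype_of(&static_receiver));
    assert(actual_receiver.is_subtype_of(&object_k));
    return 0;
  }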
@@ -1231,11 +1231,11 @@

   // grab lock, check for deoptimization and potentially patch caller
   {
     MutexLocker ml_patch(CompiledIC_lock);

-    // Now that we are ready to patch if the methodOop was redefined then
+    // Now that we are ready to patch if the Method* was redefined then
     // don't update call site and let the caller retry.

     if (!callee_method->is_old()) {
 #ifdef ASSERT
       // We must not try to patch to jump to an already unloaded method.
@@ -1243,11 +1243,13 @@
       assert(CodeCache::find_blob(dest_entry_point) != NULL,
              "should not unload nmethod while locked");
     }
 #endif
     if (is_virtual) {
-      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+      nmethod* nm = callee_nm;
+      if (nm == NULL) CodeCache::find_blob(caller_frame.pc());
+      CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
       if (inline_cache->is_clean()) {
         inline_cache->set_to_monomorphic(virtual_call_info);
       }
     } else {
       CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
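CompiledIC_before now takes the caller nmethod as well as the pc: with metadata out of the heap, an inline cache may cache a Method* or CompiledICHolder* rather than an oop, and classifying the stub goes through the owning nmethod's relocation info. (The nm computed just above appears unused, since the lookup that matters goes through caller_nm.) A sketch of the two-argument lookup (assumed shapes, not the real compiledIC.hpp API):

  #include <cassert>
  #include <map>

  typedef unsigned char* address;

  struct CompiledIC { bool _clean = true; bool is_clean() const { return _clean; } };

  struct nmethod {
    std::map<address, CompiledIC> _ics;  // call-site pc -> inline cache
    CompiledIC* ic_at(address call_pc) {
      auto it = _ics.find(call_pc);
      assert(it != _ics.end() && "pc is not an IC call site in this nmethod");
      return &it->second;
    }
  };

  // new-style lookup: the caller nmethod is explicit
  static CompiledIC* CompiledIC_before(nmethod* caller_nm, address return_pc) {
    return caller_nm->ic_at(return_pc - 5);  // assume a 5-byte call immediately before
  }

  int main() {
    unsigned char code[16] = {0};
    nmethod nm;
    nm._ics[code + 3] = CompiledIC();
    CompiledIC* ic = CompiledIC_before(&nm, code + 8);
    return ic->is_clean() ? 0 : 1;
  }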
@@ -1272,12 +1274,12 @@
 #endif /* ASSERT */

   methodHandle callee_method;
   JRT_BLOCK
     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
-    // Return methodOop through TLS
-    thread->set_vm_result(callee_method());
+    // Return Method* through TLS
+    thread->set_vm_result_2(callee_method());
   JRT_BLOCK_END
   // return compiled code entry point after potential safepoints
   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
   return callee_method->verified_code_entry();
 JRT_END
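The set_vm_result -> set_vm_result_2 switch follows from the same split: vm_result stays a GC-scanned oop slot, while vm_result_2 carries bare metadata that the collector must not visit as an oop. A minimal model of the two TLS slots (illustrative only):

  struct oopDesc; struct Method;

  struct JavaThread {
    oopDesc* _vm_result   = nullptr;  // GC-scanned slot for heap objects
    Method*  _vm_result_2 = nullptr;  // raw metadata slot, invisible to the GC
    void set_vm_result(oopDesc* o)  { _vm_result = o; }
    void set_vm_result_2(Method* m) { _vm_result_2 = m; }
  };

  int main() {
    JavaThread t;
    t.set_vm_result_2(nullptr);  // the callee Method* would go here
    return 0;
  }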
@@ -1305,23 +1307,23 @@
   CodeBlob* sender_cb = caller_frame.cb();
   nmethod* sender_nm = sender_cb->as_nmethod_or_null();

   if (caller_frame.is_interpreted_frame() ||
       caller_frame.is_entry_frame()) {
-    methodOop callee = thread->callee_target();
+    Method* callee = thread->callee_target();
     guarantee(callee != NULL && callee->is_method(), "bad handshake");
-    thread->set_vm_result(callee);
+    thread->set_vm_result_2(callee);
     thread->set_callee_target(NULL);
     return callee->get_c2i_entry();
   }

   // Must be compiled to compiled path which is safe to stackwalk
   methodHandle callee_method;
   JRT_BLOCK
     // Force resolving of caller (if we called from compiled frame)
     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
-    thread->set_vm_result(callee_method());
+    thread->set_vm_result_2(callee_method());
   JRT_BLOCK_END
   // return compiled code entry point after potential safepoints
   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
   return callee_method->verified_code_entry();
 JRT_END
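When the caller turns out to be interpreted (or an entry frame), the callee Method* is handed over through the thread's callee_target slot and execution resumes at the callee's c2i adapter. A self-contained sketch of that handshake (names and shapes assumed, not HotSpot source):

  #include <cassert>
  typedef void* address;

  struct Method {
    address _c2i_entry;
    bool is_method() const { return true; }
    address get_c2i_entry() const { return _c2i_entry; }
  };

  struct JavaThread {
    Method* _callee_target = nullptr;
    Method* _vm_result_2   = nullptr;
    Method* callee_target() const { return _callee_target; }
    void set_callee_target(Method* m) { _callee_target = m; }
    void set_vm_result_2(Method* m)   { _vm_result_2 = m; }
  };

  static address handle_wrong_method(JavaThread* thread) {
    Method* callee = thread->callee_target();
    assert(callee != nullptr && callee->is_method() && "bad handshake");
    thread->set_vm_result_2(callee);  // also published to the caller via TLS
    thread->set_callee_target(nullptr);
    return callee->get_c2i_entry();   // re-enter through the interpreter adapter
  }

  int main() {
    static int stub;
    Method m{&stub};
    JavaThread t;
    t.set_callee_target(&m);
    return handle_wrong_method(&t) == &stub ? 0 : 1;
  }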
@@ -1330,11 +1332,11 @@
 // resolve a static call and patch code
 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
   methodHandle callee_method;
   JRT_BLOCK
     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
-    thread->set_vm_result(callee_method());
+    thread->set_vm_result_2(callee_method());
   JRT_BLOCK_END
   // return compiled code entry point after potential safepoints
   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
   return callee_method->verified_code_entry();
 JRT_END
@@ -1343,11 +1345,11 @@
 // resolve virtual call and update inline cache to monomorphic
 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
   methodHandle callee_method;
   JRT_BLOCK
     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
-    thread->set_vm_result(callee_method());
+    thread->set_vm_result_2(callee_method());
   JRT_BLOCK_END
   // return compiled code entry point after potential safepoints
   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
   return callee_method->verified_code_entry();
 JRT_END
@@ -1357,11 +1359,11 @@
 // monomorphic, so it has no inline cache). Patch code to resolved target.
 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
   methodHandle callee_method;
   JRT_BLOCK
     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
-    thread->set_vm_result(callee_method());
+    thread->set_vm_result_2(callee_method());
   JRT_BLOCK_END
   // return compiled code entry point after potential safepoints
   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
   return callee_method->verified_code_entry();
 JRT_END
@@ -1440,23 +1442,23 @@
   RegisterMap reg_map(thread, false);
   frame caller_frame = thread->last_frame().sender(&reg_map);
   CodeBlob* cb = caller_frame.cb();
   if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
     // Not a non-entrant nmethod, so find inline_cache
-    CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+    CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
     bool should_be_mono = false;
     if (inline_cache->is_optimized()) {
       if (TraceCallFixup) {
         ResourceMark rm(thread);
         tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
         callee_method->print_short_name(tty);
         tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
       }
       should_be_mono = true;
-    } else {
-      compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
-      if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
+    } else if (inline_cache->is_icholder_call()) {
+      CompiledICHolder* ic_oop = inline_cache->cached_icholder();
+      if ( ic_oop != NULL) {

         if (receiver()->klass() == ic_oop->holder_klass()) {
           // This isn't a real miss. We must have seen that compiled code
           // is now available and we want the call site converted to a
           // monomorphic compiled call site.
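The else branch is restructured because the megamorphic payload is no longer a heap object: compiledICHolderOop plus a runtime type test becomes a native CompiledICHolder reached through an explicit is_icholder_call() query. A toy version of the new classification (simplified, assumed shapes):

  struct Klass {};
  struct Method {};

  struct CompiledICHolder {  // native object, freed when the IC is cleaned
    Method* _holder_method;
    Klass*  _holder_klass;
    Klass* holder_klass() const { return _holder_klass; }
  };

  struct CompiledIC {
    CompiledICHolder* _holder = nullptr;  // set for itable/megamorphic stubs
    bool is_icholder_call() const { return _holder != nullptr; }
    CompiledICHolder* cached_icholder() const { return _holder; }
  };

  int main() {
    Klass k; Method m;
    CompiledICHolder h{&m, &k};
    CompiledIC ic{&h};
    if (ic.is_icholder_call() && ic.cached_icholder()->holder_klass() == &k)
      return 0;  // the "not a real miss" path in the hunk above
    return 1;
  }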
@@ -1589,11 +1591,11 @@
     if (is_static_call) {
       CompiledStaticCall* ssc = compiledStaticCall_at(call_addr);
       ssc->set_to_clean();
     } else {
       // compiled, dispatched call (which used to call an interpreted method)
-      CompiledIC* inline_cache = CompiledIC_at(call_addr);
+      CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
       inline_cache->set_to_clean();
     }
   }
 }

@@ -1620,23 +1622,23 @@
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race
 // where we went int -> i2c -> c2i and so the caller could in fact be
 // interpreted. If the caller is compiled we attempt to patch the caller
 // so he no longer calls into the interpreter.
-IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
-  methodOop moop(method);
+IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
+  Method* moop(method);

   address entry_point = moop->from_compiled_entry();

   // It's possible that deoptimization can occur at a call site which hasn't
   // been resolved yet, in which case this function will be called from
   // an nmethod that has been patched for deopt and we can ignore the
   // request for a fixup.
   // Also it is possible that we lost a race in that from_compiled_entry
   // is now back to the i2c in that case we don't need to patch and if
   // we did we'd leap into space because the callsite needs to use
-  // "to interpreter" stub in order to load up the methodOop. Don't
+  // "to interpreter" stub in order to load up the Method*. Don't
   // ask me how I know this...

   CodeBlob* cb = CodeCache::find_blob(caller_pc);
   if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
     return;
@@ -1905,11 +1907,11 @@
 static int _size_histogram[MAX_ARITY];  // histogram of arg size in words
 static int _max_arity;                  // max. arity seen
 static int _max_size;                   // max. arg size seen

 static void add_method_to_histogram(nmethod* nm) {
-  methodOop m = nm->method();
+  Method* m = nm->method();
   ArgumentCount args(m->signature());
   int arity = args.size() + (m->is_static() ? 0 : 1);
   int argsize = m->size_of_parameters();
   arity = MIN2(arity, MAX_ARITY-1);
   argsize = MIN2(argsize, MAX_ARITY-1);
@@ -2798,11 +2800,11 @@
 }

 // QQQ we could place number of active monitors in the array so that compiled code
 // could double check it.

-  methodOop moop = fr.interpreter_frame_method();
+  Method* moop = fr.interpreter_frame_method();
   int max_locals = moop->max_locals();
   // Allocate temp buffer, 1 word per local & 2 per active monitor
   int buf_size_words = max_locals + active_monitor_count*2;
   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t, buf_size_words, mtCode);

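The buffer sizing in the last hunk is one word per interpreter local plus two words (lock and object) per active monitor. A tiny standalone check of that arithmetic (illustrative values, plain calloc in place of NEW_C_HEAP_ARRAY):

  #include <cstdint>
  #include <cstdlib>

  int main() {
    int max_locals = 7;            // from Method::max_locals()
    int active_monitor_count = 2;  // monitors currently held in the frame
    int buf_size_words = max_locals + active_monitor_count * 2;
    intptr_t* buf = (intptr_t*)std::calloc(buf_size_words, sizeof(intptr_t));
    // locals occupy buf[0 .. max_locals-1]; each monitor takes the next 2 words
    std::free(buf);
    return buf_size_words == 11 ? 0 : 1;
  }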