Mercurial > hg > truffle
comparison src/share/vm/opto/callGenerator.cpp @ 10278:6f3fd5150b67
6934604: enable parts of EliminateAutoBox by default
Summary: Resurrected autobox elimination code and enabled part of it by default.
Reviewed-by: roland, twisti
author | kvn |
---|---|
date | Wed, 08 May 2013 15:08:01 -0700 |
parents | 8bd61471a109 |
children | 3213ba4d3dff |
comparison
equal
deleted
inserted
replaced
10277:aabf54ccedb1 | 10278:6f3fd5150b67 |
---|---|
132 | 132 |
133 if (kit.C->log() != NULL) { | 133 if (kit.C->log() != NULL) { |
134 kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); | 134 kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); |
135 } | 135 } |
136 | 136 |
137 CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci()); | 137 CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci()); |
138 _call_node = call; // Save the call node in case we need it later | 138 _call_node = call; // Save the call node in case we need it later |
139 if (!is_static) { | 139 if (!is_static) { |
140 // Make an explicit receiver null_check as part of this call. | 140 // Make an explicit receiver null_check as part of this call. |
141 // Since we share a map with the caller, his JVMS gets adjusted. | 141 // Since we share a map with the caller, his JVMS gets adjusted. |
142 kit.null_check_receiver_before_call(method()); | 142 kit.null_check_receiver_before_call(method()); |
302 | 302 |
303 }; | 303 }; |
304 | 304 |
305 void LateInlineCallGenerator::do_late_inline() { | 305 void LateInlineCallGenerator::do_late_inline() { |
306 // Can't inline it | 306 // Can't inline it |
307 if (call_node() == NULL || call_node()->outcnt() == 0 || | 307 CallStaticJavaNode* call = call_node(); |
308 call_node()->in(0) == NULL || call_node()->in(0)->is_top()) { | 308 if (call == NULL || call->outcnt() == 0 || |
309 call->in(0) == NULL || call->in(0)->is_top()) { | |
309 return; | 310 return; |
310 } | 311 } |
311 | 312 |
312 const TypeTuple *r = call_node()->tf()->domain(); | 313 const TypeTuple *r = call->tf()->domain(); |
313 for (int i1 = 0; i1 < method()->arg_size(); i1++) { | 314 for (int i1 = 0; i1 < method()->arg_size(); i1++) { |
314 if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { | 315 if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) { |
315 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); | 316 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); |
316 return; | 317 return; |
317 } | 318 } |
318 } | 319 } |
319 | 320 |
320 if (call_node()->in(TypeFunc::Memory)->is_top()) { | 321 if (call->in(TypeFunc::Memory)->is_top()) { |
321 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); | 322 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing"); |
322 return; | 323 return; |
323 } | 324 } |
324 | 325 |
325 CallStaticJavaNode* call = call_node(); | 326 Compile* C = Compile::current(); |
327 // Remove inlined methods from Compiler's lists. | |
328 if (call->is_macro()) { | |
329 C->remove_macro_node(call); | |
330 } | |
326 | 331 |
327 // Make a clone of the JVMState that is appropriate to use for driving a parse | 332 // Make a clone of the JVMState that is appropriate to use for driving a parse |
328 Compile* C = Compile::current(); | 333 JVMState* old_jvms = call->jvms(); |
329 JVMState* jvms = call->jvms()->clone_shallow(C); | 334 JVMState* jvms = old_jvms->clone_shallow(C); |
330 uint size = call->req(); | 335 uint size = call->req(); |
331 SafePointNode* map = new (C) SafePointNode(size, jvms); | 336 SafePointNode* map = new (C) SafePointNode(size, jvms); |
332 for (uint i1 = 0; i1 < size; i1++) { | 337 for (uint i1 = 0; i1 < size; i1++) { |
333 map->init_req(i1, call->in(i1)); | 338 map->init_req(i1, call->in(i1)); |
334 } | 339 } |
338 Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory)); | 343 Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory)); |
339 C->initial_gvn()->set_type_bottom(mem); | 344 C->initial_gvn()->set_type_bottom(mem); |
340 map->set_req(TypeFunc::Memory, mem); | 345 map->set_req(TypeFunc::Memory, mem); |
341 } | 346 } |
342 | 347 |
343 // Make enough space for the expression stack and transfer the incoming arguments | 348 uint nargs = method()->arg_size(); |
344 int nargs = method()->arg_size(); | 349 // blow away old call arguments |
350 Node* top = C->top(); | |
351 for (uint i1 = 0; i1 < nargs; i1++) { | |
352 map->set_req(TypeFunc::Parms + i1, top); | |
353 } | |
345 jvms->set_map(map); | 354 jvms->set_map(map); |
355 | |
356 // Make enough space in the expression stack to transfer | |
357 // the incoming arguments and return value. | |
346 map->ensure_stack(jvms, jvms->method()->max_stack()); | 358 map->ensure_stack(jvms, jvms->method()->max_stack()); |
347 if (nargs > 0) { | 359 for (uint i1 = 0; i1 < nargs; i1++) { |
348 for (int i1 = 0; i1 < nargs; i1++) { | 360 map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1)); |
349 map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1)); | 361 } |
350 } | 362 |
351 } | 363 // This check is done here because for_method_handle_inline() method |
352 | 364 // needs jvms for inlined state. |
353 if (!do_late_inline_check(jvms)) { | 365 if (!do_late_inline_check(jvms)) { |
354 map->disconnect_inputs(NULL, C); | 366 map->disconnect_inputs(NULL, C); |
355 return; | 367 return; |
356 } | 368 } |
357 | 369 |
478 | 490 |
479 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) { | 491 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) { |
480 return new LateInlineStringCallGenerator(method, inline_cg); | 492 return new LateInlineStringCallGenerator(method, inline_cg); |
481 } | 493 } |
482 | 494 |
495 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator { | |
496 | |
497 public: | |
498 LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : | |
499 LateInlineCallGenerator(method, inline_cg) {} | |
500 | |
501 virtual JVMState* generate(JVMState* jvms) { | |
502 Compile *C = Compile::current(); | |
503 C->print_inlining_skip(this); | |
504 | |
505 C->add_boxing_late_inline(this); | |
506 | |
507 JVMState* new_jvms = DirectCallGenerator::generate(jvms); | |
508 return new_jvms; | |
509 } | |
510 }; | |
511 | |
512 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) { | |
513 return new LateInlineBoxingCallGenerator(method, inline_cg); | |
514 } | |
483 | 515 |
484 //---------------------------WarmCallGenerator-------------------------------- | 516 //---------------------------WarmCallGenerator-------------------------------- |
485 // Internal class which handles initial deferral of inlining decisions. | 517 // Internal class which handles initial deferral of inlining decisions. |
486 class WarmCallGenerator : public CallGenerator { | 518 class WarmCallGenerator : public CallGenerator { |
487 WarmCallInfo* _call_info; | 519 WarmCallInfo* _call_info; |