comparison src/share/vm/opto/matcher.cpp @ 17812:a7d4d4655766

Merge
author kvn
date Wed, 26 Mar 2014 18:21:05 -0700
parents 62c54fcc0a35
children 78bbf4d43a14
--- a/src/share/vm/opto/matcher.cpp	17789:6b207d038106
+++ b/src/share/vm/opto/matcher.cpp	17812:a7d4d4655766
@@ -51,12 +51,15 @@
 # include "adfiles/ad_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif

 OptoReg::Name OptoReg::c_frame_pointer;

 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
@@ -840,20 +843,19 @@
   // Share frame pointer while making spill ops
   set_shared(fp);

   // Compute generic short-offset Loads
 #ifdef _LP64
-  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
 #endif
-  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp));
-  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp));
-  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp));
-  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp));
-  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
+  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
+  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
+  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
   assert(spillI != NULL && spillL != NULL && spillF != NULL &&
          spillD != NULL && spillP != NULL, "");
-
   // Get the ADLC notion of the right regmask, for each basic type.
 #ifdef _LP64
   idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
 #endif
   idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
@@ -1334,15 +1336,27 @@
   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
     jvms->set_map(sfpt);
   }

   // Debug inputs begin just after the last incoming parameter
-  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
-          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );
+  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
+         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");

   // Move the OopMap
   msfpt->_oop_map = sfpt->_oop_map;
+
+  // Add additional edges.
+  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
+    // For these calls we can not add MachConstantBase in expand(), as the
+    // ins are not complete then.
+    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
+    if (msfpt->jvms() &&
+        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
+      // We added an edge before jvms, so we must adapt the position of the ins.
+      msfpt->jvms()->adapt_position(+1);
+    }
+  }

   // Registers killed by the call are set in the local scheduling pass
   // of Global Code Motion.
   return msfpt;
 }
@@ -2435,11 +2449,11 @@
 // intervening volatile load, and thus we don't need a barrier here.
 // We retain the Node to act as a compiler ordering barrier.
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire, "");
+  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
   const MemBarNode* membar = vmb->as_MemBar();

   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
   Node* ctrl = NULL;
   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
@@ -2480,11 +2494,11 @@
     }

     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire) {
+      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we
         // are them, and seeing if we can be removed.
         return post_store_load_barrier(x->as_MemBar());
@@ -2494,10 +2508,73 @@
     // probably not necessary to check for these
     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
       return false;
     }
   }
+  return false;
+}
+
+// Check whether node n is a branch to an uncommon trap that we could
+// optimize as test with very high branch costs in case of going to
+// the uncommon trap. The code must be able to be recompiled to use
+// a cheaper test.
+bool Matcher::branches_to_uncommon_trap(const Node *n) {
+  // Don't do it for natives, adapters, or runtime stubs
+  Compile *C = Compile::current();
+  if (!C->is_method_compilation()) return false;
+
+  assert(n->is_If(), "You should only call this on if nodes.");
+  IfNode *ifn = n->as_If();
+
+  Node *ifFalse = NULL;
+  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
+    if (ifn->fast_out(i)->is_IfFalse()) {
+      ifFalse = ifn->fast_out(i);
+      break;
+    }
+  }
+  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
+
+  Node *reg = ifFalse;
+  int cnt = 4;  // We must protect against cycles. Limit to 4 iterations.
+                // Alternatively use visited set? Seems too expensive.
+  while (reg != NULL && cnt > 0) {
+    CallNode *call = NULL;
+    RegionNode *nxt_reg = NULL;
+    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
+      Node *o = reg->fast_out(i);
+      if (o->is_Call()) {
+        call = o->as_Call();
+      }
+      if (o->is_Region()) {
+        nxt_reg = o->as_Region();
+      }
+    }
+
+    if (call &&
+        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
+      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
+      if (trtype->isa_int() && trtype->is_int()->is_con()) {
+        jint tr_con = trtype->is_int()->get_con();
+        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
+        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
+        assert((int)reason < (int)BitsPerInt, "recode bit map");
+
+        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
+            && action != Deoptimization::Action_none) {
+          // This uncommon trap is sure to recompile, eventually.
+          // When that happens, C->too_many_traps will prevent
+          // this transformation from happening again.
+          return true;
+        }
+      }
+    }
+
+    reg = nxt_reg;
+    cnt--;
+  }
+
   return false;
 }

 //=============================================================================
 //---------------------------State---------------------------------------------