comparison src/share/vm/opto/matcher.cpp @ 14456:abec000618bf

Merge
author kvn
date Tue, 28 Jan 2014 12:25:34 -0800
parents ad6695638a35
children cd5d10655495 62c54fcc0a35
comparing 14269:2a8891e0a082 with 14456:abec000618bf
@@ -51,12 +51,15 @@
 # include "adfiles/ad_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
 
 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
@@ -840,20 +843,19 @@
   // Share frame pointer while making spill ops
   set_shared(fp);
 
   // Compute generic short-offset Loads
 #ifdef _LP64
-  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
 #endif
-  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp));
-  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp));
-  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp));
-  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp));
-  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
+  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
+  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
+  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
   assert(spillI != NULL && spillL != NULL && spillF != NULL &&
          spillD != NULL && spillP != NULL, "");
-
   // Get the ADLC notion of the right regmask, for each basic type.
 #ifdef _LP64
   idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
 #endif
   idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
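The change in this hunk is that the ideal Load node constructors now take the loaded type and a required memory ordering explicitly; the dummy spill loads built here, which exist only so the matcher can learn the ADLC register masks, need plain semantics and therefore pass MemNode::unordered (plus a trailing boolean for the long variant, presumably the pre-existing require-atomic-access flag). A minimal self-contained sketch of the idea, using simplified stand-ins rather than the real HotSpot types:

#include <cstdio>

// Simplified stand-ins for MemNode::MemOrd and a load node (assumed shapes,
// not the real HotSpot declarations): the new constructor argument records
// the ordering the matcher must honor when it picks a machine instruction.
enum MemOrd { unordered, acquire };

struct ToyLoad {
  const char* ideal_type;  // "int", "long", "float", ...
  MemOrd      order;       // plain spill loads use 'unordered'
};

// What a (hypothetical) backend rule boils down to once ordering is explicit:
const char* pick_instruction(const ToyLoad& ld) {
  return (ld.order == acquire) ? "load + acquire barrier form" : "plain load";
}

int main() {
  ToyLoad spillI  = { "int", unordered };  // like LoadINode(..., TypeInt::INT, MemNode::unordered)
  ToyLoad volRead = { "int", acquire   };  // assumed: an ordered (volatile-style) read would ask for acquire
  std::printf("%s -> %s\n", spillI.ideal_type,  pick_instruction(spillI));
  std::printf("%s -> %s\n", volRead.ideal_type, pick_instruction(volRead));
  return 0;
}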
@@ -1334,15 +1336,27 @@
   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
     jvms->set_map(sfpt);
   }
 
   // Debug inputs begin just after the last incoming parameter
-  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
-          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );
+  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
+         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
 
   // Move the OopMap
   msfpt->_oop_map = sfpt->_oop_map;
+
+  // Add additional edges.
+  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
+    // For these calls we can not add MachConstantBase in expand(), as the
+    // ins are not complete then.
+    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
+    if (msfpt->jvms() &&
+        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
+      // We added an edge before jvms, so we must adapt the position of the ins.
+      msfpt->jvms()->adapt_position(+1);
+    }
+  }
 
   // Registers killed by the call are set in the local scheduling pass
   // of Global Code Motion.
   return msfpt;
 }
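What the added block does: for calls that carry a constant-table base input, the MachConstantBase edge is wired in here, because (as the comment says) it cannot be added later in expand() when the inputs are not yet complete. If that edge lands in front of the JVMState's debug inputs, every recorded debug-input offset has to slide by one, which is what jvms()->adapt_position(+1) takes care of. A rough, self-contained model of that bookkeeping, with toy types rather than the real CallNode/JVMState API:

#include <cstdio>
#include <vector>

// Toy model (assumed layout, not the real HotSpot classes): a call node's
// inputs look like [ fixed inputs | debug inputs described by the JVM state ],
// and the JVM state remembers where the debug section starts. Inserting a new
// fixed input (here: the constant-table base) at or before that point means
// the recorded start has to move by one -- the effect of adapt_position(+1).
struct ToyJVMState {
  int debug_start;                                  // index of the first debug input
  void adapt_position(int delta) { debug_start += delta; }
};

struct ToyCall {
  std::vector<const char*> ins;
  ToyJVMState jvms;
  void ins_req(int idx, const char* edge) {         // insert an input at position idx
    ins.insert(ins.begin() + idx, edge);
    if (idx <= jvms.debug_start) jvms.adapt_position(+1);
  }
};

int main() {
  ToyCall call;
  call.ins  = { "control", "memory", "arg0", "local0", "stack0" };
  call.jvms = { 3 };                                // "local0" is the first debug input
  call.ins_req(3, "MachConstantBase");              // new edge lands in front of the debug inputs
  std::printf("debug inputs now start at %d (%s)\n",
              call.jvms.debug_start, call.ins[call.jvms.debug_start]);
  return 0;
}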
@@ -2329,11 +2343,11 @@
 // intervening volatile load, and thus we don't need a barrier here.
 // We retain the Node to act as a compiler ordering barrier.
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire, "");
+  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
   const MemBarNode* membar = vmb->as_MemBar();
 
   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
   Node* ctrl = NULL;
   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
@@ -2374,11 +2388,11 @@
     }
 
     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire) {
+      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we
         // are them, and seeing if we can be removed.
         return post_store_load_barrier(x->as_MemBar());
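Both hunks above teach post_store_load_barrier about the Op_LoadFence opcode, treating it exactly like Op_MemBarAcquire: if such a standalone acquire-style fence is coming up, the StoreLoad barrier under inspection must be kept. A compact, self-contained sketch of the decision, reduced to a flat opcode scan; the enum and the MemBarVolatile/Call rules are simplifications paraphrased from the surrounding (largely unchanged) code, not an exact transcription:

#include <cstdio>

// Starting just after a StoreLoad-style barrier, decide whether that barrier
// can be removed by looking at what follows it in the instruction stream.
enum Op { Op_MemBarAcquire, Op_LoadFence, Op_MemBarVolatile, Op_Call, Op_Other, Op_End };

bool post_store_load_barrier_removable(const Op* stream) {
  for (const Op* x = stream; *x != Op_End; ++x) {
    switch (*x) {
      case Op_MemBarAcquire:
      case Op_LoadFence:      return false;  // upcoming acquire/fence: keep the barrier
      case Op_MemBarVolatile: return true;   // a later full barrier already covers us
      case Op_Call:           return false;  // unknown memory effects: keep the barrier
      default:                break;         // uninteresting node, keep scanning
    }
  }
  return false;
}

int main() {
  const Op redundant[] = { Op_Other, Op_MemBarVolatile, Op_End };
  const Op needed[]    = { Op_Other, Op_LoadFence, Op_MemBarVolatile, Op_End };
  std::printf("redundant barrier removable: %d\n", post_store_load_barrier_removable(redundant));
  std::printf("barrier before LoadFence removable: %d\n", post_store_load_barrier_removable(needed));
  return 0;
}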
@@ -2388,10 +2402,73 @@
     // probably not necessary to check for these
     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
       return false;
     }
   }
+  return false;
+}
+
+// Check whether node n is a branch to an uncommon trap that we could
+// optimize as test with very high branch costs in case of going to
+// the uncommon trap. The code must be able to be recompiled to use
+// a cheaper test.
+bool Matcher::branches_to_uncommon_trap(const Node *n) {
+  // Don't do it for natives, adapters, or runtime stubs
+  Compile *C = Compile::current();
+  if (!C->is_method_compilation()) return false;
+
+  assert(n->is_If(), "You should only call this on if nodes.");
+  IfNode *ifn = n->as_If();
+
+  Node *ifFalse = NULL;
+  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
+    if (ifn->fast_out(i)->is_IfFalse()) {
+      ifFalse = ifn->fast_out(i);
+      break;
+    }
+  }
+  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
+
+  Node *reg = ifFalse;
+  int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
+               // Alternatively use visited set? Seems too expensive.
+  while (reg != NULL && cnt > 0) {
+    CallNode *call = NULL;
+    RegionNode *nxt_reg = NULL;
+    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
+      Node *o = reg->fast_out(i);
+      if (o->is_Call()) {
+        call = o->as_Call();
+      }
+      if (o->is_Region()) {
+        nxt_reg = o->as_Region();
+      }
+    }
+
+    if (call &&
+        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
+      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
+      if (trtype->isa_int() && trtype->is_int()->is_con()) {
+        jint tr_con = trtype->is_int()->get_con();
+        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
+        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
+        assert((int)reason < (int)BitsPerInt, "recode bit map");
+
+        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
+            && action != Deoptimization::Action_none) {
+          // This uncommon trap is sure to recompile, eventually.
+          // When that happens, C->too_many_traps will prevent
+          // this transformation from happening again.
+          return true;
+        }
+      }
+    }
+
+    reg = nxt_reg;
+    cnt--;
+  }
+
   return false;
 }
 
 //=============================================================================
 //---------------------------State---------------------------------------------
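The newly added Matcher::branches_to_uncommon_trap answers one question for a backend: does the false projection of this If lead, within a handful of Regions, to an uncommon-trap call whose reason is still allowed to deoptimize and whose action will trigger recompilation? If so, the test can be matched into a form whose taken path is very expensive, because the trap path recompiles the method into a cheaper test (guarded against repetition by C->too_many_traps, as the in-code comment notes). Below is a small self-contained model of the bounded search only; the toy node collapses the trap-request decoding (reason/action) into a single flag and is not the real ideal-graph API:

#include <cstdio>

// Starting at the IfFalse projection, follow at most four Regions looking for
// a call into the uncommon-trap blob. The fixed iteration cap stands in for a
// visited set, as the comment in the real code explains, and protects against
// cycles in the region chain.
struct ToyRegion {
  bool       has_uncommon_trap_call;  // an allowed uncommon-trap call hangs off this node
  ToyRegion* next_region;             // the following Region on the false path, if any
};

bool branches_to_uncommon_trap(ToyRegion* if_false) {
  ToyRegion* reg = if_false;
  for (int cnt = 4; reg != nullptr && cnt > 0; --cnt) {  // cycle protection
    if (reg->has_uncommon_trap_call) return true;
    reg = reg->next_region;
  }
  return false;
}

int main() {
  ToyRegion trap  = { true,  nullptr };
  ToyRegion merge = { false, &trap };     // IfFalse -> Region -> uncommon-trap call
  std::printf("trap path found: %d\n", branches_to_uncommon_trap(&merge));

  ToyRegion loop  = { false, nullptr };
  loop.next_region = &loop;               // a cycle: the cap makes the walk terminate
  std::printf("cyclic path found: %d\n", branches_to_uncommon_trap(&loop));
  return 0;
}

Presumably a port consults this query when deciding whether an implicit, trapping form of a check is acceptable; that usage is outside this file and not shown in the diff.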