comparison src/share/vm/opto/matcher.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents cd5d10655495
children 52b4284cb496

--- a/src/share/vm/opto/matcher.cpp   14908:8db6e76cb658
+++ b/src/share/vm/opto/matcher.cpp   14909:4ca6dc0799b6
@@ -51,15 +51,12 @@
 # include "adfiles/ad_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
-# include "adfiles/ad_ppc_64.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc
+# include "adfiles/ad_ppc.hpp"
 #endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
 
 const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
@@ -843,19 +840,20 @@
   // Share frame pointer while making spill ops
   set_shared(fp);
 
   // Compute generic short-offset Loads
 #ifdef _LP64
-  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
+  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
 #endif
-  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
-  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
-  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
-  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
-  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
+  MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp));
+  MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp));
+  MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp));
+  MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp));
+  MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
   assert(spillI != NULL && spillL != NULL && spillF != NULL &&
          spillD != NULL && spillP != NULL, "");
+
   // Get the ADLC notion of the right regmask, for each basic type.
 #ifdef _LP64
   idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
 #endif
   idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
@@ -1336,27 +1334,15 @@
   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
     jvms->set_map(sfpt);
   }
 
   // Debug inputs begin just after the last incoming parameter
-  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
-         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
+  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
+          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );
 
   // Move the OopMap
   msfpt->_oop_map = sfpt->_oop_map;
-
-  // Add additional edges.
-  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
-    // For these calls we can not add MachConstantBase in expand(), as the
-    // ins are not complete then.
-    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
-    if (msfpt->jvms() &&
-        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
-      // We added an edge before jvms, so we must adapt the position of the ins.
-      msfpt->jvms()->adapt_position(+1);
-    }
-  }
 
   // Registers killed by the call are set in the local scheduling pass
   // of Global Code Motion.
   return msfpt;
 }
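
The "Add additional edges" block removed in the hunk above wires the MachConstantBase node in as an extra call input ahead of the JVMState debug inputs, and then calls jvms->adapt_position(+1) because every recorded debug offset shifts by one once an edge is inserted in front of it. A minimal self-contained model of that bookkeeping, using invented stand-in types rather than HotSpot's Node and JVMState classes:

    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for a machine call node's input list plus the
    // recorded "debug inputs start here" offset (loosely modelling JVMState).
    struct FakeCallNode {
      std::vector<int> ins;   // input edges, identified by arbitrary ids
      int debug_start;        // index of the first debug input
    };

    // Insert a new required input at position 'pos'.  If the insertion point is
    // at or before debug_start, every recorded debug offset moves by one, which
    // is the shift jvms->adapt_position(+1) performs in the removed code.
    void ins_req(FakeCallNode& n, int pos, int edge_id) {
      n.ins.insert(n.ins.begin() + pos, edge_id);
      if (pos <= n.debug_start) {
        n.debug_start += 1;   // keep debug offsets pointing at the same edges
      }
    }

    int main() {
      FakeCallNode call{{10, 11, 12, 20, 21}, 3};  // ids 20, 21 are debug inputs
      ins_req(call, 2, 99);                        // new edge lands before the debug inputs
      std::printf("first debug input is id %d at index %d\n",
                  call.ins[call.debug_start], call.debug_start);  // still id 20, now index 4
      return 0;
    }
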
@@ -1996,10 +1982,11 @@
       case Op_MachProj:
       case Op_MergeMem:
       case Op_Catch:
       case Op_CatchProj:
       case Op_CProj:
+      case Op_FlagsProj:
       case Op_JumpProj:
       case Op_JProj:
       case Op_NeverBranch:
         set_dontcare(n);
         break;
@@ -2342,11 +2329,11 @@
 // intervening volatile load, and thus we don't need a barrier here.
 // We retain the Node to act as a compiler ordering barrier.
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
+  assert(vmb->Opcode() != Op_MemBarAcquire, "");
   const MemBarNode* membar = vmb->as_MemBar();
 
   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
   Node* ctrl = NULL;
   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
@@ -2387,11 +2374,11 @@
     }
 
     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
+      if (xop == Op_MemBarAcquire) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we
         // are them, and seeing if we can be removed.
         return post_store_load_barrier(x->as_MemBar());
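
Both revisions in the post_store_load_barrier hunks above (and in the one that follows) implement the same forward scan: starting from the control projection of a trailing StoreLoad barrier, the walk keeps the barrier if it reaches an acquire-style barrier first (the 14908 side also treats Op_LoadFence that way), considers it redundant if a later barrier already provides the ordering, and gives up conservatively at calls, safepoints, or block ends. A rough self-contained sketch of that decision with invented enum names, simplified from the real code, which recurses into the later barrier instead of accepting it outright:

    #include <vector>

    // Invented stand-ins for the node kinds the walk distinguishes; these are
    // not HotSpot's real opcodes.
    enum class MemOp { AcquireBarrier, FullBarrier, Call, Safepoint, Plain };

    // Decide whether the StoreLoad barrier emitted after a volatile store can be
    // dropped, by scanning what follows it in program order.  Shaped after
    // Matcher::post_store_load_barrier: keep the barrier if a volatile load
    // (acquire) comes first or if control flow becomes opaque; drop it if a
    // later full barrier already orders the store against subsequent loads.
    bool trailing_storeload_is_redundant(const std::vector<MemOp>& following) {
      for (MemOp op : following) {
        switch (op) {
          case MemOp::AcquireBarrier:  // upcoming volatile load still needs the barrier
            return false;
          case MemOp::FullBarrier:     // a later barrier provides the ordering
            return true;
          case MemOp::Call:            // conservative: cannot see past calls/safepoints
          case MemOp::Safepoint:
            return false;
          case MemOp::Plain:           // ordinary node, keep scanning
            break;
        }
      }
      return false;                    // nothing found: keep the barrier
    }

    int main() {
      // A volatile store followed by another barrier: the first trailing barrier
      // is redundant.  An intervening volatile load would make it required.
      std::vector<MemOp> ops = { MemOp::Plain, MemOp::FullBarrier };
      return trailing_storeload_is_redundant(ops) ? 0 : 1;
    }
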
@@ -2401,73 +2388,10 @@
     // probably not necessary to check for these
     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
       return false;
     }
   }
-  return false;
-}
-
-// Check whether node n is a branch to an uncommon trap that we could
-// optimize as test with very high branch costs in case of going to
-// the uncommon trap. The code must be able to be recompiled to use
-// a cheaper test.
-bool Matcher::branches_to_uncommon_trap(const Node *n) {
-  // Don't do it for natives, adapters, or runtime stubs
-  Compile *C = Compile::current();
-  if (!C->is_method_compilation()) return false;
-
-  assert(n->is_If(), "You should only call this on if nodes.");
-  IfNode *ifn = n->as_If();
-
-  Node *ifFalse = NULL;
-  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
-    if (ifn->fast_out(i)->is_IfFalse()) {
-      ifFalse = ifn->fast_out(i);
-      break;
-    }
-  }
-  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
-
-  Node *reg = ifFalse;
-  int cnt = 4;  // We must protect against cycles.  Limit to 4 iterations.
-                // Alternatively use visited set?  Seems too expensive.
-  while (reg != NULL && cnt > 0) {
-    CallNode *call = NULL;
-    RegionNode *nxt_reg = NULL;
-    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
-      Node *o = reg->fast_out(i);
-      if (o->is_Call()) {
-        call = o->as_Call();
-      }
-      if (o->is_Region()) {
-        nxt_reg = o->as_Region();
-      }
-    }
-
-    if (call &&
-        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
-      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
-      if (trtype->isa_int() && trtype->is_int()->is_con()) {
-        jint tr_con = trtype->is_int()->get_con();
-        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
-        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
-        assert((int)reason < (int)BitsPerInt, "recode bit map");
-
-        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
-            && action != Deoptimization::Action_none) {
-          // This uncommon trap is sure to recompile, eventually.
-          // When that happens, C->too_many_traps will prevent
-          // this transformation from happening again.
-          return true;
-        }
-      }
-    }
-
-    reg = nxt_reg;
-    cnt--;
-  }
-
-  return false;
   return false;
 }
 
 //=============================================================================
 //---------------------------State---------------------------------------------
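
The removed Matcher::branches_to_uncommon_trap walks from an If's false projection to a call into the uncommon-trap blob, decodes the constant trap request into a deoptimization reason and action, and answers true only when that reason is still allowed for the current compile and the action is not Action_none, so that taking the branch is guaranteed to trigger recompilation eventually. A self-contained sketch of that final test; the bit layout and helper names below are invented for illustration and are not HotSpot's actual trap-request encoding:

    #include <cstdint>
    #include <cstdio>

    // Invented packing: low 8 bits hold the reason, the next 8 bits the action.
    // HotSpot encodes trap requests differently; this only illustrates carrying
    // both values in one jint-sized constant passed to the uncommon-trap call.
    constexpr uint32_t make_trap_request(uint32_t reason, uint32_t action) {
      return (action << 8) | (reason & 0xff);
    }
    constexpr uint32_t trap_request_reason(uint32_t req) { return req & 0xff; }
    constexpr uint32_t trap_request_action(uint32_t req) { return (req >> 8) & 0xff; }

    constexpr uint32_t Action_none = 0;  // stand-in for Deoptimization::Action_none

    // Mirrors the test in the removed function: the branch only counts as an
    // "almost never taken" uncommon-trap branch if its deopt reason is still in
    // the compile's allowed set (one bit per reason) and the action is not
    // 'none', so hitting the trap is sure to cause a recompile eventually.
    bool counts_as_uncommon_trap_branch(uint32_t trap_request, uint32_t allowed_reason_mask) {
      uint32_t reason = trap_request_reason(trap_request);
      uint32_t action = trap_request_action(trap_request);
      return ((allowed_reason_mask >> reason) & 1u) != 0 && action != Action_none;
    }

    int main() {
      uint32_t req = make_trap_request(/*reason=*/3, /*action=*/2);
      std::printf("%s\n", counts_as_uncommon_trap_branch(req, /*allowed mask=*/1u << 3)
                              ? "treat as uncommon trap branch" : "leave alone");
      return 0;
    }
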