Mercurial > hg > truffle
comparison src/share/vm/opto/compile.cpp @ 6848:8e47bac5643a
7054512: Compress class pointers after perm gen removal
Summary: support of compressed class pointers in the compilers.
Reviewed-by: kvn, twisti
author | roland |
---|---|
date | Tue, 09 Oct 2012 10:11:38 +0200 |
parents | c3e799c37717 |
children | f6badecb7ea7 |
comparison
equal
deleted
inserted
replaced
6847:65d07d9ee446 | 6848:8e47bac5643a |
---|---|
2237 if( n->outcnt() > 1 && | 2237 if( n->outcnt() > 1 && |
2238 !n->is_Proj() && | 2238 !n->is_Proj() && |
2239 nop != Op_CreateEx && | 2239 nop != Op_CreateEx && |
2240 nop != Op_CheckCastPP && | 2240 nop != Op_CheckCastPP && |
2241 nop != Op_DecodeN && | 2241 nop != Op_DecodeN && |
2242 nop != Op_DecodeNKlass && | |
2242 !n->is_Mem() ) { | 2243 !n->is_Mem() ) { |
2243 Node *x = n->clone(); | 2244 Node *x = n->clone(); |
2244 call->set_req( TypeFunc::Parms, x ); | 2245 call->set_req( TypeFunc::Parms, x ); |
2245 } | 2246 } |
2246 } | 2247 } |
2285 case Op_GetAndSetL: | 2286 case Op_GetAndSetL: |
2286 case Op_GetAndSetP: | 2287 case Op_GetAndSetP: |
2287 case Op_GetAndSetN: | 2288 case Op_GetAndSetN: |
2288 case Op_StoreP: | 2289 case Op_StoreP: |
2289 case Op_StoreN: | 2290 case Op_StoreN: |
2291 case Op_StoreNKlass: | |
2290 case Op_LoadB: | 2292 case Op_LoadB: |
2291 case Op_LoadUB: | 2293 case Op_LoadUB: |
2292 case Op_LoadUS: | 2294 case Op_LoadUS: |
2293 case Op_LoadI: | 2295 case Op_LoadI: |
2294 case Op_LoadUI2L: | 2296 case Op_LoadUI2L: |
2319 assert( !addp->is_AddP() || | 2321 assert( !addp->is_AddP() || |
2320 addp->in(AddPNode::Base)->is_top() || // Top OK for allocation | 2322 addp->in(AddPNode::Base)->is_top() || // Top OK for allocation |
2321 addp->in(AddPNode::Base) == n->in(AddPNode::Base), | 2323 addp->in(AddPNode::Base) == n->in(AddPNode::Base), |
2322 "Base pointers must match" ); | 2324 "Base pointers must match" ); |
2323 #ifdef _LP64 | 2325 #ifdef _LP64 |
2324 if (UseCompressedOops && | 2326 if ((UseCompressedOops || UseCompressedKlassPointers) && |
2325 addp->Opcode() == Op_ConP && | 2327 addp->Opcode() == Op_ConP && |
2326 addp == n->in(AddPNode::Base) && | 2328 addp == n->in(AddPNode::Base) && |
2327 n->in(AddPNode::Offset)->is_Con()) { | 2329 n->in(AddPNode::Offset)->is_Con()) { |
2328 // Use addressing with narrow klass to load with offset on x86. | 2330 // Use addressing with narrow klass to load with offset on x86. |
2329 // On sparc loading 32-bits constant and decoding it have less | 2331 // On sparc loading 32-bits constant and decoding it have less |
2330 // instructions (4) then load 64-bits constant (7). | 2332 // instructions (4) then load 64-bits constant (7). |
2331 // Do this transformation here since IGVN will convert ConN back to ConP. | 2333 // Do this transformation here since IGVN will convert ConN back to ConP. |
2332 const Type* t = addp->bottom_type(); | 2334 const Type* t = addp->bottom_type(); |
2333 if (t->isa_oopptr()) { | 2335 if (t->isa_oopptr() || t->isa_klassptr()) { |
2334 Node* nn = NULL; | 2336 Node* nn = NULL; |
2337 | |
2338 int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass; | |
2335 | 2339 |
2336 // Look for existing ConN node of the same exact type. | 2340 // Look for existing ConN node of the same exact type. |
2337 Compile* C = Compile::current(); | 2341 Compile* C = Compile::current(); |
2338 Node* r = C->root(); | 2342 Node* r = C->root(); |
2339 uint cnt = r->outcnt(); | 2343 uint cnt = r->outcnt(); |
2340 for (uint i = 0; i < cnt; i++) { | 2344 for (uint i = 0; i < cnt; i++) { |
2341 Node* m = r->raw_out(i); | 2345 Node* m = r->raw_out(i); |
2342 if (m!= NULL && m->Opcode() == Op_ConN && | 2346 if (m!= NULL && m->Opcode() == op && |
2343 m->bottom_type()->make_ptr() == t) { | 2347 m->bottom_type()->make_ptr() == t) { |
2344 nn = m; | 2348 nn = m; |
2345 break; | 2349 break; |
2346 } | 2350 } |
2347 } | 2351 } |
2348 if (nn != NULL) { | 2352 if (nn != NULL) { |
2349 // Decode a narrow oop to match address | 2353 // Decode a narrow oop to match address |
2350 // [R12 + narrow_oop_reg<<3 + offset] | 2354 // [R12 + narrow_oop_reg<<3 + offset] |
2351 nn = new (C) DecodeNNode(nn, t); | 2355 if (t->isa_oopptr()) { |
2356 nn = new (C) DecodeNNode(nn, t); | |
2357 } else { | |
2358 nn = new (C) DecodeNKlassNode(nn, t); | |
2359 } | |
2352 n->set_req(AddPNode::Base, nn); | 2360 n->set_req(AddPNode::Base, nn); |
2353 n->set_req(AddPNode::Address, nn); | 2361 n->set_req(AddPNode::Address, nn); |
2354 if (addp->outcnt() == 0) { | 2362 if (addp->outcnt() == 0) { |
2355 addp->disconnect_inputs(NULL); | 2363 addp->disconnect_inputs(NULL); |
2356 } | 2364 } |
2401 break; | 2409 break; |
2402 | 2410 |
2403 case Op_CmpP: | 2411 case Op_CmpP: |
2404 // Do this transformation here to preserve CmpPNode::sub() and | 2412 // Do this transformation here to preserve CmpPNode::sub() and |
2405 // other TypePtr related Ideal optimizations (for example, ptr nullness). | 2413 // other TypePtr related Ideal optimizations (for example, ptr nullness). |
2406 if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) { | 2414 if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) { |
2407 Node* in1 = n->in(1); | 2415 Node* in1 = n->in(1); |
2408 Node* in2 = n->in(2); | 2416 Node* in2 = n->in(2); |
2409 if (!in1->is_DecodeN()) { | 2417 if (!in1->is_DecodeNarrowPtr()) { |
2410 in2 = in1; | 2418 in2 = in1; |
2411 in1 = n->in(2); | 2419 in1 = n->in(2); |
2412 } | 2420 } |
2413 assert(in1->is_DecodeN(), "sanity"); | 2421 assert(in1->is_DecodeNarrowPtr(), "sanity"); |
2414 | 2422 |
2415 Compile* C = Compile::current(); | 2423 Compile* C = Compile::current(); |
2416 Node* new_in2 = NULL; | 2424 Node* new_in2 = NULL; |
2417 if (in2->is_DecodeN()) { | 2425 if (in2->is_DecodeNarrowPtr()) { |
2426 assert(in2->Opcode() == in1->Opcode(), "must be same node type"); | |
2418 new_in2 = in2->in(1); | 2427 new_in2 = in2->in(1); |
2419 } else if (in2->Opcode() == Op_ConP) { | 2428 } else if (in2->Opcode() == Op_ConP) { |
2420 const Type* t = in2->bottom_type(); | 2429 const Type* t = in2->bottom_type(); |
2421 if (t == TypePtr::NULL_PTR) { | 2430 if (t == TypePtr::NULL_PTR) { |
2431 assert(in1->is_DecodeN(), "compare klass to null?"); | |
2422 // Don't convert CmpP null check into CmpN if compressed | 2432 // Don't convert CmpP null check into CmpN if compressed |
2423 // oops implicit null check is not generated. | 2433 // oops implicit null check is not generated. |
2424 // This will allow to generate normal oop implicit null check. | 2434 // This will allow to generate normal oop implicit null check. |
2425 if (Matcher::gen_narrow_oop_implicit_null_checks()) | 2435 if (Matcher::gen_narrow_oop_implicit_null_checks()) |
2426 new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR); | 2436 new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR); |
2461 // Load [base_reg + offset], val_reg | 2471 // Load [base_reg + offset], val_reg |
2462 // NullCheck base_reg | 2472 // NullCheck base_reg |
2463 // | 2473 // |
2464 } else if (t->isa_oopptr()) { | 2474 } else if (t->isa_oopptr()) { |
2465 new_in2 = ConNode::make(C, t->make_narrowoop()); | 2475 new_in2 = ConNode::make(C, t->make_narrowoop()); |
2476 } else if (t->isa_klassptr()) { | |
2477 new_in2 = ConNode::make(C, t->make_narrowklass()); | |
2466 } | 2478 } |
2467 } | 2479 } |
2468 if (new_in2 != NULL) { | 2480 if (new_in2 != NULL) { |
2469 Node* cmpN = new (C) CmpNNode(in1->in(1), new_in2); | 2481 Node* cmpN = new (C) CmpNNode(in1->in(1), new_in2); |
2470 n->subsume_by( cmpN ); | 2482 n->subsume_by( cmpN ); |
2477 } | 2489 } |
2478 } | 2490 } |
2479 break; | 2491 break; |
2480 | 2492 |
2481 case Op_DecodeN: | 2493 case Op_DecodeN: |
2482 assert(!n->in(1)->is_EncodeP(), "should be optimized out"); | 2494 case Op_DecodeNKlass: |
2495 assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out"); | |
2483 // DecodeN could be pinned when it can't be fold into | 2496 // DecodeN could be pinned when it can't be fold into |
2484 // an address expression, see the code for Op_CastPP above. | 2497 // an address expression, see the code for Op_CastPP above. |
2485 assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control"); | 2498 assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control"); |
2486 break; | 2499 break; |
2487 | 2500 |
2488 case Op_EncodeP: { | 2501 case Op_EncodeP: |
2502 case Op_EncodePKlass: { | |
2489 Node* in1 = n->in(1); | 2503 Node* in1 = n->in(1); |
2490 if (in1->is_DecodeN()) { | 2504 if (in1->is_DecodeNarrowPtr()) { |
2491 n->subsume_by(in1->in(1)); | 2505 n->subsume_by(in1->in(1)); |
2492 } else if (in1->Opcode() == Op_ConP) { | 2506 } else if (in1->Opcode() == Op_ConP) { |
2493 Compile* C = Compile::current(); | 2507 Compile* C = Compile::current(); |
2494 const Type* t = in1->bottom_type(); | 2508 const Type* t = in1->bottom_type(); |
2495 if (t == TypePtr::NULL_PTR) { | 2509 if (t == TypePtr::NULL_PTR) { |
2510 assert(t->isa_oopptr(), "null klass?"); | |
2496 n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR)); | 2511 n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR)); |
2497 } else if (t->isa_oopptr()) { | 2512 } else if (t->isa_oopptr()) { |
2498 n->subsume_by(ConNode::make(C, t->make_narrowoop())); | 2513 n->subsume_by(ConNode::make(C, t->make_narrowoop())); |
2514 } else if (t->isa_klassptr()) { | |
2515 n->subsume_by(ConNode::make(C, t->make_narrowklass())); | |
2499 } | 2516 } |
2500 } | 2517 } |
2501 if (in1->outcnt() == 0) { | 2518 if (in1->outcnt() == 0) { |
2502 in1->disconnect_inputs(NULL); | 2519 in1->disconnect_inputs(NULL); |
2503 } | 2520 } |
2527 } | 2544 } |
2528 break; | 2545 break; |
2529 } | 2546 } |
2530 | 2547 |
2531 case Op_Phi: | 2548 case Op_Phi: |
2532 if (n->as_Phi()->bottom_type()->isa_narrowoop()) { | 2549 if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) { |
2533 // The EncodeP optimization may create Phi with the same edges | 2550 // The EncodeP optimization may create Phi with the same edges |
2534 // for all paths. It is not handled well by Register Allocator. | 2551 // for all paths. It is not handled well by Register Allocator. |
2535 Node* unique_in = n->in(1); | 2552 Node* unique_in = n->in(1); |
2536 assert(unique_in != NULL, ""); | 2553 assert(unique_in != NULL, ""); |
2537 uint cnt = n->req(); | 2554 uint cnt = n->req(); |
2690 nstack.pop(); // Shift to the next node on stack | 2707 nstack.pop(); // Shift to the next node on stack |
2691 } | 2708 } |
2692 } | 2709 } |
2693 | 2710 |
2694 // Skip next transformation if compressed oops are not used. | 2711 // Skip next transformation if compressed oops are not used. |
2695 if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks()) | 2712 if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) || |
2713 (!UseCompressedOops && !UseCompressedKlassPointers)) | |
2696 return; | 2714 return; |
2697 | 2715 |
2698 // Go over safepoints nodes to skip DecodeN nodes for debug edges. | 2716 // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges. |
2699 // It could be done for an uncommon traps or any safepoints/calls | 2717 // It could be done for an uncommon traps or any safepoints/calls |
2700 // if the DecodeN node is referenced only in a debug info. | 2718 // if the DecodeN/DecodeNKlass node is referenced only in a debug info. |
2701 while (sfpt.size() > 0) { | 2719 while (sfpt.size() > 0) { |
2702 n = sfpt.pop(); | 2720 n = sfpt.pop(); |
2703 JVMState *jvms = n->as_SafePoint()->jvms(); | 2721 JVMState *jvms = n->as_SafePoint()->jvms(); |
2704 assert(jvms != NULL, "sanity"); | 2722 assert(jvms != NULL, "sanity"); |
2705 int start = jvms->debug_start(); | 2723 int start = jvms->debug_start(); |
2706 int end = n->req(); | 2724 int end = n->req(); |
2707 bool is_uncommon = (n->is_CallStaticJava() && | 2725 bool is_uncommon = (n->is_CallStaticJava() && |
2708 n->as_CallStaticJava()->uncommon_trap_request() != 0); | 2726 n->as_CallStaticJava()->uncommon_trap_request() != 0); |
2709 for (int j = start; j < end; j++) { | 2727 for (int j = start; j < end; j++) { |
2710 Node* in = n->in(j); | 2728 Node* in = n->in(j); |
2711 if (in->is_DecodeN()) { | 2729 if (in->is_DecodeNarrowPtr()) { |
2712 bool safe_to_skip = true; | 2730 bool safe_to_skip = true; |
2713 if (!is_uncommon ) { | 2731 if (!is_uncommon ) { |
2714 // Is it safe to skip? | 2732 // Is it safe to skip? |
2715 for (uint i = 0; i < in->outcnt(); i++) { | 2733 for (uint i = 0; i < in->outcnt(); i++) { |
2716 Node* u = in->raw_out(i); | 2734 Node* u = in->raw_out(i); |