comparison src/share/vm/opto/graphKit.cpp @ 10408:836a62f43af9

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 19 Jun 2013 10:45:56 +0200
parents b9a918201d47 6f3fd5150b67
children 6b0fd0964b87
comparing 10086:e0fb8a213650 with 10408:836a62f43af9
@@ -331,10 +331,11 @@
   JVMState* ex_jvms = ex_map->_jvms;
   assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
   assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
   assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
   assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
+  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
   assert(ex_map->req() == phi_map->req(), "matching maps");
   uint tos = ex_jvms->stkoff() + ex_jvms->sp();
   Node* hidden_merge_mark = root();
   Node* region = phi_map->control();
   MergeMemNode* phi_mem = phi_map->merged_memory();
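
Note: the added scloff() check completes the layout assertions above. A SafePointNode's debug-info inputs are partitioned into consecutive regions (locals, expression stack, monitors, and, with escape analysis, scalar-replaced object records), and two maps can only be merged input-for-input when every region boundary agrees. A minimal standalone sketch of that invariant, with hypothetical offsets (illustration only, not VM code):

    #include <cassert>
    #include <cstdint>

    // Hypothetical mirror of the JVMState region boundaries; the accessor
    // names follow HotSpot, but this struct is an illustration, not VM code.
    struct JvmsLayout {
      uint32_t stkoff;  // start of the expression stack (end of locals)
      uint32_t sp;      // current stack depth
      uint32_t monoff;  // start of the monitor records
      uint32_t scloff;  // start of the scalar-replaced object records
    };

    // Two maps can be merged edge-for-edge only if every boundary matches.
    bool layouts_match(const JvmsLayout& a, const JvmsLayout& b) {
      return a.stkoff == b.stkoff && a.sp == b.sp &&
             a.monoff == b.monoff && a.scloff == b.scloff;
    }

    int main() {
      JvmsLayout ex_jvms  = {4, 2, 8, 10};
      JvmsLayout phi_jvms = {4, 2, 8, 10};
      assert(layouts_match(ex_jvms, phi_jvms));
      return 0;
    }
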
@@ -407,11 +408,11 @@
         phi_map->set_req(i, dst);
         // Prepare to append interesting stuff onto the new phi:
         while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
       } else {
         assert(dst->is_Phi(), "nobody else uses a hidden region");
-        phi = (PhiNode*)dst;
+        phi = dst->as_Phi();
       }
       if (add_multiple && src->in(0) == ex_control) {
         // Both are phis.
         add_n_reqs(dst, src);
       } else {
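
Note: replacing the C-style (PhiNode*) cast with dst->as_Phi() swaps an unchecked pointer reinterpretation for HotSpot's checked downcast, which asserts is_Phi() in debug builds. A standalone sketch of the pattern with toy classes (not the real Node hierarchy):

    #include <cassert>

    struct Node {
      virtual ~Node() {}
      virtual bool is_Phi() const { return false; }
      // Checked downcast in the style of HotSpot's Node::as_Phi():
      // asserts the dynamic type in debug builds instead of silently
      // reinterpreting the pointer like a C-style cast would.
      struct PhiNode* as_Phi();
    };

    struct PhiNode : Node {
      bool is_Phi() const override { return true; }
    };

    PhiNode* Node::as_Phi() {
      assert(is_Phi() && "invalid node class");
      return static_cast<PhiNode*>(this);
    }

    int main() {
      PhiNode phi;
      Node* dst = &phi;
      PhiNode* p = dst->as_Phi();  // OK; would assert if dst were not a Phi
      (void)p;
      return 0;
    }
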
@@ -1436,11 +1437,16 @@
   if (require_atomic_access && bt == T_LONG) {
     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
   }
-  return _gvn.transform(ld);
+  ld = _gvn.transform(ld);
+  if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
+    // Improve graph before escape analysis and boxing elimination.
+    record_for_igvn(ld);
+  }
+  return ld;
 }
 
 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
                                 bool require_atomic_access) {
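
Note: the new condition in make_load leans on C++ precedence: && binds tighter than ||, so it parses as ((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing(). Object loads are recorded for IGVN when escape analysis is on, and every load is recorded when boxing elimination is on. A minimal standalone check of that parse:

    #include <cassert>

    int main() {
      bool is_object = false, do_ea = true, eliminate_boxing = true;
      // '&&' binds tighter than '||': the unparenthesized form parses as
      // (is_object && do_ea) || eliminate_boxing, so with boxing
      // elimination enabled the load is recorded whatever its type.
      bool recorded = (is_object && do_ea) || eliminate_boxing;
      assert(recorded);  // true purely because eliminate_boxing is true
      return 0;
    }
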
@@ -3142,11 +3148,11 @@
   // since GC and deoptimization can happen.
   Node *mem = reset_memory();
   set_all_memory(mem); // Create new memory state
 
   AllocateNode* alloc
-    = new (C) AllocateNode(C, AllocateNode::alloc_type(),
+    = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                            control(), mem, i_o(),
                            size, klass_node,
                            initial_slow_test);
 
   return set_output_for_allocation(alloc, oop_type);
@@ -3283,11 +3289,11 @@
   Node *mem = reset_memory();
   set_all_memory(mem); // Create new memory state
 
   // Create the AllocateArrayNode and its result projections
   AllocateArrayNode* alloc
-    = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
+    = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                                 control(), mem, i_o(),
                                 size, klass_node,
                                 initial_slow_test,
                                 length);
 
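
Note: both allocation sites now pass an explicit type to alloc_type(). Judging from the two call sites, the parameter presumably fills the array-length slot of the allocation call's signature: Type::TOP for a plain object (no length) and TypeInt::INT for an array. A toy sketch of a signature factory parameterized on one slot (hypothetical names, illustration only):

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for a TypeFunc: the first three slots are fixed
    // (size, klass, initial test) and the length slot is parameterized, in
    // the spirit of AllocateNode::alloc_type(const Type* t).
    std::vector<std::string> alloc_signature(const std::string& length_type) {
      return { "size:int", "klass:ptr", "initial_test:bool",
               "length:" + length_type };
    }

    int main() {
      assert(alloc_signature("top").back() == "length:top");  // plain object
      assert(alloc_signature("int").back() == "length:int");  // array
      return 0;
    }
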
@@ -3324,14 +3330,13 @@
 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
   if (ptr == NULL) {     // reduce dumb test in callers
     return NULL;
   }
-  if (ptr->is_CheckCastPP()) { // strip a raw-to-oop cast
-    ptr = ptr->in(1);
-    if (ptr == NULL)  return NULL;
-  }
+  ptr = ptr->uncast();  // strip a raw-to-oop cast
+  if (ptr == NULL)  return NULL;
+
   if (ptr->is_Proj()) {
     Node* allo = ptr->in(0);
     if (allo != NULL && allo->is_Allocate()) {
       return allo->as_Allocate();
     }
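
Note: uncast() strips a whole chain of pointer casts, not just the single CheckCastPP level the old code peeled by hand, so the allocation is still found behind stacked casts. A toy model of the loop (not the real Node API):

    #include <cassert>
    #include <cstddef>

    // Toy node with a single "cast input"; nullptr input means not a cast.
    struct Node {
      Node* cast_input;  // non-null when this node is a pointer cast
      explicit Node(Node* in = nullptr) : cast_input(in) {}

      // In the spirit of Node::uncast(): strip *every* level of casting,
      // not just one, returning the underlying node.
      Node* uncast() {
        Node* n = this;
        while (n->cast_input != nullptr) n = n->cast_input;
        return n;
      }
    };

    int main() {
      Node alloc;          // the underlying (allocation) node
      Node cast1(&alloc);  // raw-to-oop cast
      Node cast2(&cast1);  // a second, stacked cast
      assert(cast2.uncast() == &alloc);  // the whole chain is stripped
      return 0;
    }
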
@@ -3367,23 +3372,10 @@
   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
     Node* init = rawoop->fast_out(i);
     if (init->is_Initialize()) {
       assert(init->as_Initialize()->allocation() == this, "2-way link");
       return init->as_Initialize();
-    }
-  }
-  return NULL;
-}
-
-// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
-MemBarStoreStoreNode* AllocateNode::storestore() {
-  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
-  if (rawoop == NULL) return NULL;
-  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
-    Node* storestore = rawoop->fast_out(i);
-    if (storestore->is_MemBarStoreStore()) {
-      return storestore->as_MemBarStoreStore();
     }
   }
   return NULL;
 }
 
@@ -3562,11 +3554,12 @@
 
   Node* tls = __ thread(); // ThreadLocalStorage
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
   Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
 
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
 
   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
@@ -3588,11 +3581,13 @@
   // Now some of the values
   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 
   // if (!marking)
   __ if_then(marking, BoolTest::ne, zero); {
-    Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
+    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
 
     if (do_load) {
       // load original value
       // alias_idx correct??
       pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
@@ -3601,26 +3596,20 @@
     // if (pre_val != NULL)
     __ if_then(pre_val, BoolTest::ne, null()); {
       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
       // is the queue for this thread full?
-      __ if_then(index, BoolTest::ne, zero, likely); {
+      __ if_then(index, BoolTest::ne, zeroX, likely); {
 
         // decrement the index
-        Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
-        Node* next_indexX = next_index;
-#ifdef _LP64
-        // We could refine the type for what it's worth
-        // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-        next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif
+        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
 
         // Now get the buffer location we will log the previous value into and store it
-        Node *log_addr = __ AddP(no_base, buffer, next_indexX);
+        Node *log_addr = __ AddP(no_base, buffer, next_index);
         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
         // update the index
-        __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
 
       } __ else_(); {
 
         // logging buffer is full, call the runtime
         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
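
Note: PtrQueue::_index is a size_t, so the pre-barrier now loads it at native pointer width (TypeX_X) instead of T_INT, asserts the loaded width matches sizeof(size_t), and keeps the index arithmetic at that width via the pointer-sized node flavors (ConX, SubXNode). On LP64 a 32-bit load would see only half of the 64-bit field. A standalone illustration of that hazard (assumes a little-endian LP64 host for the 'half' read):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      if (sizeof(size_t) == 8) {        // LP64, as on 64-bit HotSpot
        size_t index = 0x100000000ULL;  // representable only in 64 bits

        // The old code loaded the field as T_INT (32 bits): it can see at
        // most half of the value (and, on big-endian, the wrong half).
        uint32_t half;
        std::memcpy(&half, &index, sizeof(half));
        // On little-endian this reads the low word, i.e. 0 here.
        (void)half;

        // Loading at native width (the new TypeX_X load) is always exact.
        size_t whole;
        std::memcpy(&whole, &index, sizeof(whole));
        assert(whole == index);
      }
      return 0;
    }
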
@@ -3643,30 +3632,25 @@
                                      Node* index,
                                      Node* index_adr,
                                      Node* buffer,
                                      const TypeFunc* tf) {
 
   Node* zero = __ ConI(0);
+  Node* zeroX = __ ConX(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT TO STORE
   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
 
   // Now do the queue work
-  __ if_then(index, BoolTest::ne, zero); {
+  __ if_then(index, BoolTest::ne, zeroX); {
 
-    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-    Node* next_indexX = next_index;
-#ifdef _LP64
-    // We could refine the type for what it's worth
-    // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-    next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
-    Node* log_addr = __ AddP(no_base, buffer, next_indexX);
+    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
+    Node* log_addr = __ AddP(no_base, buffer, next_index);
 
     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
 
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
   } __ end_if();
 
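
Note: the post-barrier gets the same rewrite. The X suffix (ConX, SubXNode, TypeX_X, CastPX) is HotSpot's convention for the pointer-width integer flavor, mapped to the long variants on LP64 and the int variants on 32-bit, so one line of IdealKit code emits the right width everywhere. A sketch of the convention, using intptr_t as the stand-in for TypeX:

    #include <cassert>
    #include <cstdint>

    // Sketch of HotSpot's "X" convention: one name that is the pointer-width
    // integer on every platform (intptr_t plays the role of TypeX here).
    typedef intptr_t x_int;

    x_int sub_x(x_int a, x_int b) { return a - b; }  // stands in for SubXNode

    int main() {
      x_int index = 64;
      x_int next_index = sub_x(index, (x_int)sizeof(intptr_t));
      assert(next_index == 64 - (x_int)sizeof(intptr_t));
      assert(sizeof(x_int) == sizeof(void*));  // always pointer width
      return 0;
    }
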
@@ -3723,11 +3707,11 @@
   Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some values
   // Use ctrl to avoid hoisting these values past a safepoint, which could
   // potentially reset these fields in the JavaThread.
-  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
   // Must use ctrl to prevent "integerized oop" existing across safepoint
   Node* cast = __ CastPX(__ ctrl(), adr);