comparison src/cpu/sparc/vm/templateTable_sparc.cpp @ 10105:aeaca88565e6

8010862: The Method counter fields used for profiling can be allocated lazily.
Summary: Do not allocate the method's profiling-related metadata until it is needed.
Reviewed-by: coleenp, roland
author jiangli
date Tue, 09 Apr 2013 17:17:41 -0400
parents f16e75e0cf11
children 9500809ceead
comparison: 9055:dcdeb150988c vs 10105:aeaca88565e6
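The diff is easier to read with the shape of the new data structure in mind: the backedge and invocation counters move out of Method and into a MethodCounters object that is allocated only the first time a counter is actually needed. A minimal sketch of that lazy-allocation idea (field names and layout are illustrative, not the exact HotSpot code):

#include <new>

// Illustrative stand-ins only; the real MethodCounters holds
// InvocationCounter objects plus other profiling state.
struct MethodCounters {
  int invocation_counter = 0;
  int backedge_counter   = 0;
};

struct Method {
  MethodCounters* _method_counters = nullptr;   // NULL until first use

  MethodCounters* get_method_counters() {
    if (_method_counters == nullptr) {
      // In the VM this is a runtime call that can fail under memory
      // pressure, so callers must tolerate a NULL result.
      _method_counters = new (std::nothrow) MethodCounters();
    }
    return _method_counters;
  }
};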
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -1602,21 +1602,23 @@
   }
 
   // Normal (non-jsr) branch handling
 
   // Save the current Lbcp
-  const Register O0_cur_bcp = O0;
-  __ mov( Lbcp, O0_cur_bcp );
-
+  const Register l_cur_bcp = Lscratch;
+  __ mov( Lbcp, l_cur_bcp );
 
   bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
   if ( increment_invocation_counter_for_backward_branches ) {
     Label Lforward;
     // check branch direction
     __ br( Assembler::positive, false, Assembler::pn, Lforward );
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
+
+    const Register Rcounters = G3_scratch;
+    __ get_method_counters(Lmethod, Rcounters, Lforward);
 
     if (TieredCompilation) {
       Label Lno_mdo, Loverflow;
       int increment = InvocationCounter::count_increment;
       int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
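The new get_method_counters(Lmethod, Rcounters, Lforward) call is the heart of the change: it loads Method::_method_counters into Rcounters, calls into the runtime to allocate the object on first use, and branches to Lforward (abandoning all counter work for this backedge) if the pointer is still NULL. Parking Rcounters in G3_scratch and the saved bcp in Lscratch is also why the scratch operands of the increment_mask_and_jump calls below change: Lscratch gives way to O0, and G3_scratch to G4_scratch in the no-MDO case. A sketch of the contract, assuming a runtime helper along the lines of the build_method_counters entry this changeset introduces (the name is an assumption):

#include <new>

struct MethodCounters { int backedge_counter = 0; };
struct Method {
  MethodCounters* _method_counters = nullptr;
  MethodCounters* method_counters() const { return _method_counters; }
};

// Stand-in for the runtime allocation path (hypothetical name).
static MethodCounters* build_method_counters(Method* m) {
  m->_method_counters = new (std::nothrow) MethodCounters();
  return m->_method_counters;
}

// What the emitted sequence amounts to: fill `counters` (Rcounters) and
// report failure, which the template turns into a branch to Lforward.
static bool get_method_counters(Method* m, MethodCounters** counters) {
  MethodCounters* c = m->method_counters();
  if (c == nullptr) c = build_method_counters(m);
  *counters = c;
  return c != nullptr;
}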
@@ -1626,9 +1628,9 @@
       __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
 
       // Increment backedge counter in the MDO
       Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
-      __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
+      __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                  Assembler::notZero, &Lforward);
       __ ba_short(Loverflow);
     }
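Both counter bumps go through increment_mask_and_jump, which increments the counter word and tests its masked low bits in one sequence; with mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift, the notZero branch to Lforward is taken on all but every 2^Tier0BackedgeNotifyFreqLog-th backedge. A C-level sketch of the idiom, assumed to mirror the MacroAssembler helper's behavior:

// Returns true when the masked bits hit zero, i.e. when the interpreter
// should fall through to the overflow/notify path instead of branching
// to Lforward (the Assembler::notZero case above).
inline bool increment_mask_and_check(int* counter_addr, int increment, int mask) {
  int c = *counter_addr + increment;   // the count lives above count_shift,
  *counter_addr = c;                   // so the low status bits are preserved
  return (c & mask) == 0;
}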
@@ -1635,16 +1637,17 @@
 
-      // If there's no MDO, increment counter in Method*
+      // If there's no MDO, increment counter in MethodCounters*
       __ bind(Lno_mdo);
-      Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) +
-                               in_bytes(InvocationCounter::counter_offset()));
-      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
+      Address backedge_counter(Rcounters,
+              in_bytes(MethodCounters::backedge_counter_offset()) +
+              in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                  Assembler::notZero, &Lforward);
       __ bind(Loverflow);
 
       // notify point for loop, pass branch bytecode
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);
 
       // Was an OSR adapter generated?
       // O0 = osr nmethod
       __ br_null_short(O0, Assembler::pn, Lforward);
 
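When the masked count does hit zero, the template calls InterpreterRuntime::frequency_counter_overflow with the branch bcp saved in l_cur_bcp; a non-NULL branch bcp marks this as a backedge event, so the runtime may respond with an on-stack-replacement nmethod, returned in O0. A sketch of that handshake (the runtime entry's exact signature is an assumption from the sources of this era):

struct nmethod {};   // opaque stand-in for the compiled-code handle

// Stub standing in for InterpreterRuntime::frequency_counter_overflow:
// a NULL branch_bcp would mean "invocation counter overflow"; a real bcp
// means "backedge", making the method a candidate for OSR compilation.
static nmethod* frequency_counter_overflow(const unsigned char* branch_bcp) {
  (void)branch_bcp;
  return nullptr;    // pretend no OSR method was produced
}

static void on_backedge_overflow(const unsigned char* cur_bcp) {
  nmethod* osr_nm = frequency_counter_overflow(cur_bcp);  // result lands in O0
  if (osr_nm == nullptr) return;  // br_null_short(O0, ..., Lforward)
  // otherwise: the code elided between these two hunks migrates the
  // interpreter frame into the OSR nmethod and jumps to its entry point
}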
@@ -1677,19 +1680,19 @@
       __ delayed()->nop();
 
     } else {
       // Update Backedge branch separately from invocations
       const Register G4_invoke_ctr = G4;
-      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
+      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
         __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
         }
       } else {
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
         }
       }
     }
 
     __ bind(Lforward);
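The non-tiered branch of the diff changes only the first argument: increment_backedge_counter now reads and writes the counter through Rcounters instead of Lmethod, and the OSR tests use the bcp saved in l_cur_bcp. The surrounding logic is unchanged and amounts to the following (helper names are from the diff; their bodies are paraphrased, and the limit parameter is illustrative):

// Paraphrase of the non-tiered backedge path; `counters` is Rcounters.
struct MethodCounters { int backedge_counter; };

static void non_tiered_backedge(MethodCounters* counters,
                                bool profile_interpreter,
                                bool use_on_stack_replacement,
                                int osr_limit /* e.g. derived from
                                  InvocationCounter::InterpreterBackwardBranchLimit */) {
  int count = ++counters->backedge_counter;   // increment_backedge_counter
  if (profile_interpreter) {
    // test_invocation_counter_for_mdp: once the method is warm enough,
    // ensure a MethodData* exists before profiling; otherwise -> Lforward.
  }
  if (use_on_stack_replacement && count >= osr_limit) {
    // test_backedge_count_for_osr: the same frequency_counter_overflow
    // notification as the tiered path, using the bcp saved in l_cur_bcp.
  }
}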