comparison src/cpu/sparc/vm/templateTable_sparc.cpp @ 1783:d5d065957597

6953144: Tiered compilation
Summary: Infrastructure for tiered compilation support (interpreter + c1 + c2) for 32 and 64 bit. Simple tiered policy implementation.
Reviewed-by: kvn, never, phh, twisti
author iveresov
date Fri, 03 Sep 2010 17:51:07 -0700
parents 126ea7725993
children d55217dc206f
comparison
equal deleted inserted replaced
1782:f353275af40e 1783:d5d065957597
1578 1578
1579 // Save the current Lbcp 1579 // Save the current Lbcp
1580 const Register O0_cur_bcp = O0; 1580 const Register O0_cur_bcp = O0;
1581 __ mov( Lbcp, O0_cur_bcp ); 1581 __ mov( Lbcp, O0_cur_bcp );
1582 1582
1583
1583 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; 1584 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1584 if ( increment_invocation_counter_for_backward_branches ) { 1585 if ( increment_invocation_counter_for_backward_branches ) {
1585 Label Lforward; 1586 Label Lforward;
1586 // check branch direction 1587 // check branch direction
1587 __ br( Assembler::positive, false, Assembler::pn, Lforward ); 1588 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1588 // Bump bytecode pointer by displacement (take the branch) 1589 // Bump bytecode pointer by displacement (take the branch)
1589 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr 1590 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1590 1591
1591 // Update Backedge branch separately from invocations 1592 if (TieredCompilation) {
1592 const Register G4_invoke_ctr = G4; 1593 Label Lno_mdo, Loverflow;
1593 __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); 1594 int increment = InvocationCounter::count_increment;
1594 if (ProfileInterpreter) { 1595 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1595 __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward); 1596 if (ProfileInterpreter) {
1596 if (UseOnStackReplacement) { 1597 // If no method data exists, go to profile_continue.
1597 __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch); 1598 __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
1599 __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
1600 __ delayed()->nop();
1601
1602 // Increment backedge counter in the MDO
1603 Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1604 in_bytes(InvocationCounter::counter_offset()));
1605 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
1606 Assembler::notZero, &Lforward);
1607 __ ba(false, Loverflow);
1608 __ delayed()->nop();
1598 } 1609 }
1610
1611 // If there's no MDO, increment counter in methodOop
1612 __ bind(Lno_mdo);
1613 Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
1614 in_bytes(InvocationCounter::counter_offset()));
1615 __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
1616 Assembler::notZero, &Lforward);
1617 __ bind(Loverflow);
1618
1619 // notify point for loop, pass branch bytecode
1620 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
1621
1622 // Was an OSR adapter generated?
1623 // O0 = osr nmethod
1624 __ br_null(O0, false, Assembler::pn, Lforward);
1625 __ delayed()->nop();
1626
1627 // Has the nmethod been invalidated already?
1628 __ ld(O0, nmethod::entry_bci_offset(), O2);
1629 __ cmp(O2, InvalidOSREntryBci);
1630 __ br(Assembler::equal, false, Assembler::pn, Lforward);
1631 __ delayed()->nop();
1632
1633 // migrate the interpreter frame off of the stack
1634
1635 __ mov(G2_thread, L7);
1636 // save nmethod
1637 __ mov(O0, L6);
1638 __ set_last_Java_frame(SP, noreg);
1639 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1640 __ reset_last_Java_frame();
1641 __ mov(L7, G2_thread);
1642
1643 // move OSR nmethod to I1
1644 __ mov(L6, I1);
1645
1646 // OSR buffer to I0
1647 __ mov(O0, I0);
1648
1649 // remove the interpreter frame
1650 __ restore(I5_savedSP, 0, SP);
1651
1652 // Jump to the osr code.
1653 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1654 __ jmp(O2, G0);
1655 __ delayed()->nop();
1656
1599 } else { 1657 } else {
1600 if (UseOnStackReplacement) { 1658 // Update Backedge branch separately from invocations
1601 __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); 1659 const Register G4_invoke_ctr = G4;
1660 __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
1661 if (ProfileInterpreter) {
1662 __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
1663 if (UseOnStackReplacement) {
1664 __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
1665 }
1666 } else {
1667 if (UseOnStackReplacement) {
1668 __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
1669 }
1602 } 1670 }
1603 } 1671 }
1604 1672
1605 __ bind(Lforward); 1673 __ bind(Lforward);
1606 } else 1674 } else