comparison src/share/vm/c1/c1_LIRGenerator.cpp @ 8883:b9a918201d47

Merge with hsx25
author Gilles Duboscq <duboscq@ssw.jku.at>
date Sat, 06 Apr 2013 20:04:06 +0200
parents 5fc51c1ecdeb d595e8ddadd9
children 89e4d67fdd2a
compared revisions: 8660:d47b52b0ff68 (left) and 8883:b9a918201d47 (right)
401 401
402 402
403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) { 403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
404 assert(state != NULL, "state must be defined"); 404 assert(state != NULL, "state must be defined");
405 405
406 #ifndef PRODUCT
407 state->verify();
408 #endif
409
406 ValueStack* s = state; 410 ValueStack* s = state;
407 for_each_state(s) { 411 for_each_state(s) {
408 if (s->kind() == ValueStack::EmptyExceptionState) { 412 if (s->kind() == ValueStack::EmptyExceptionState) {
409 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty"); 413 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
410 continue; 414 continue;
451 } 455 }
452 } 456 }
453 } 457 }
454 } 458 }
455 459
456 return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers()); 460 return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
457 } 461 }
458 462
459 463
460 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) { 464 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
461 return state_for(x, x->exception_state()); 465 return state_for(x, x->exception_state());
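
The hunk above threads an instruction-level flag, Instruction::DeoptimizeOnException, into the CodeEmitInfo that state_for builds (and adds a non-PRODUCT state->verify() call). A minimal, self-contained sketch of that flag-propagation pattern follows; the types here are simplified stand-ins, not HotSpot code:

    // Sketch only: models how an instruction flag ends up queryable on the emit info.
    #include <cstdio>

    class Instr {
     public:
      enum Flag { DeoptimizeOnException = 1 << 0 /* other flags elided */ };
      explicit Instr(unsigned flags) : _flags(flags) {}
      bool check_flag(Flag f) const { return (_flags & f) != 0; }
     private:
      unsigned _flags;
    };

    class EmitInfo {
     public:
      explicit EmitInfo(bool deopt_on_exception) : _deopt_on_exception(deopt_on_exception) {}
      bool deoptimize_on_exception() const { return _deopt_on_exception; }
     private:
      bool _deopt_on_exception;
    };

    int main() {
      Instr load(Instr::DeoptimizeOnException);
      // Mirrors: new CodeEmitInfo(state, handlers, x->check_flag(Instruction::DeoptimizeOnException))
      EmitInfo info(load.check_flag(Instr::DeoptimizeOnException));
      std::printf("deoptimize on exception: %d\n", info.deoptimize_on_exception());
      return 0;
    }

Debug info built this way lets later code ask info->deoptimize_on_exception() instead of re-inspecting the instruction, which is exactly what the stress paths in the following hunks do.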
1790 tty->print_cr(" ###class not loaded at load_%s bci %d", 1794 tty->print_cr(" ###class not loaded at load_%s bci %d",
1791 x->is_static() ? "static" : "field", x->printable_bci()); 1795 x->is_static() ? "static" : "field", x->printable_bci());
1792 } 1796 }
1793 #endif 1797 #endif
1794 1798
1799 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1795 if (x->needs_null_check() && 1800 if (x->needs_null_check() &&
1796 (needs_patching || 1801 (needs_patching ||
1797 MacroAssembler::needs_explicit_null_check(x->offset()))) { 1802 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1803 stress_deopt)) {
1804 LIR_Opr obj = object.result();
1805 if (stress_deopt) {
1806 obj = new_register(T_OBJECT);
1807 __ move(LIR_OprFact::oopConst(NULL), obj);
1808 }
1798 // emit an explicit null check because the offset is too large 1809 // emit an explicit null check because the offset is too large
1799 __ null_check(object.result(), new CodeEmitInfo(info)); 1810 __ null_check(obj, new CodeEmitInfo(info));
1800 } 1811 }
1801 1812
1802 LIR_Opr reg = rlock_result(x, field_type); 1813 LIR_Opr reg = rlock_result(x, field_type);
1803 LIR_Address* address; 1814 LIR_Address* address;
1804 if (needs_patching) { 1815 if (needs_patching) {
1871 if (nc == NULL) { 1882 if (nc == NULL) {
1872 info = state_for(x); 1883 info = state_for(x);
1873 } else { 1884 } else {
1874 info = state_for(nc); 1885 info = state_for(nc);
1875 } 1886 }
1887 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1888 LIR_Opr obj = new_register(T_OBJECT);
1889 __ move(LIR_OprFact::oopConst(NULL), obj);
1890 __ null_check(obj, new CodeEmitInfo(info));
1891 }
1876 } 1892 }
1877 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); 1893 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1878 } 1894 }
1879 1895
1880 1896
1881 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { 1897 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1882 bool use_length = x->length() != NULL; 1898 bool use_length = x->length() != NULL;
1883 LIRItem array(x->array(), this); 1899 LIRItem array(x->array(), this);
1884 LIRItem index(x->index(), this); 1900 LIRItem index(x->index(), this);
1885 LIRItem length(this); 1901 LIRItem length(this);
1886 bool needs_range_check = true; 1902 bool needs_range_check = x->compute_needs_range_check();
1887 1903
1888 if (use_length) { 1904 if (use_length && needs_range_check) {
1889 needs_range_check = x->compute_needs_range_check(); 1905 length.set_instruction(x->length());
1890 if (needs_range_check) { 1906 length.load_item();
1891 length.set_instruction(x->length());
1892 length.load_item();
1893 }
1894 } 1907 }
1895 1908
1896 array.load_item(); 1909 array.load_item();
1897 if (index.is_constant() && can_inline_as_constant(x->index())) { 1910 if (index.is_constant() && can_inline_as_constant(x->index())) {
1898 // let it be a constant 1911 // let it be a constant
1908 if (nc != NULL) { 1921 if (nc != NULL) {
1909 null_check_info = state_for(nc); 1922 null_check_info = state_for(nc);
1910 } else { 1923 } else {
1911 null_check_info = range_check_info; 1924 null_check_info = range_check_info;
1912 } 1925 }
1926 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1927 LIR_Opr obj = new_register(T_OBJECT);
1928 __ move(LIR_OprFact::oopConst(NULL), obj);
1929 __ null_check(obj, new CodeEmitInfo(null_check_info));
1930 }
1913 } 1931 }
1914 1932
1915 // emit array address setup early so it schedules better 1933 // emit array address setup early so it schedules better
1916 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); 1934 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1917 1935
1918 if (GenerateRangeChecks && needs_range_check) { 1936 if (GenerateRangeChecks && needs_range_check) {
1919 if (use_length) { 1937 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1938 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1939 } else if (use_length) {
1920 // TODO: use a (modified) version of array_range_check that does not require a 1940 // TODO: use a (modified) version of array_range_check that does not require a
1921 // constant length to be loaded to a register 1941 // constant length to be loaded to a register
1922 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 1942 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1923 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); 1943 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1924 } else { 1944 } else {
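
The changes to do_LoadField, the array-length load, and do_LoadIndexed above all apply one stress pattern: when StressLoopInvariantCodeMotion is set and the emit info deoptimizes on exception, the generator loads NULL into a fresh register and null-checks it (or, for the range check, branches unconditionally to the RangeCheckStub), forcing the deoptimizing exception path to be exercised. A standalone sketch of the null-check decision, with simplified stand-in types and assuming this reading of the diff:

    // Sketch only, not HotSpot code.
    #include <cstdio>

    struct EmitInfo { bool deoptimize_on_exception; };

    // Decision mirrored from the diff: an explicit null check is emitted when the
    // access needs patching, the offset is too large for an implicit check, or the
    // stress flag forces a deoptimizing check.
    bool emit_explicit_null_check(bool needs_null_check, bool needs_patching,
                                  bool offset_needs_explicit_check,
                                  bool stress_licm, const EmitInfo* info) {
      bool stress_deopt = stress_licm && info != nullptr && info->deoptimize_on_exception;
      return needs_null_check &&
             (needs_patching || offset_needs_explicit_check || stress_deopt);
    }

    int main() {
      EmitInfo info = { /*deoptimize_on_exception=*/ true };
      // A small offset that would normally use an implicit check still gets an
      // explicit (deoptimizing) check under the stress flag.
      std::printf("%d\n", emit_explicit_null_check(true, false, false, true, &info));
      return 0;
    }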
2632 2652
2633 if (method()->is_synchronized() && GenerateSynchronizationCode) { 2653 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2634 LIR_Opr lock = new_register(T_INT); 2654 LIR_Opr lock = new_register(T_INT);
2635 __ load_stack_address_monitor(0, lock); 2655 __ load_stack_address_monitor(0, lock);
2636 2656
2637 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); 2657 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2638 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 2658 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2639 2659
2640 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 2660 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2641 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 2661 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2642 } 2662 }
2643 } 2663 }
2644 2664
2645 // increment invocation counters if needed 2665 // increment invocation counters if needed
2646 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 2666 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2647 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); 2667 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2648 increment_invocation_counter(info); 2668 increment_invocation_counter(info);
2649 } 2669 }
2650 2670
2651 // all blocks with a successor must end with an unconditional jump 2671 // all blocks with a successor must end with an unconditional jump
2652 // to the successor even if they are consecutive 2672 // to the successor even if they are consecutive
3100 } else { 3120 } else {
3101 __ move(result, rlock_result(x)); 3121 __ move(result, rlock_result(x));
3102 } 3122 }
3103 } 3123 }
3104 3124
3125 void LIRGenerator::do_Assert(Assert *x) {
3126 #ifdef ASSERT
3127 ValueTag tag = x->x()->type()->tag();
3128 If::Condition cond = x->cond();
3129
3130 LIRItem xitem(x->x(), this);
3131 LIRItem yitem(x->y(), this);
3132 LIRItem* xin = &xitem;
3133 LIRItem* yin = &yitem;
3134
3135 assert(tag == intTag, "Only integer assertions are valid!");
3136
3137 xin->load_item();
3138 yin->dont_load_item();
3139
3140 set_no_result(x);
3141
3142 LIR_Opr left = xin->result();
3143 LIR_Opr right = yin->result();
3144
3145 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3146 #endif
3147 }
3148
3149
3150 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3151
3152
3153 Instruction *a = x->x();
3154 Instruction *b = x->y();
3155 if (!a || StressRangeCheckElimination) {
3156 assert(!b || StressRangeCheckElimination, "B must also be null");
3157
3158 CodeEmitInfo *info = state_for(x, x->state());
3159 CodeStub* stub = new PredicateFailedStub(info);
3160
3161 __ jump(stub);
3162 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3163 int a_int = a->type()->as_IntConstant()->value();
3164 int b_int = b->type()->as_IntConstant()->value();
3165
3166 bool ok = false;
3167
3168 switch(x->cond()) {
3169 case Instruction::eql: ok = (a_int == b_int); break;
3170 case Instruction::neq: ok = (a_int != b_int); break;
3171 case Instruction::lss: ok = (a_int < b_int); break;
3172 case Instruction::leq: ok = (a_int <= b_int); break;
3173 case Instruction::gtr: ok = (a_int > b_int); break;
3174 case Instruction::geq: ok = (a_int >= b_int); break;
3175 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3176 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3177 default: ShouldNotReachHere();
3178 }
3179
3180 if (ok) {
3181
3182 CodeEmitInfo *info = state_for(x, x->state());
3183 CodeStub* stub = new PredicateFailedStub(info);
3184
3185 __ jump(stub);
3186 }
3187 } else {
3188
3189 ValueTag tag = x->x()->type()->tag();
3190 If::Condition cond = x->cond();
3191 LIRItem xitem(x->x(), this);
3192 LIRItem yitem(x->y(), this);
3193 LIRItem* xin = &xitem;
3194 LIRItem* yin = &yitem;
3195
3196 assert(tag == intTag, "Only integer deoptimizations are valid!");
3197
3198 xin->load_item();
3199 yin->dont_load_item();
3200 set_no_result(x);
3201
3202 LIR_Opr left = xin->result();
3203 LIR_Opr right = yin->result();
3204
3205 CodeEmitInfo *info = state_for(x, x->state());
3206 CodeStub* stub = new PredicateFailedStub(info);
3207
3208 __ cmp(lir_cond(cond), left, right);
3209 __ branch(lir_cond(cond), right->type(), stub);
3210 }
3211 }
3212
3213
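
In do_RangeCheckPredicate above, when both operands are integer constants the condition is folded directly; if it holds, the generator jumps unconditionally to the PredicateFailedStub and the guarded region deoptimizes. A standalone sketch of that constant evaluation, mirroring the casts in the switch (unsigned comparisons for aeq/beq); the enum and function names are illustrative only:

    #include <cstdint>
    #include <cstdio>

    enum Cond { eql, neq, lss, leq, gtr, geq, aeq, beq };

    bool predicate_holds(Cond c, int32_t a, int32_t b) {
      switch (c) {
        case eql: return a == b;
        case neq: return a != b;
        case lss: return a <  b;
        case leq: return a <= b;
        case gtr: return a >  b;
        case geq: return a >= b;
        case aeq: return (uint32_t)a >= (uint32_t)b;  // unsigned "above or equal"
        case beq: return (uint32_t)a <= (uint32_t)b;  // unsigned "below or equal"
      }
      return false;
    }

    int main() {
      // If the folded condition holds, the code above emits an unconditional jump
      // to the PredicateFailedStub.
      std::printf("geq(5,5) holds: %d\n", predicate_holds(geq, 5, 5));
      std::printf("aeq(-1,1) holds: %d\n", predicate_holds(aeq, -1, 1));  // -1 is large unsigned
      return 0;
    }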
3105 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { 3214 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3106 LIRItemList args(1); 3215 LIRItemList args(1);
3107 LIRItem value(arg1, this); 3216 LIRItem value(arg1, this);
3108 args.append(&value); 3217 args.append(&value);
3109 BasicTypeList signature; 3218 BasicTypeList signature;