comparison src/share/vm/c1/c1_LinearScan.cpp @ 1681:126ea7725993

6953477: Increase portability and flexibility of building Hotspot
Summary: A collection of portability improvements including shared code support for PPC, ARM platforms, software floating point, cross compilation support and improvements in error crash detail.
Reviewed-by: phh, never, coleenp, dholmes
author bobv
date Tue, 03 Aug 2010 08:13:38 -0400
parents b812ff5abc73
children 87b64980e2f1
comparing 1680:a64438a2b7e8 with 1681:126ea7725993
@@ -167,19 +167,27 @@
 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
   return i->reg_num() < LinearScan::nof_cpu_regs;
 }
 
 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
+#if defined(__SOFTFP__) || defined(E500V2)
+  return i->reg_num() >= LIR_OprDesc::vreg_base;
+#else
   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
+#endif // __SOFTFP__ or E500V2
 }
 
 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
   return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
 }
 
 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
+#if defined(__SOFTFP__) || defined(E500V2)
+  return false;
+#else
   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
+#endif // __SOFTFP__ or E500V2
 }
 
 bool LinearScan::is_in_fpu_register(const Interval* i) {
   // fixed intervals not needed for FPU stack allocation
   return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
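With __SOFTFP__ (ARM soft-float ABI) or E500V2 (PowerPC e500v2 SPE, no classic FPU) defined, float and double values live in general-purpose registers, so every virtual interval is classified as a CPU interval and no virtual interval is ever an FPU interval. The following standalone sketch (hypothetical names, not HotSpot code) shows the classification this hunk introduces:

// Minimal standalone sketch of the soft-float interval classification above.
// BasicType, vreg_base and Interval are hypothetical stand-ins for the
// LinearScan/Interval types used in the hunk.
#include <cstdio>

enum BasicType { T_INT, T_FLOAT, T_DOUBLE };
const int vreg_base = 40;   // assumed: first virtual register number

struct Interval { int reg_num; BasicType type; };

bool is_virtual_cpu_interval(const Interval& i) {
#if defined(__SOFTFP__) || defined(E500V2)
  // soft float: floats/doubles are kept in GP registers, so every
  // virtual interval is handled by the CPU register allocator
  return i.reg_num >= vreg_base;
#else
  return i.reg_num >= vreg_base && i.type != T_FLOAT && i.type != T_DOUBLE;
#endif
}

bool is_virtual_fpu_interval(const Interval& i) {
#if defined(__SOFTFP__) || defined(E500V2)
  return false;             // no FPU intervals exist on a soft-float build
#else
  return i.reg_num >= vreg_base && (i.type == T_FLOAT || i.type == T_DOUBLE);
#endif
}

int main() {
  Interval f = { 42, T_FLOAT };
  printf("cpu=%d fpu=%d\n", is_virtual_cpu_interval(f), is_virtual_fpu_interval(f));
  return 0;
}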
@@ -2008,16 +2016,22 @@
       assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
       assert(interval->assigned_regHi() == any_reg, "must not have hi register");
       return LIR_OprFact::single_cpu_oop(assigned_reg);
     }
 
+#ifdef __SOFTFP__
+    case T_FLOAT:  // fall through
+#endif // __SOFTFP__
     case T_INT: {
       assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
       assert(interval->assigned_regHi() == any_reg, "must not have hi register");
       return LIR_OprFact::single_cpu(assigned_reg);
     }
 
+#ifdef __SOFTFP__
+    case T_DOUBLE:  // fall through
+#endif // __SOFTFP__
     case T_LONG: {
       int assigned_regHi = interval->assigned_regHi();
       assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
       assert(num_physical_regs(T_LONG) == 1 ||
              (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
@@ -2031,18 +2045,19 @@
     }
 
 #ifdef _LP64
       return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
 #else
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
       return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
 #else
       return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
 #endif // SPARC
 #endif // LP64
     }
 
+#ifndef __SOFTFP__
     case T_FLOAT: {
 #ifdef X86
       if (UseSSE >= 1) {
         assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2067,17 +2082,23 @@
 #ifdef SPARC
       assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
       assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
       assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
       LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
+#elif defined(ARM)
+      assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
+      assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
+      assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
+      LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
 #else
       assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
       assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
       LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
 #endif
       return result;
     }
+#endif // __SOFTFP__
 
     default: {
       ShouldNotReachHere();
       return LIR_OprFact::illegalOpr;
     }
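Two conventions are at work in these hunks. On 32-bit targets a T_LONG (and, under __SOFTFP__, a T_DOUBLE falling through to it) occupies a register pair; SPARC and now PPC record the register holding the high word first, other 32-bit platforms record the low word first, and LP64 names the same register in both slots. The new ARM branch of the double-FPU case pairs two FPU registers with the low half first, the mirror of SPARC's ordering. A standalone sketch of the pair ordering, with double_cpu() as a hypothetical stand-in for LIR_OprFact::double_cpu():

// Minimal standalone sketch (not HotSpot code) of the register-pair word
// order chosen above for 64-bit values.
#include <utility>
#include <cstdio>

static std::pair<int, int> double_cpu(int reg1, int reg2) { return std::make_pair(reg1, reg2); }

std::pair<int, int> make_long_operand(int assigned_reg, int assigned_regHi) {
#ifdef _LP64
  // one 64-bit register holds the whole value; both slots name the same register
  return double_cpu(assigned_reg, assigned_reg);
#elif defined(SPARC) || defined(PPC)
  // big-endian pair convention: the register holding the high word comes first
  return double_cpu(assigned_regHi, assigned_reg);
#else
  // x86/ARM convention: the register holding the low word comes first
  return double_cpu(assigned_reg, assigned_regHi);
#endif
}

int main() {
  std::pair<int, int> p = make_long_operand(3, 4);
  printf("first=%d second=%d\n", p.first, p.second);
  return 0;
}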
@@ -2635,10 +2656,16 @@
 
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
 #endif
 #ifdef SPARC
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
+#endif
+#ifdef ARM
+      assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
+#endif
+#ifdef PPC
+      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
 #endif
 
       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
 #ifdef _LP64
       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
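These assertions document each platform's convention for how the two FPU register numbers of a double operand relate when debug info is built: identical on PPC (and on X86, assuming the guard elided above the first assertion), high half first on SPARC, low half first on ARM. A standalone sketch of that invariant (hypothetical types, not HotSpot code):

// Minimal standalone sketch of the per-platform pairing invariant asserted
// above; it ties the two FPU register numbers of a double operand together so
// the debug-info code can derive the pair from a single one of them.
#include <cassert>

struct DoubleFpuOpr { int fpu_regnrLo; int fpu_regnrHi; };

void check_double_fpu_pair(const DoubleFpuOpr& opr) {
#if defined(X86) || defined(PPC)
  // one logical slot describes the whole double; both numbers must match
  assert(opr.fpu_regnrLo == opr.fpu_regnrHi);
#elif defined(SPARC)
  // high half first; only fpu_regnrHi is consulted afterwards
  assert(opr.fpu_regnrLo == opr.fpu_regnrHi + 1);
#elif defined(ARM)
  // even/odd VFP pair with the low half first; only fpu_regnrLo is consulted
  assert(opr.fpu_regnrHi == opr.fpu_regnrLo + 1);
#else
  (void)opr;  // other platforms: no pairing constraint checked here
#endif
}

int main() {
  DoubleFpuOpr d = { 0, 0 };
#if defined(SPARC)
  d = DoubleFpuOpr{ 1, 0 };
#elif defined(ARM)
  d = DoubleFpuOpr{ 0, 1 };
#endif
  check_double_fpu_pair(d);
  return 0;
}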
@@ -6133,17 +6160,29 @@
           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
 
+            LIR_Op2* prev_cmp = NULL;
+
+            for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
+              prev_op = instructions->at(j);
+              if(prev_op->code() == lir_cmp) {
+                assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
+                prev_cmp = (LIR_Op2*)prev_op;
+                assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+              }
+            }
+            assert(prev_cmp != NULL, "should have found comp instruction for branch");
             if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
 
               TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
 
               // eliminate a conditional branch to the immediate successor
               prev_branch->change_block(last_branch->block());
               prev_branch->negate_cond();
+              prev_cmp->set_condition(prev_branch->cond());
               instructions->truncate(instructions->length() - 1);
             }
           }
         }
       }
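The new loop walks backwards from the branch being negated to the lir_cmp that produced its condition and mirrors the negated condition into that compare, so backends that read the condition from the compare rather than from the branch stay consistent after the jump-elimination optimization. A standalone sketch of the idea (hypothetical types, not HotSpot code):

// Minimal standalone sketch of the bookkeeping added above: negate a
// conditional branch and keep the nearest preceding compare in sync.
#include <vector>
#include <cassert>

enum Cond { cond_eq, cond_ne, cond_lt, cond_ge };
enum Code { op_cmp, op_move, op_branch };

struct Op { Code code; Cond cond; };

static Cond negate(Cond c) {
  switch (c) {
    case cond_eq: return cond_ne;
    case cond_ne: return cond_eq;
    case cond_lt: return cond_ge;
    default:      return cond_lt;
  }
}

void negate_branch_and_cmp(std::vector<Op>& ops, int branch_idx) {
  assert(ops[branch_idx].code == op_branch);
  ops[branch_idx].cond = negate(ops[branch_idx].cond);

  // scan backwards for the compare that feeds the branch and mirror the
  // negated condition into it
  for (int j = branch_idx - 1; j >= 0; j--) {
    if (ops[j].code == op_cmp) {
      ops[j].cond = ops[branch_idx].cond;
      return;
    }
  }
  assert(false && "should have found compare instruction for branch");
}

int main() {
  std::vector<Op> ops = { {op_cmp, cond_eq}, {op_move, cond_eq}, {op_branch, cond_eq} };
  negate_branch_and_cmp(ops, 2);
  assert(ops[0].cond == cond_ne && ops[2].cond == cond_ne);
  return 0;
}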