comparison src/share/vm/opto/live.cpp @ 12355:cefad50507d8

Merge with hs25-b53
author Gilles Duboscq <duboscq@ssw.jku.at>
date Fri, 11 Oct 2013 10:38:03 +0200
parents da051ce490eb
children de6a9e811145
--- src/share/vm/opto/live.cpp (12058:ccb4f2af2319)
+++ src/share/vm/opto/live.cpp (12355:cefad50507d8)
@@ -28,13 +28,10 @@
 #include "opto/chaitin.hpp"
 #include "opto/live.hpp"
 #include "opto/machnode.hpp"


-
-//=============================================================================
-//------------------------------PhaseLive--------------------------------------
 // Compute live-in/live-out. We use a totally incremental algorithm. The LIVE
 // problem is monotonic. The steady-state solution looks like this: pull a
 // block from the worklist. It has a set of delta's - values which are newly
 // live-in from the block. Push these to the live-out sets of all predecessor
 // blocks. At each predecessor, the new live-out values are ANDed with what is
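
The comment above (its remaining lines fall in the elided range) describes a worklist formulation of liveness: pull a block, compute its newly live-in values, and push them into the live-out sets of its predecessors until nothing changes. A minimal, self-contained sketch of that standard backward dataflow scheme follows; the ToyBlock struct, the integer value numbers and the three-block example are illustrative assumptions, not HotSpot's IndexSet/Block_List machinery, and the delta-set bookkeeping PhaseLive uses is simplified away.

  // Standard backward liveness by worklist (toy types, not HotSpot code):
  // live-in(B) = use(B) | (live-out(B) - def(B)); a block's live-in is pushed
  // into the live-out set of every predecessor, and any predecessor that picks
  // up a new value is re-queued. The problem is monotonic, so this terminates.
  #include <cstdio>
  #include <set>
  #include <vector>

  struct ToyBlock {
    std::vector<int> preds;  // predecessor block indices
    std::set<int>    def;    // values defined locally
    std::set<int>    use;    // values used before any local definition
    std::set<int>    live_out;
  };

  int main() {
    // Hypothetical 3-block CFG 0 -> 1 -> 2: value 7 is defined in block 0 and
    // used in block 2, so it must end up live-out of blocks 0 and 1.
    std::vector<ToyBlock> cfg(3);
    cfg[1].preds = {0};
    cfg[2].preds = {1};
    cfg[0].def.insert(7);
    cfg[2].use.insert(7);

    std::vector<int> worklist = {0, 1, 2};
    while (!worklist.empty()) {
      int b = worklist.back();
      worklist.pop_back();

      // live-in(b) = use(b) | (live-out(b) - def(b))
      std::set<int> live_in = cfg[b].use;
      for (int v : cfg[b].live_out) {
        if (!cfg[b].def.count(v)) live_in.insert(v);
      }

      // Push live-in into each predecessor's live-out; re-queue on change.
      for (int p : cfg[b].preds) {
        bool changed = false;
        for (int v : live_in) {
          changed |= cfg[p].live_out.insert(v).second;
        }
        if (changed) worklist.push_back(p);
      }
    }

    for (size_t i = 0; i < cfg.size(); i++) {
      std::printf("block %zu live-out:", i);
      for (int v : cfg[i].live_out) std::printf(" %d", v);
      std::printf("\n");
    }
    return 0;
  }
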
@@ -51,113 +48,111 @@
   _maxlrg = maxlrg;
   _worklist = new (_arena) Block_List();

   // Init the sparse live arrays. This data is live on exit from here!
   // The _live info is the live-out info.
-  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*_cfg._num_blocks);
+  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
   uint i;
-  for( i=0; i<_cfg._num_blocks; i++ ) {
+  for (i = 0; i < _cfg.number_of_blocks(); i++) {
     _live[i].initialize(_maxlrg);
   }

   // Init the sparse arrays for delta-sets.
   ResourceMark rm; // Nuke temp storage on exit

   // Does the memory used by _defs and _deltas get reclaimed? Does it matter? TT

   // Array of values defined locally in blocks
-  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg._num_blocks);
-  for( i=0; i<_cfg._num_blocks; i++ ) {
+  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks());
+  for (i = 0; i < _cfg.number_of_blocks(); i++) {
     _defs[i].initialize(_maxlrg);
   }

   // Array of delta-set pointers, indexed by block pre_order-1.
-  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg._num_blocks);
-  memset( _deltas, 0, sizeof(IndexSet*)* _cfg._num_blocks);
+  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
+  memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());

   _free_IndexSet = NULL;

   // Blocks having done pass-1
   VectorSet first_pass(Thread::current()->resource_area());

   // Outer loop: must compute local live-in sets and push into predecessors.
-  uint iters = _cfg._num_blocks; // stat counters
-  for( uint j=_cfg._num_blocks; j>0; j-- ) {
-    Block *b = _cfg._blocks[j-1];
+  for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
+    Block* block = _cfg.get_block(j - 1);

     // Compute the local live-in set. Start with any new live-out bits.
-    IndexSet *use = getset( b );
-    IndexSet *def = &_defs[b->_pre_order-1];
+    IndexSet* use = getset(block);
+    IndexSet* def = &_defs[block->_pre_order-1];
     DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
-    for( i=b->_nodes.size(); i>1; i-- ) {
-      Node *n = b->_nodes[i-1];
-      if( n->is_Phi() ) break;
-
-      uint r = _names[n->_idx];
+    for (i = block->number_of_nodes(); i > 1; i--) {
+      Node* n = block->get_node(i-1);
+      if (n->is_Phi()) {
+        break;
+      }
+
+      uint r = _names.at(n->_idx);
       assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
       def->insert( r );
       use->remove( r );
       uint cnt = n->req();
-      for( uint k=1; k<cnt; k++ ) {
+      for (uint k = 1; k < cnt; k++) {
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
-        if (_cfg.get_block_for_node(nk) != b) {
-          uint u = _names[nkidx];
-          use->insert( u );
-          DEBUG_ONLY(def_outside->insert( u );)
+        if (_cfg.get_block_for_node(nk) != block) {
+          uint u = _names.at(nkidx);
+          use->insert(u);
+          DEBUG_ONLY(def_outside->insert(u);)
         }
       }
     }
 #ifdef ASSERT
     def_outside->set_next(_free_IndexSet);
     _free_IndexSet = def_outside; // Drop onto free list
 #endif
     // Remove anything defined by Phis and the block start instruction
-    for( uint k=i; k>0; k-- ) {
-      uint r = _names[b->_nodes[k-1]->_idx];
-      def->insert( r );
-      use->remove( r );
+    for (uint k = i; k > 0; k--) {
+      uint r = _names.at(block->get_node(k - 1)->_idx);
+      def->insert(r);
+      use->remove(r);
     }

     // Push these live-in things to predecessors
-    for( uint l=1; l<b->num_preds(); l++ ) {
-      Block *p = _cfg.get_block_for_node(b->pred(l));
-      add_liveout( p, use, first_pass );
+    for (uint l = 1; l < block->num_preds(); l++) {
+      Block* p = _cfg.get_block_for_node(block->pred(l));
+      add_liveout(p, use, first_pass);

       // PhiNode uses go in the live-out set of prior blocks.
-      for( uint k=i; k>0; k-- )
-        add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass );
-    }
-    freeset( b );
-    first_pass.set(b->_pre_order);
+      for (uint k = i; k > 0; k--) {
+        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
+      }
+    }
+    freeset(block);
+    first_pass.set(block->_pre_order);

     // Inner loop: blocks that picked up new live-out values to be propagated
-    while( _worklist->size() ) {
-      // !!!!!
-      // #ifdef ASSERT
-      iters++;
-      // #endif
-      Block *b = _worklist->pop();
-      IndexSet *delta = getset(b);
+    while (_worklist->size()) {
+      Block* block = _worklist->pop();
+      IndexSet *delta = getset(block);
       assert( delta->count(), "missing delta set" );

       // Add new-live-in to predecessors live-out sets
-      for (uint l = 1; l < b->num_preds(); l++) {
-        Block* block = _cfg.get_block_for_node(b->pred(l));
-        add_liveout(block, delta, first_pass);
+      for (uint l = 1; l < block->num_preds(); l++) {
+        Block* predecessor = _cfg.get_block_for_node(block->pred(l));
+        add_liveout(predecessor, delta, first_pass);
       }

-      freeset(b);
+      freeset(block);
     } // End of while-worklist-not-empty

   } // End of for-all-blocks-outer-loop

   // We explicitly clear all of the IndexSets which we are about to release.
   // This allows us to recycle their internal memory into IndexSet's free list.

-  for( i=0; i<_cfg._num_blocks; i++ ) {
+  for (i = 0; i < _cfg.number_of_blocks(); i++) {
     _defs[i].clear();
     if (_deltas[i]) {
       // Is this always true?
       _deltas[i]->clear();
     }
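
Most of the changes in the hunk above are mechanical: direct field accesses such as _cfg._num_blocks, _cfg._blocks[j-1] and b->_nodes[i-1] become the accessors _cfg.number_of_blocks(), _cfg.get_block(j - 1) and block->get_node(i-1), and _names[idx] becomes _names.at(idx). A toy illustration of that encapsulation pattern follows; ToyCFG, ToyBlock and ToyNode are hypothetical stand-ins, not the real PhaseCFG/Block/Node declarations.

  // Callers go through accessors instead of touching the underlying vectors.
  #include <cassert>
  #include <cstdio>
  #include <vector>

  struct ToyNode { int idx; };              // stand-in for Node

  class ToyBlock {                          // hypothetical stand-in for Block
    std::vector<ToyNode*> _nodes;
   public:
    void push_node(ToyNode* n)          { _nodes.push_back(n); }
    unsigned number_of_nodes() const    { return (unsigned)_nodes.size(); }
    ToyNode* get_node(unsigned i) const { assert(i < _nodes.size()); return _nodes[i]; }
  };

  class ToyCFG {                            // hypothetical stand-in for PhaseCFG
    std::vector<ToyBlock*> _blocks;
   public:
    void push_block(ToyBlock* b)          { _blocks.push_back(b); }
    unsigned number_of_blocks() const     { return (unsigned)_blocks.size(); }
    ToyBlock* get_block(unsigned i) const { assert(i < _blocks.size()); return _blocks[i]; }
  };

  int main() {
    ToyNode n0{0}, n1{1};
    ToyBlock b;
    b.push_node(&n0);
    b.push_node(&n1);
    ToyCFG cfg;
    cfg.push_block(&b);
    // Iteration in the style of the refactored PhaseLive::compute() loops.
    for (unsigned i = 0; i < cfg.number_of_blocks(); i++) {
      ToyBlock* block = cfg.get_block(i);
      for (unsigned j = 0; j < block->number_of_nodes(); j++) {
        std::printf("block %u node %d\n", i, block->get_node(j)->idx);
      }
    }
    return 0;
  }
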
@@ -169,28 +164,25 @@
     temp->clear();
   }

 }

-//------------------------------stats------------------------------------------
 #ifndef PRODUCT
 void PhaseLive::stats(uint iters) const {
 }
 #endif

-//------------------------------getset-----------------------------------------
 // Get an IndexSet for a block. Return existing one, if any. Make a new
 // empty one if a prior one does not exist.
 IndexSet *PhaseLive::getset( Block *p ) {
   IndexSet *delta = _deltas[p->_pre_order-1];
   if( !delta ) // Not on worklist?
     // Get a free set; flag as being on worklist
     delta = _deltas[p->_pre_order-1] = getfreeset();
   return delta; // Return set of new live-out items
 }

-//------------------------------getfreeset-------------------------------------
 // Pull from free list, or allocate. Internal allocation on the returned set
 // is always from thread local storage.
 IndexSet *PhaseLive::getfreeset( ) {
   IndexSet *f = _free_IndexSet;
   if( !f ) {
@@ -205,20 +197,18 @@
     f->initialize(_maxlrg, Thread::current()->resource_area());
   }
   return f;
 }

-//------------------------------freeset----------------------------------------
 // Free an IndexSet from a block.
 void PhaseLive::freeset( const Block *p ) {
   IndexSet *f = _deltas[p->_pre_order-1];
   f->set_next(_free_IndexSet);
   _free_IndexSet = f; // Drop onto free list
   _deltas[p->_pre_order-1] = NULL;
 }

-//------------------------------add_liveout------------------------------------
 // Add a live-out value to a given blocks live-out set. If it is new, then
 // also add it to the delta set and stick the block on the worklist.
 void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
   IndexSet *live = &_live[p->_pre_order-1];
   if( live->insert(r) ) { // If actually inserted...
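
getfreeset() and freeset() above implement a simple recycling scheme: a released delta set is not deallocated, it is chained onto _free_IndexSet through its next pointer, and getfreeset() pops from that chain before allocating anything new. A simplified sketch of the pattern follows; ToySet and FreeListPool are hypothetical names, and the real IndexSet allocation from the thread's resource area is replaced by plain new so the example stands alone.

  #include <cstdio>

  struct ToySet {
    ToySet* next = nullptr;                 // intrusive free-list link
    void clear() { /* reset contents so the set can be reused */ }
  };

  struct FreeListPool {
    ToySet* free_list = nullptr;

    ToySet* acquire() {                     // cf. PhaseLive::getfreeset()
      if (free_list != nullptr) {
        ToySet* s = free_list;              // pop the head of the chain
        free_list = s->next;
        return s;
      }
      return new ToySet();                  // slow path: allocate a fresh set
    }

    void release(ToySet* s) {               // cf. PhaseLive::freeset()
      s->clear();
      s->next = free_list;                  // drop onto free list
      free_list = s;
    }
  };

  int main() {
    FreeListPool pool;
    ToySet* a = pool.acquire();             // allocated
    pool.release(a);                        // recycled, not freed
    ToySet* b = pool.acquire();             // the same object comes back
    std::printf("recycled: %s\n", (a == b) ? "yes" : "no");
    delete b;                               // clean up for the sketch
    return 0;
  }
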
@@ -231,12 +221,10 @@
       getset(p)->insert(r);
     }
   }
 }

-
-//------------------------------add_liveout------------------------------------
 // Add a vector of live-out values to a given blocks live-out set.
 void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
   IndexSet *live = &_live[p->_pre_order-1];
   IndexSet *defs = &_defs[p->_pre_order-1];
   IndexSet *on_worklist = _deltas[p->_pre_order-1];
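
The single-value add_liveout() above says: if a value is newly inserted into a block's live-out set and is not generated locally, it also goes into the block's delta set and the block is queued so the delta can be pushed to its predecessors. A hedged sketch of that step with toy types follows; the real code additionally consults the first_pass VectorSet and the existing delta pointer before pushing onto the worklist, which this sketch collapses into a simple "first delta entry" test.

  #include <cstdio>
  #include <set>
  #include <vector>

  struct ToyBlock {
    std::set<int> live_out;  // accumulated live-out solution
    std::set<int> defs;      // values defined locally in the block
    std::set<int> delta;     // newly live-out values not yet pushed upstream
  };

  // Toy version of the single-value add_liveout(): extend the live-out set and,
  // if the value is genuinely new and not generated locally, record it in the
  // delta set and queue the block for another propagation round.
  void add_liveout(ToyBlock& b, int r, std::vector<ToyBlock*>& worklist) {
    if (!b.live_out.insert(r).second) return;    // already live-out: nothing new
    if (b.defs.count(r)) return;                 // defined here: not live-in
    if (b.delta.empty()) worklist.push_back(&b); // first delta entry: enqueue
    b.delta.insert(r);
  }

  int main() {
    ToyBlock pred;
    std::vector<ToyBlock*> worklist;
    add_liveout(pred, 7, worklist);
    add_liveout(pred, 7, worklist);              // duplicate: no second enqueue
    std::printf("delta size %zu, worklist size %zu\n",
                pred.delta.size(), worklist.size());
    return 0;
  }
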
@@ -260,35 +248,35 @@
     _free_IndexSet = delta; // Drop onto free list
   }
 }

 #ifndef PRODUCT
-//------------------------------dump-------------------------------------------
 // Dump the live-out set for a block
 void PhaseLive::dump( const Block *b ) const {
   tty->print("Block %d: ",b->_pre_order);
   tty->print("LiveOut: "); _live[b->_pre_order-1].dump();
-  uint cnt = b->_nodes.size();
+  uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
-    b->_nodes[i]->dump();
+    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
+    b->get_node(i)->dump();
   }
   tty->print("\n");
 }

-//------------------------------verify_base_ptrs-------------------------------
 // Verify that base pointers and derived pointers are still sane.
 void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
 #ifdef ASSERT
   Unique_Node_List worklist(a);
-  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
-    Block *b = _cfg._blocks[i];
-    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
-      Node *n = b->_nodes[j-1];
-      if( n->is_Phi() ) break;
+  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+    Block* block = _cfg.get_block(i);
+    for (uint j = block->end_idx() + 1; j > 1; j--) {
+      Node* n = block->get_node(j-1);
+      if (n->is_Phi()) {
+        break;
+      }
       // Found a safepoint?
-      if( n->is_MachSafePoint() ) {
+      if (n->is_MachSafePoint()) {
         MachSafePointNode *sfpt = n->as_MachSafePoint();
         JVMState* jvms = sfpt->jvms();
         if (jvms != NULL) {
           // Now scan for a live derived pointer
           if (jvms->oopoff() < sfpt->req()) {
@@ -331,11 +319,11 @@
               check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
               check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
 #ifdef _LP64
               UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
               UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
-              UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+              UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
 #endif
               check->as_Mach()->ideal_Opcode() == Op_LoadP ||
               check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
             // Valid nodes
           } else {
@@ -356,11 +344,10 @@
     } // End of forall instructions in block
   } // End of forall blocks
 #endif
 }

-//------------------------------verify-------------------------------------
 // Verify that graphs and base pointers are still sane.
 void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
 #ifdef ASSERT
   if( VerifyOpto || VerifyRegisterAllocator ) {
     _cfg.verify();