Mercurial > hg > truffle
annotate src/share/vm/opto/phaseX.hpp @ 1994:6cd6d394f280

7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author:   ysr
date:     Tue, 07 Dec 2010 21:55:53 -0800
parents:  f95d63e2154a
children: 08eb13460b3a
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
24 | |
1972 | 25 #ifndef SHARE_VM_OPTO_PHASEX_HPP |
26 #define SHARE_VM_OPTO_PHASEX_HPP | |
27 | |
28 #include "libadt/dict.hpp" | |
29 #include "libadt/vectset.hpp" | |
30 #include "memory/resourceArea.hpp" | |
31 #include "opto/memnode.hpp" | |
32 #include "opto/node.hpp" | |
33 #include "opto/phase.hpp" | |
34 #include "opto/type.hpp" | |
35 | |
0 | 36 class Compile; |
37 class ConINode; | |
38 class ConLNode; | |
39 class Node; | |
40 class Type; | |
41 class PhaseTransform; | |
42 class PhaseGVN; | |
43 class PhaseIterGVN; | |
44 class PhaseCCP; | |
45 class PhasePeephole; | |
46 class PhaseRegAlloc; | |
47 | |
48 | |
49 //----------------------------------------------------------------------------- | |
50 // Expandable closed hash-table of nodes, initialized to NULL. | |
51 // Note that the constructor just zeros things | |
52 // Storage is reclaimed when the Arena's lifetime is over. | |
53 class NodeHash : public StackObj { | |
54 protected: | |
55 Arena *_a; // Arena to allocate in | |
56 uint _max; // Size of table (power of 2) | |
57 uint _inserts; // For grow and debug, count of hash_inserts | |
58 uint _insert_limit; // 'grow' when _inserts reaches _insert_limit | |
59 Node **_table; // Hash table of Node pointers | |
60 Node *_sentinel; // Replaces deleted entries in hash table | |
61 | |
62 public: | |
63 NodeHash(uint est_max_size); | |
64 NodeHash(Arena *arena, uint est_max_size); | |
65 NodeHash(NodeHash *use_this_state); | |
66 #ifdef ASSERT | |
67 ~NodeHash(); // Unlock all nodes upon destruction of table. | |
68 void operator=(const NodeHash&); // Unlock all nodes upon replacement of table. | |
69 #endif | |
70 Node *hash_find(const Node*);// Find an equivalent version in hash table | |
71 Node *hash_find_insert(Node*);// If not in table insert else return found node | |
72 void hash_insert(Node*); // Insert into hash table | |
73 bool hash_delete(const Node*);// Replace with _sentinel in hash table | |
74 void check_grow() { | |
75 _inserts++; | |
76 if( _inserts == _insert_limit ) { grow(); } | |
77 assert( _inserts <= _insert_limit, "hash table overflow"); | |
78 assert( _inserts < _max, "hash table overflow" ); | |
79 } | |
80 static uint round_up(uint); // Round up to nearest power of 2 | |
81 void grow(); // Grow _table to next power of 2 and rehash | |
82 // Return 75% of _max, rounded up. | |
83 uint insert_limit() const { return _max - (_max>>2); } | |
84 | |
85 void clear(); // Set all entries to NULL, keep storage. | |
86 // Size of hash table | |
87 uint size() const { return _max; } | |
88 // Return Node* at index in table | |
89 Node *at(uint table_index) { | |
90 assert(table_index < _max, "Must be within table"); | |
91 return _table[table_index]; | |
92 } | |
93 | |
94 void remove_useless_nodes(VectorSet &useful); // replace with sentinel | |
95 | |
96 Node *sentinel() { return _sentinel; } | |
97 | |
98 #ifndef PRODUCT | |
99 Node *find_index(uint idx); // For debugging | |
100 void dump(); // For debugging, dump statistics | |
101 #endif | |
102 uint _grows; // For debugging, count of table grow()s | |
103 uint _look_probes; // For debugging, count of hash probes | |
104 uint _lookup_hits; // For debugging, count of hash_finds | |
105 uint _lookup_misses; // For debugging, count of hash_finds | |
106 uint _insert_probes; // For debugging, count of hash probes | |
107 uint _delete_probes; // For debugging, count of hash probes for deletes | |
108 uint _delete_hits; // For debugging, count of hash probes for deletes | |
109 uint _delete_misses; // For debugging, count of hash probes for deletes | |
110 uint _total_inserts; // For debugging, total inserts into hash table | |
111 uint _total_insert_probes; // For debugging, total probes while inserting | |
112 }; | |
113 | |
114 | |
115 //----------------------------------------------------------------------------- | |
116 // Map dense integer indices to Types. Uses classic doubling-array trick. | |
117 // Abstractly provides an infinite array of Type*'s, initialized to NULL. | |
118 // Note that the constructor just zeros things, and since I use Arena | |
119 // allocation I do not need a destructor to reclaim storage. | |
120 // Despite the general name, this class is customized for use by PhaseTransform. | |
121 class Type_Array : public StackObj { | |
122 Arena *_a; // Arena to allocate in | |
123 uint _max; | |
124 const Type **_types; | |
125 void grow( uint i ); // Grow array node to fit | |
126 const Type *operator[] ( uint i ) const // Lookup, or NULL for not mapped | |
127 { return (i<_max) ? _types[i] : (Type*)NULL; } | |
128 friend class PhaseTransform; | |
129 public: | |
130 Type_Array(Arena *a) : _a(a), _max(0), _types(0) {} | |
131 Type_Array(Type_Array *ta) : _a(ta->_a), _max(ta->_max), _types(ta->_types) { } | |
132 const Type *fast_lookup(uint i) const{assert(i<_max,"oob");return _types[i];} | |
133 // Extend the mapping: index i maps to Type *n. | |
134 void map( uint i, const Type *n ) { if( i>=_max ) grow(i); _types[i] = n; } | |
135 uint Size() const { return _max; } | |
136 #ifndef PRODUCT | |
137 void dump() const; | |
138 #endif | |
139 }; | |
140 | |
141 | |
142 //------------------------------PhaseRemoveUseless----------------------------- | |
143 // Remove useless nodes from GVN hash-table, worklist, and graph | |
144 class PhaseRemoveUseless : public Phase { | |
145 protected: | |
146 Unique_Node_List _useful; // Nodes reachable from root | |
147 // list is allocated from current resource area | |
148 public: | |
149 PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ); | |
150 | |
151 Unique_Node_List *get_useful() { return &_useful; } | |
152 }; | |
153 | |
154 | |
155 //------------------------------PhaseTransform--------------------------------- | |
156 // Phases that analyze, then transform. Constructing the Phase object does any | |
157 // global or slow analysis. The results are cached later for a fast | |
158 // transformation pass. When the Phase object is deleted the cached analysis | |
159 // results are deleted. | |
160 class PhaseTransform : public Phase { | |
161 protected: | |
162 Arena* _arena; | |
163 Node_Array _nodes; // Map old node indices to new nodes. | |
164 Type_Array _types; // Map old node indices to Types. | |
165 | |
166 // ConNode caches: | |
167 enum { _icon_min = -1 * HeapWordSize, | |
168 _icon_max = 16 * HeapWordSize, | |
169 _lcon_min = _icon_min, | |
170 _lcon_max = _icon_max, | |
171 _zcon_max = (uint)T_CONFLICT | |
172 }; | |
173 ConINode* _icons[_icon_max - _icon_min + 1]; // cached jint constant nodes | |
174 ConLNode* _lcons[_lcon_max - _lcon_min + 1]; // cached jlong constant nodes | |
175 ConNode* _zcons[_zcon_max + 1]; // cached is_zero_type nodes | |
176 void init_con_caches(); | |
177 | |
178 // Support both int and long caches because either might be an intptr_t, | |
179 // so they show up frequently in address computations. | |
180 | |
181 public: | |
182 PhaseTransform( PhaseNumber pnum ); | |
183 PhaseTransform( Arena *arena, PhaseNumber pnum ); | |
184 PhaseTransform( PhaseTransform *phase, PhaseNumber pnum ); | |
185 | |
186 Arena* arena() { return _arena; } | |
187 Type_Array& types() { return _types; } | |
188 // _nodes is used in varying ways by subclasses, which define local accessors | |
189 | |
190 public: | |
191 // Get a previously recorded type for the node n. | |
192 // This type must already have been recorded. | |
193 // If you want the type of a very new (untransformed) node, | |
194 // you must use type_or_null, and test the result for NULL. | |
195 const Type* type(const Node* n) const { | |
196 const Type* t = _types.fast_lookup(n->_idx); | |
197 assert(t != NULL, "must set before get"); | |
198 return t; | |
199 } | |
200 // Get a previously recorded type for the node n, | |
201 // or else return NULL if there is none. | |
202 const Type* type_or_null(const Node* n) const { | |
203 return _types.fast_lookup(n->_idx); | |
204 } | |
205 // Record a type for a node. | |
206 void set_type(const Node* n, const Type *t) { | |
207 assert(t != NULL, "type must not be null"); | |
208 _types.map(n->_idx, t); | |
209 } | |
210 // Record an initial type for a node, the node's bottom type. | |
211 void set_type_bottom(const Node* n) { | |
212 // Use this for initialization when bottom_type() (or better) is not handy. | |
213 // Usually the initialization shoudl be to n->Value(this) instead, | |
214 // or a hand-optimized value like Type::MEMORY or Type::CONTROL. | |
215 assert(_types[n->_idx] == NULL, "must set the initial type just once"); | |
216 _types.map(n->_idx, n->bottom_type()); | |
217 } | |
218 // Make sure the types array is big enough to record a size for the node n. | |
219 // (In product builds, we never want to do range checks on the types array!) | |
220 void ensure_type_or_null(const Node* n) { | |
221 if (n->_idx >= _types.Size()) | |
222 _types.map(n->_idx, NULL); // Grow the types array as needed. | |
223 } | |
224 | |
225 // Utility functions: | |
226 const TypeInt* find_int_type( Node* n); | |
227 const TypeLong* find_long_type(Node* n); | |
228 jint find_int_con( Node* n, jint value_if_unknown) { | |
229 const TypeInt* t = find_int_type(n); | |
230 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
231 } | |
232 jlong find_long_con(Node* n, jlong value_if_unknown) { | |
233 const TypeLong* t = find_long_type(n); | |
234 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
235 } | |
236 | |
237 // Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc. | |
238 // Same as transform(ConNode::make(t)). | |
239 ConNode* makecon(const Type* t); | |
240 virtual ConNode* uncached_makecon(const Type* t) // override in PhaseValues | |
241 { ShouldNotCallThis(); return NULL; } | |
242 | |
243 // Fast int or long constant. Same as TypeInt::make(i) or TypeLong::make(l). | |
244 ConINode* intcon(jint i); | |
245 ConLNode* longcon(jlong l); | |
246 | |
247 // Fast zero or null constant. Same as makecon(Type::get_zero_type(bt)). | |
248 ConNode* zerocon(BasicType bt); | |
249 | |
250 // Return a node which computes the same function as this node, but | |
251 // in a faster or cheaper fashion. | |
252 virtual Node *transform( Node *n ) = 0; | |
253 | |
254 // Return whether two Nodes are equivalent. | |
255 // Must not be recursive, since the recursive version is built from this. | |
256 // For pessimistic optimizations this is simply pointer equivalence. | |
257 bool eqv(const Node* n1, const Node* n2) const { return n1 == n2; } | |
258 | |
259 // Return whether two Nodes are equivalent, after stripping casting. | |
260 bool eqv_uncast(const Node* n1, const Node* n2) const { | |
261 return eqv(n1->uncast(), n2->uncast()); | |
262 } | |
263 | |
264 // For pessimistic passes, the return type must monotonically narrow. | |
265 // For optimistic passes, the return type must monotonically widen. | |
266 // It is possible to get into a "death march" in either type of pass, | |
267 // where the types are continually moving but it will take 2**31 or | |
268 // more steps to converge. This doesn't happen on most normal loops. | |
269 // | |
270 // Here is an example of a deadly loop for an optimistic pass, along | |
271 // with a partial trace of inferred types: | |
272 // x = phi(0,x'); L: x' = x+1; if (x' >= 0) goto L; | |
273 // 0 1 join([0..max], 1) | |
274 // [0..1] [1..2] join([0..max], [1..2]) | |
275 // [0..2] [1..3] join([0..max], [1..3]) | |
276 // ... ... ... | |
277 // [0..max] [min]u[1..max] join([0..max], [min..max]) | |
278 // [0..max] ==> fixpoint | |
279 // We would have proven, the hard way, that the iteration space is all | |
280 // non-negative ints, with the loop terminating due to 32-bit overflow. | |
281 // | |
282 // Here is the corresponding example for a pessimistic pass: | |
283 // x = phi(0,x'); L: x' = x-1; if (x' >= 0) goto L; | |
284 // int int join([0..max], int) | |
285 // [0..max] [-1..max-1] join([0..max], [-1..max-1]) | |
286 // [0..max-1] [-1..max-2] join([0..max], [-1..max-2]) | |
287 // ... ... ... | |
288 // [0..1] [-1..0] join([0..max], [-1..0]) | |
289 // 0 -1 join([0..max], -1) | |
290 // 0 == fixpoint | |
291 // We would have proven, the hard way, that the iteration space is {0}. | |
292 // (Usually, other optimizations will make the "if (x >= 0)" fold up | |
293 // before we get into trouble. But not always.) | |
294 // | |
295 // It's a pleasant thing to observe that the pessimistic pass | |
296 // will make short work of the optimistic pass's deadly loop, | |
297 // and vice versa. That is a good example of the complementary | |
298 // purposes of the CCP (optimistic) vs. GVN (pessimistic) phases. | |
299 // | |
300 // In any case, only widen or narrow a few times before going to the | |
301 // correct flavor of top or bottom. | |
302 // | |
303 // This call only needs to be made once as the data flows around any | |
304 // given cycle. We do it at Phis, and nowhere else. | |
305 // The types presented are the new type of a phi (computed by PhiNode::Value) | |
306 // and the previously computed type, last time the phi was visited. | |
307 // | |
308 // The third argument is upper limit for the saturated value, | |
309 // if the phase wishes to widen the new_type. | |
310 // If the phase is narrowing, the old type provides a lower limit. | |
311 // Caller guarantees that old_type and new_type are no higher than limit_type. | |
312 virtual const Type* saturate(const Type* new_type, const Type* old_type, | |
313 const Type* limit_type) const | |
314 { ShouldNotCallThis(); return NULL; } | |
315 | |
316 #ifndef PRODUCT | |
317 void dump_old2new_map() const; | |
318 void dump_new( uint new_lidx ) const; | |
319 void dump_types() const; | |
320 void dump_nodes_and_types(const Node *root, uint depth, bool only_ctrl = true); | |
321 void dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited); | |
322 | |
323 uint _count_progress; // For profiling, count transforms that make progress | |
1489
cff162798819
6888953: some calls to function-like macros are missing semicolons
jcoomes
parents:
1080
diff
changeset
|
324 void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); } |
0 | 325 void clear_progress() { _count_progress = 0; } |
326 uint made_progress() const { return _count_progress; } | |
327 | |
328 uint _count_transforms; // For profiling, count transforms performed | |
329 void set_transforms() { ++_count_transforms; } | |
330 void clear_transforms() { _count_transforms = 0; } | |
331 uint made_transforms() const{ return _count_transforms; } | |
332 | |
333 bool _allow_progress; // progress not allowed during verification pass | |
334 void set_allow_progress(bool allow) { _allow_progress = allow; } | |
335 bool allow_progress() { return _allow_progress; } | |
336 #endif | |
337 }; | |
338 | |
339 //------------------------------PhaseValues------------------------------------ | |
340 // Phase infrastructure to support values | |
341 class PhaseValues : public PhaseTransform { | |
342 protected: | |
343 NodeHash _table; // Hash table for value-numbering | |
344 | |
345 public: | |
346 PhaseValues( Arena *arena, uint est_max_size ); | |
347 PhaseValues( PhaseValues *pt ); | |
348 PhaseValues( PhaseValues *ptv, const char *dummy ); | |
349 NOT_PRODUCT( ~PhaseValues(); ) | |
350 virtual PhaseIterGVN *is_IterGVN() { return 0; } | |
351 | |
352 // Some Ideal and other transforms delete --> modify --> insert values | |
353 bool hash_delete(Node *n) { return _table.hash_delete(n); } | |
354 void hash_insert(Node *n) { _table.hash_insert(n); } | |
355 Node *hash_find_insert(Node *n){ return _table.hash_find_insert(n); } | |
356 Node *hash_find(const Node *n) { return _table.hash_find(n); } | |
357 | |
358 // Used after parsing to eliminate values that are no longer in program | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
948
diff
changeset
|
359 void remove_useless_nodes(VectorSet &useful) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
948
diff
changeset
|
360 _table.remove_useless_nodes(useful); |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
948
diff
changeset
|
361 // this may invalidate cached cons so reset the cache |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
948
diff
changeset
|
362 init_con_caches(); |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
948
diff
changeset
|
363 } |
0 | 364 |
365 virtual ConNode* uncached_makecon(const Type* t); // override from PhaseTransform | |
366 | |
367 virtual const Type* saturate(const Type* new_type, const Type* old_type, | |
368 const Type* limit_type) const | |
369 { return new_type; } | |
370 | |
371 #ifndef PRODUCT | |
372 uint _count_new_values; // For profiling, count new values produced | |
373 void inc_new_values() { ++_count_new_values; } | |
374 void clear_new_values() { _count_new_values = 0; } | |
375 uint made_new_values() const { return _count_new_values; } | |
376 #endif | |
377 }; | |
378 | |
379 | |
380 //------------------------------PhaseGVN--------------------------------------- | |
381 // Phase for performing local, pessimistic GVN-style optimizations. | |
382 class PhaseGVN : public PhaseValues { | |
383 public: | |
384 PhaseGVN( Arena *arena, uint est_max_size ) : PhaseValues( arena, est_max_size ) {} | |
385 PhaseGVN( PhaseGVN *gvn ) : PhaseValues( gvn ) {} | |
386 PhaseGVN( PhaseGVN *gvn, const char *dummy ) : PhaseValues( gvn, dummy ) {} | |
387 | |
388 // Return a node which computes the same function as this node, but | |
389 // in a faster or cheaper fashion. | |
390 Node *transform( Node *n ); | |
391 Node *transform_no_reclaim( Node *n ); | |
392 | |
393 // Check for a simple dead loop when a data node references itself. | |
394 DEBUG_ONLY(void dead_loop_check(Node *n);) | |
395 }; | |
396 | |
397 //------------------------------PhaseIterGVN----------------------------------- | |
398 // Phase for iteratively performing local, pessimistic GVN-style optimizations. | |
399 // and ideal transformations on the graph. | |
400 class PhaseIterGVN : public PhaseGVN { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
401 private: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
402 bool _delay_transform; // When true simply register the node when calling transform |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
403 // instead of actually optimizing it |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
404 |
0 | 405 // Idealize old Node 'n' with respect to its inputs and its value |
406 virtual Node *transform_old( Node *a_node ); | |
1621
6027dddc26c6
6677629: PhaseIterGVN::subsume_node() should call hash_delete() and add_users_to_worklist()
kvn
parents:
1552
diff
changeset
|
407 |
6027dddc26c6
6677629: PhaseIterGVN::subsume_node() should call hash_delete() and add_users_to_worklist()
kvn
parents:
1552
diff
changeset
|
408 // Subsume users of node 'old' into node 'nn' |
6027dddc26c6
6677629: PhaseIterGVN::subsume_node() should call hash_delete() and add_users_to_worklist()
kvn
parents:
1552
diff
changeset
|
409 void subsume_node( Node *old, Node *nn ); |
6027dddc26c6
6677629: PhaseIterGVN::subsume_node() should call hash_delete() and add_users_to_worklist()
kvn
parents:
1552
diff
changeset
|
410 |
0 | 411 protected: |
412 | |
413 // Idealize new Node 'n' with respect to its inputs and its value | |
414 virtual Node *transform( Node *a_node ); | |
415 | |
416 // Warm up hash table, type table and initial worklist | |
417 void init_worklist( Node *a_root ); | |
418 | |
419 virtual const Type* saturate(const Type* new_type, const Type* old_type, | |
420 const Type* limit_type) const; | |
421 // Usually returns new_type. Returns old_type if new_type is only a slight | |
422 // improvement, such that it would take many (>>10) steps to reach 2**32. | |
423 | |
424 public: | |
425 PhaseIterGVN( PhaseIterGVN *igvn ); // Used by CCP constructor | |
426 PhaseIterGVN( PhaseGVN *gvn ); // Used after Parser | |
427 PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ); // Used after +VerifyOpto | |
428 | |
429 virtual PhaseIterGVN *is_IterGVN() { return this; } | |
430 | |
431 Unique_Node_List _worklist; // Iterative worklist | |
432 | |
433 // Given def-use info and an initial worklist, apply Node::Ideal, | |
434 // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU | |
435 // and dominator info to a fixed point. | |
436 void optimize(); | |
437 | |
438 // Register a new node with the iter GVN pass without transforming it. | |
439 // Used when we need to restructure a Region/Phi area and all the Regions | |
440 // and Phis need to complete this one big transform before any other | |
441 // transforms can be triggered on the region. | |
442 // Optional 'orig' is an earlier version of this node. | |
443 // It is significant only for debugging and profiling. | |
444 Node* register_new_node_with_optimizer(Node* n, Node* orig = NULL); | |
445 | |
446 // Kill a globally dead Node. It is allowed to have uses which are | |
447 // assumed dead and left 'in limbo'. | |
448 void remove_globally_dead_node( Node *dead ); | |
449 | |
450 // Kill all inputs to a dead node, recursively making more dead nodes. | |
451 // The Node must be dead locally, i.e., have no uses. | |
452 void remove_dead_node( Node *dead ) { | |
453 assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead"); | |
454 remove_globally_dead_node(dead); | |
455 } | |
456 | |
457 // Add users of 'n' to worklist | |
458 void add_users_to_worklist0( Node *n ); | |
459 void add_users_to_worklist ( Node *n ); | |
460 | |
73
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
461 // Replace old node with new one. |
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
462 void replace_node( Node *old, Node *nn ) { |
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
463 add_users_to_worklist(old); |
1621
6027dddc26c6
6677629: PhaseIterGVN::subsume_node() should call hash_delete() and add_users_to_worklist()
kvn
parents:
1552
diff
changeset
|
464 hash_delete(old); // Yank from hash before hacking edges |
73
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
465 subsume_node(old, nn); |
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
466 } |
a8880a78d355
6259129: (Escape Analysis) scalar replacement for not escaping objects
kvn
parents:
0
diff
changeset
|
467 |
851
fc4be448891f
6851742: (EA) allocation elimination doesn't work with UseG1GC
kvn
parents:
196
diff
changeset
|
468 bool delay_transform() const { return _delay_transform; } |
fc4be448891f
6851742: (EA) allocation elimination doesn't work with UseG1GC
kvn
parents:
196
diff
changeset
|
469 |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
470 void set_delay_transform(bool delay) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
471 _delay_transform = delay; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
472 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
73
diff
changeset
|
473 |
0 | 474 #ifndef PRODUCT |
475 protected: | |
476 // Sub-quadratic implementation of VerifyIterativeGVN. | |
477 unsigned long _verify_counter; | |
478 unsigned long _verify_full_passes; | |
479 enum { _verify_window_size = 30 }; | |
480 Node* _verify_window[_verify_window_size]; | |
481 void verify_step(Node* n); | |
482 #endif | |
483 }; | |
484 | |
485 //------------------------------PhaseCCP--------------------------------------- | |
486 // Phase for performing global Conditional Constant Propagation. | |
487 // Should be replaced with combined CCP & GVN someday. | |
488 class PhaseCCP : public PhaseIterGVN { | |
489 // Non-recursive. Use analysis to transform single Node. | |
490 virtual Node *transform_once( Node *n ); | |
491 | |
492 public: | |
493 PhaseCCP( PhaseIterGVN *igvn ); // Compute conditional constants | |
494 NOT_PRODUCT( ~PhaseCCP(); ) | |
495 | |
496 // Worklist algorithm identifies constants | |
497 void analyze(); | |
498 // Recursive traversal of program. Used analysis to modify program. | |
499 virtual Node *transform( Node *n ); | |
500 // Do any transformation after analysis | |
501 void do_transform(); | |
502 | |
503 virtual const Type* saturate(const Type* new_type, const Type* old_type, | |
504 const Type* limit_type) const; | |
505 // Returns new_type->widen(old_type), which increments the widen bits until | |
506 // giving up with TypeInt::INT or TypeLong::LONG. | |
507 // Result is clipped to limit_type if necessary. | |
508 | |
509 #ifndef PRODUCT | |
510 static uint _total_invokes; // For profiling, count invocations | |
511 void inc_invokes() { ++PhaseCCP::_total_invokes; } | |
512 | |
513 static uint _total_constants; // For profiling, count constants found | |
514 uint _count_constants; | |
515 void clear_constants() { _count_constants = 0; } | |
516 void inc_constants() { ++_count_constants; } | |
517 uint count_constants() const { return _count_constants; } | |
518 | |
519 static void print_statistics(); | |
520 #endif | |
521 }; | |
522 | |
523 | |
524 //------------------------------PhasePeephole---------------------------------- | |
525 // Phase for performing peephole optimizations on register allocated basic blocks. | |
526 class PhasePeephole : public PhaseTransform { | |
527 PhaseRegAlloc *_regalloc; | |
528 PhaseCFG &_cfg; | |
529 // Recursive traversal of program. Pure function is unused in this phase | |
530 virtual Node *transform( Node *n ); | |
531 | |
532 public: | |
533 PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg ); | |
534 NOT_PRODUCT( ~PhasePeephole(); ) | |
535 | |
536 // Do any transformation after analysis | |
537 void do_transform(); | |
538 | |
539 #ifndef PRODUCT | |
540 static uint _total_peepholes; // For profiling, count peephole rules applied | |
541 uint _count_peepholes; | |
542 void clear_peepholes() { _count_peepholes = 0; } | |
543 void inc_peepholes() { ++_count_peepholes; } | |
544 uint count_peepholes() const { return _count_peepholes; } | |
545 | |
546 static void print_statistics(); | |
547 #endif | |
548 }; | |
1972 | 549 |
550 #endif // SHARE_VM_OPTO_PHASEX_HPP |