Mercurial > hg > graal-compiler
annotate src/share/vm/opto/node.hpp @ 1941:79d04223b8a5
Added caching for resolved types and resolved fields.
This is crucial, because the local load elimination will lead to wrong results, if field equality (of two RiField objects with the same object and the same RiType) is not given. The caching makes sure that the default equals implementation is sufficient.
author | Thomas Wuerthinger <wuerthinger@ssw.jku.at> |
---|---|
date | Tue, 28 Dec 2010 18:33:26 +0100 |
parents | c18cbe5936b8 |
children | f95d63e2154a |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
2 * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
25 // Portions of code courtesy of Clifford Click | |
26 | |
27 // Optimization - Graph Style | |
28 | |
29 | |
30 class AbstractLockNode; | |
31 class AddNode; | |
32 class AddPNode; | |
33 class AliasInfo; | |
34 class AllocateArrayNode; | |
35 class AllocateNode; | |
36 class Block; | |
37 class Block_Array; | |
38 class BoolNode; | |
39 class BoxLockNode; | |
40 class CMoveNode; | |
41 class CallDynamicJavaNode; | |
42 class CallJavaNode; | |
43 class CallLeafNode; | |
44 class CallNode; | |
45 class CallRuntimeNode; | |
46 class CallStaticJavaNode; | |
47 class CatchNode; | |
48 class CatchProjNode; | |
49 class CheckCastPPNode; | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
50 class ClearArrayNode; |
0 | 51 class CmpNode; |
52 class CodeBuffer; | |
53 class ConstraintCastNode; | |
54 class ConNode; | |
55 class CountedLoopNode; | |
56 class CountedLoopEndNode; | |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
57 class DecodeNNode; |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
58 class EncodePNode; |
0 | 59 class FastLockNode; |
60 class FastUnlockNode; | |
61 class IfNode; | |
62 class InitializeNode; | |
63 class JVMState; | |
64 class JumpNode; | |
65 class JumpProjNode; | |
66 class LoadNode; | |
67 class LoadStoreNode; | |
68 class LockNode; | |
69 class LoopNode; | |
70 class MachCallDynamicJavaNode; | |
71 class MachCallJavaNode; | |
72 class MachCallLeafNode; | |
73 class MachCallNode; | |
74 class MachCallRuntimeNode; | |
75 class MachCallStaticJavaNode; | |
76 class MachIfNode; | |
77 class MachNode; | |
78 class MachNullCheckNode; | |
79 class MachReturnNode; | |
80 class MachSafePointNode; | |
81 class MachSpillCopyNode; | |
82 class MachTempNode; | |
83 class Matcher; | |
84 class MemBarNode; | |
85 class MemNode; | |
86 class MergeMemNode; | |
87 class MulNode; | |
88 class MultiNode; | |
89 class MultiBranchNode; | |
90 class NeverBranchNode; | |
91 class Node; | |
92 class Node_Array; | |
93 class Node_List; | |
94 class Node_Stack; | |
95 class NullCheckNode; | |
96 class OopMap; | |
33 | 97 class ParmNode; |
0 | 98 class PCTableNode; |
99 class PhaseCCP; | |
100 class PhaseGVN; | |
101 class PhaseIterGVN; | |
102 class PhaseRegAlloc; | |
103 class PhaseTransform; | |
104 class PhaseValues; | |
105 class PhiNode; | |
106 class Pipeline; | |
107 class ProjNode; | |
108 class RegMask; | |
109 class RegionNode; | |
110 class RootNode; | |
111 class SafePointNode; | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
112 class SafePointScalarObjectNode; |
0 | 113 class StartNode; |
114 class State; | |
115 class StoreNode; | |
116 class SubNode; | |
117 class Type; | |
118 class TypeNode; | |
119 class UnlockNode; | |
120 class VectorSet; | |
121 class IfTrueNode; | |
122 class IfFalseNode; | |
123 typedef void (*NFunc)(Node&,void*); | |
124 extern "C" { | |
125 typedef int (*C_sort_func_t)(const void *, const void *); | |
126 } | |
127 | |
128 // The type of all node counts and indexes. | |
129 // It must hold at least 16 bits, but must also be fast to load and store. | |
130 // This type, if less than 32 bits, could limit the number of possible nodes. | |
131 // (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) | |
132 typedef unsigned int node_idx_t; | |
133 | |
134 | |
135 #ifndef OPTO_DU_ITERATOR_ASSERT | |
136 #ifdef ASSERT | |
137 #define OPTO_DU_ITERATOR_ASSERT 1 | |
138 #else | |
139 #define OPTO_DU_ITERATOR_ASSERT 0 | |
140 #endif | |
141 #endif //OPTO_DU_ITERATOR_ASSERT | |
142 | |
143 #if OPTO_DU_ITERATOR_ASSERT | |
144 class DUIterator; | |
145 class DUIterator_Fast; | |
146 class DUIterator_Last; | |
147 #else | |
148 typedef uint DUIterator; | |
149 typedef Node** DUIterator_Fast; | |
150 typedef Node** DUIterator_Last; | |
151 #endif | |
152 | |
153 // Node Sentinel | |
154 #define NodeSentinel (Node*)-1 | |
155 | |
156 // Unknown count frequency | |
157 #define COUNT_UNKNOWN (-1.0f) | |
158 | |
159 //------------------------------Node------------------------------------------- | |
160 // Nodes define actions in the program. They create values, which have types. | |
161 // They are both vertices in a directed graph and program primitives. Nodes | |
162 // are labeled; the label is the "opcode", the primitive function in the lambda | |
163 // calculus sense that gives meaning to the Node. Node inputs are ordered (so | |
164 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to | |
165 // the Node's function. These inputs also define a Type equation for the Node. | |
166 // Solving these Type equations amounts to doing dataflow analysis. | |
167 // Control and data are uniformly represented in the graph. Finally, Nodes | |
168 // have a unique dense integer index which is used to index into side arrays | |
169 // whenever I have phase-specific information. | |
170 | |
171 class Node { | |
172 // Lots of restrictions on cloning Nodes | |
173 Node(const Node&); // not defined; linker error to use these | |
174 Node &operator=(const Node &rhs); | |
175 | |
176 public: | |
177 friend class Compile; | |
178 #if OPTO_DU_ITERATOR_ASSERT | |
179 friend class DUIterator_Common; | |
180 friend class DUIterator; | |
181 friend class DUIterator_Fast; | |
182 friend class DUIterator_Last; | |
183 #endif | |
184 | |
185 // Because Nodes come and go, I define an Arena of Node structures to pull | |
186 // from. This should allow fast access to node creation & deletion. This | |
187 // field is a local cache of a value defined in some "program fragment" for | |
188 // which these Nodes are just a part of. | |
189 | |
190 // New Operator that takes a Compile pointer, this will eventually | |
191 // be the "new" New operator. | |
192 inline void* operator new( size_t x, Compile* C) { | |
193 Node* n = (Node*)C->node_arena()->Amalloc_D(x); | |
194 #ifdef ASSERT | |
195 n->_in = (Node**)n; // magic cookie for assertion check | |
196 #endif | |
197 n->_out = (Node**)C; | |
198 return (void*)n; | |
199 } | |
200 | |
201 // New Operator that takes a Compile pointer, this will eventually | |
202 // be the "new" New operator. | |
203 inline void* operator new( size_t x, Compile* C, int y) { | |
204 Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*)); | |
205 n->_in = (Node**)(((char*)n) + x); | |
206 #ifdef ASSERT | |
207 n->_in[y-1] = n; // magic cookie for assertion check | |
208 #endif | |
209 n->_out = (Node**)C; | |
210 return (void*)n; | |
211 } | |
212 | |
213 // Delete is a NOP | |
214 void operator delete( void *ptr ) {} | |
215 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage | |
216 void destruct(); | |
217 | |
218 // Create a new Node. Required is the number is of inputs required for | |
219 // semantic correctness. | |
220 Node( uint required ); | |
221 | |
222 // Create a new Node with given input edges. | |
223 // This version requires use of the "edge-count" new. | |
224 // E.g. new (C,3) FooNode( C, NULL, left, right ); | |
225 Node( Node *n0 ); | |
226 Node( Node *n0, Node *n1 ); | |
227 Node( Node *n0, Node *n1, Node *n2 ); | |
228 Node( Node *n0, Node *n1, Node *n2, Node *n3 ); | |
229 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); | |
230 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); | |
231 Node( Node *n0, Node *n1, Node *n2, Node *n3, | |
232 Node *n4, Node *n5, Node *n6 ); | |
233 | |
234 // Clone an inherited Node given only the base Node type. | |
235 Node* clone() const; | |
236 | |
237 // Clone a Node, immediately supplying one or two new edges. | |
238 // The first and second arguments, if non-null, replace in(1) and in(2), | |
239 // respectively. | |
240 Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { | |
241 Node* nn = clone(); | |
242 if (in1 != NULL) nn->set_req(1, in1); | |
243 if (in2 != NULL) nn->set_req(2, in2); | |
244 return nn; | |
245 } | |
246 | |
247 private: | |
248 // Shared setup for the above constructors. | |
249 // Handles all interactions with Compile::current. | |
250 // Puts initial values in all Node fields except _idx. | |
251 // Returns the initial value for _idx, which cannot | |
252 // be initialized by assignment. | |
253 inline int Init(int req, Compile* C); | |
254 | |
255 //----------------- input edge handling | |
256 protected: | |
257 friend class PhaseCFG; // Access to address of _in array elements | |
258 Node **_in; // Array of use-def references to Nodes | |
259 Node **_out; // Array of def-use references to Nodes | |
260 | |
605 | 261 // Input edges are split into two categories. Required edges are required |
0 | 262 // for semantic correctness; order is important and NULLs are allowed. |
263 // Precedence edges are used to help determine execution order and are | |
264 // added, e.g., for scheduling purposes. They are unordered and not | |
265 // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 | |
266 // are required, from _cnt to _max-1 are precedence edges. | |
267 node_idx_t _cnt; // Total number of required Node inputs. | |
268 | |
269 node_idx_t _max; // Actual length of input array. | |
270 | |
271 // Output edges are an unordered list of def-use edges which exactly | |
272 // correspond to required input edges which point from other nodes | |
273 // to this one. Thus the count of the output edges is the number of | |
274 // users of this node. | |
275 node_idx_t _outcnt; // Total number of Node outputs. | |
276 | |
277 node_idx_t _outmax; // Actual length of output array. | |
278 | |
279 // Grow the actual input array to the next larger power-of-2 bigger than len. | |
280 void grow( uint len ); | |
281 // Grow the output array to the next larger power-of-2 bigger than len. | |
282 void out_grow( uint len ); | |
283 | |
284 public: | |
285 // Each Node is assigned a unique small/dense number. This number is used | |
286 // to index into auxiliary arrays of data and bitvectors. | |
287 // It is declared const to defend against inadvertant assignment, | |
288 // since it is used by clients as a naked field. | |
289 const node_idx_t _idx; | |
290 | |
291 // Get the (read-only) number of input edges | |
292 uint req() const { return _cnt; } | |
293 uint len() const { return _max; } | |
294 // Get the (read-only) number of output edges | |
295 uint outcnt() const { return _outcnt; } | |
296 | |
297 #if OPTO_DU_ITERATOR_ASSERT | |
298 // Iterate over the out-edges of this node. Deletions are illegal. | |
299 inline DUIterator outs() const; | |
300 // Use this when the out array might have changed to suppress asserts. | |
301 inline DUIterator& refresh_out_pos(DUIterator& i) const; | |
302 // Does the node have an out at this position? (Used for iteration.) | |
303 inline bool has_out(DUIterator& i) const; | |
304 inline Node* out(DUIterator& i) const; | |
305 // Iterate over the out-edges of this node. All changes are illegal. | |
306 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; | |
307 inline Node* fast_out(DUIterator_Fast& i) const; | |
308 // Iterate over the out-edges of this node, deleting one at a time. | |
309 inline DUIterator_Last last_outs(DUIterator_Last& min) const; | |
310 inline Node* last_out(DUIterator_Last& i) const; | |
311 // The inline bodies of all these methods are after the iterator definitions. | |
312 #else | |
313 // Iterate over the out-edges of this node. Deletions are illegal. | |
314 // This iteration uses integral indexes, to decouple from array reallocations. | |
315 DUIterator outs() const { return 0; } | |
316 // Use this when the out array might have changed to suppress asserts. | |
317 DUIterator refresh_out_pos(DUIterator i) const { return i; } | |
318 | |
319 // Reference to the i'th output Node. Error if out of bounds. | |
320 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } | |
321 // Does the node have an out at this position? (Used for iteration.) | |
322 bool has_out(DUIterator i) const { return i < _outcnt; } | |
323 | |
324 // Iterate over the out-edges of this node. All changes are illegal. | |
325 // This iteration uses a pointer internal to the out array. | |
326 DUIterator_Fast fast_outs(DUIterator_Fast& max) const { | |
327 Node** out = _out; | |
328 // Assign a limit pointer to the reference argument: | |
329 max = out + (ptrdiff_t)_outcnt; | |
330 // Return the base pointer: | |
331 return out; | |
332 } | |
333 Node* fast_out(DUIterator_Fast i) const { return *i; } | |
334 // Iterate over the out-edges of this node, deleting one at a time. | |
335 // This iteration uses a pointer internal to the out array. | |
336 DUIterator_Last last_outs(DUIterator_Last& min) const { | |
337 Node** out = _out; | |
338 // Assign a limit pointer to the reference argument: | |
339 min = out; | |
340 // Return the pointer to the start of the iteration: | |
341 return out + (ptrdiff_t)_outcnt - 1; | |
342 } | |
343 Node* last_out(DUIterator_Last i) const { return *i; } | |
344 #endif | |
345 | |
346 // Reference to the i'th input Node. Error if out of bounds. | |
347 Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; } | |
348 // Reference to the i'th output Node. Error if out of bounds. | |
349 // Use this accessor sparingly. We are going trying to use iterators instead. | |
350 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } | |
351 // Return the unique out edge. | |
352 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } | |
353 // Delete out edge at position 'i' by moving last out edge to position 'i' | |
354 void raw_del_out(uint i) { | |
355 assert(i < _outcnt,"oob"); | |
356 assert(_outcnt > 0,"oob"); | |
357 #if OPTO_DU_ITERATOR_ASSERT | |
358 // Record that a change happened here. | |
359 debug_only(_last_del = _out[i]; ++_del_tick); | |
360 #endif | |
361 _out[i] = _out[--_outcnt]; | |
362 // Smash the old edge so it can't be used accidentally. | |
363 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); | |
364 } | |
365 | |
366 #ifdef ASSERT | |
367 bool is_dead() const; | |
368 #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) | |
369 #endif | |
370 | |
371 // Set a required input edge, also updates corresponding output edge | |
372 void add_req( Node *n ); // Append a NEW required input | |
373 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). | |
374 void del_req( uint idx ); // Delete required edge & compact | |
375 void ins_req( uint i, Node *n ); // Insert a NEW required input | |
376 void set_req( uint i, Node *n ) { | |
377 assert( is_not_dead(n), "can not use dead node"); | |
378 assert( i < _cnt, "oob"); | |
379 assert( !VerifyHashTableKeys || _hash_lock == 0, | |
380 "remove node from hash table before modifying it"); | |
381 Node** p = &_in[i]; // cache this._in, across the del_out call | |
382 if (*p != NULL) (*p)->del_out((Node *)this); | |
383 (*p) = n; | |
384 if (n != NULL) n->add_out((Node *)this); | |
385 } | |
386 // Light version of set_req() to init inputs after node creation. | |
387 void init_req( uint i, Node *n ) { | |
388 assert( i == 0 && this == n || | |
389 is_not_dead(n), "can not use dead node"); | |
390 assert( i < _cnt, "oob"); | |
391 assert( !VerifyHashTableKeys || _hash_lock == 0, | |
392 "remove node from hash table before modifying it"); | |
393 assert( _in[i] == NULL, "sanity"); | |
394 _in[i] = n; | |
395 if (n != NULL) n->add_out((Node *)this); | |
396 } | |
397 // Find first occurrence of n among my edges: | |
398 int find_edge(Node* n); | |
399 int replace_edge(Node* old, Node* neww); | |
400 // NULL out all inputs to eliminate incoming Def-Use edges. | |
401 // Return the number of edges between 'n' and 'this' | |
402 int disconnect_inputs(Node *n); | |
403 | |
404 // Quickly, return true if and only if I am Compile::current()->top(). | |
405 bool is_top() const { | |
406 assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); | |
407 return (_out == NULL); | |
408 } | |
409 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) | |
410 void setup_is_top(); | |
411 | |
412 // Strip away casting. (It is depth-limited.) | |
413 Node* uncast() const; | |
414 | |
415 private: | |
416 static Node* uncast_helper(const Node* n); | |
417 | |
418 // Add an output edge to the end of the list | |
419 void add_out( Node *n ) { | |
420 if (is_top()) return; | |
421 if( _outcnt == _outmax ) out_grow(_outcnt); | |
422 _out[_outcnt++] = n; | |
423 } | |
424 // Delete an output edge | |
425 void del_out( Node *n ) { | |
426 if (is_top()) return; | |
427 Node** outp = &_out[_outcnt]; | |
428 // Find and remove n | |
429 do { | |
430 assert(outp > _out, "Missing Def-Use edge"); | |
431 } while (*--outp != n); | |
432 *outp = _out[--_outcnt]; | |
433 // Smash the old edge so it can't be used accidentally. | |
434 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); | |
435 // Record that a change happened here. | |
436 #if OPTO_DU_ITERATOR_ASSERT | |
437 debug_only(_last_del = n; ++_del_tick); | |
438 #endif | |
439 } | |
440 | |
441 public: | |
442 // Globally replace this node by a given new node, updating all uses. | |
443 void replace_by(Node* new_node); | |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
444 // Globally replace this node by a given new node, updating all uses |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
445 // and cutting input edges of old node. |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
446 void subsume_by(Node* new_node) { |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
447 replace_by(new_node); |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
448 disconnect_inputs(NULL); |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
449 } |
0 | 450 void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); |
451 // Find the one non-null required input. RegionNode only | |
452 Node *nonnull_req() const; | |
453 // Add or remove precedence edges | |
454 void add_prec( Node *n ); | |
455 void rm_prec( uint i ); | |
456 void set_prec( uint i, Node *n ) { | |
457 assert( is_not_dead(n), "can not use dead node"); | |
458 assert( i >= _cnt, "not a precedence edge"); | |
459 if (_in[i] != NULL) _in[i]->del_out((Node *)this); | |
460 _in[i] = n; | |
461 if (n != NULL) n->add_out((Node *)this); | |
462 } | |
463 // Set this node's index, used by cisc_version to replace current node | |
464 void set_idx(uint new_idx) { | |
465 const node_idx_t* ref = &_idx; | |
466 *(node_idx_t*)ref = new_idx; | |
467 } | |
468 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) | |
469 void swap_edges(uint i1, uint i2) { | |
470 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); | |
471 // Def-Use info is unchanged | |
472 Node* n1 = in(i1); | |
473 Node* n2 = in(i2); | |
474 _in[i1] = n2; | |
475 _in[i2] = n1; | |
476 // If this node is in the hash table, make sure it doesn't need a rehash. | |
477 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); | |
478 } | |
479 | |
480 // Iterators over input Nodes for a Node X are written as: | |
481 // for( i = 0; i < X.req(); i++ ) ... X[i] ... | |
482 // NOTE: Required edges can contain embedded NULL pointers. | |
483 | |
484 //----------------- Other Node Properties | |
485 | |
486 // Generate class id for some ideal nodes to avoid virtual query | |
487 // methods is_<Node>(). | |
488 // Class id is the set of bits corresponded to the node class and all its | |
489 // super classes so that queries for super classes are also valid. | |
490 // Subclasses of the same super class have different assigned bit | |
491 // (the third parameter in the macro DEFINE_CLASS_ID). | |
492 // Classes with deeper hierarchy are declared first. | |
493 // Classes with the same hierarchy depth are sorted by usage frequency. | |
494 // | |
495 // The query method masks the bits to cut off bits of subclasses | |
496 // and then compare the result with the class id | |
497 // (see the macro DEFINE_CLASS_QUERY below). | |
498 // | |
499 // Class_MachCall=30, ClassMask_MachCall=31 | |
500 // 12 8 4 0 | |
501 // 0 0 0 0 0 0 0 0 1 1 1 1 0 | |
502 // | | | | | |
503 // | | | Bit_Mach=2 | |
504 // | | Bit_MachReturn=4 | |
505 // | Bit_MachSafePoint=8 | |
506 // Bit_MachCall=16 | |
507 // | |
508 // Class_CountedLoop=56, ClassMask_CountedLoop=63 | |
509 // 12 8 4 0 | |
510 // 0 0 0 0 0 0 0 1 1 1 0 0 0 | |
511 // | | | | |
512 // | | Bit_Region=8 | |
513 // | Bit_Loop=16 | |
514 // Bit_CountedLoop=32 | |
515 | |
516 #define DEFINE_CLASS_ID(cl, supcl, subn) \ | |
517 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ | |
518 Class_##cl = Class_##supcl + Bit_##cl , \ | |
519 ClassMask_##cl = ((Bit_##cl << 1) - 1) , | |
520 | |
521 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods | |
522 // so that it's values fits into 16 bits. | |
523 enum NodeClasses { | |
524 Bit_Node = 0x0000, | |
525 Class_Node = 0x0000, | |
526 ClassMask_Node = 0xFFFF, | |
527 | |
528 DEFINE_CLASS_ID(Multi, Node, 0) | |
529 DEFINE_CLASS_ID(SafePoint, Multi, 0) | |
530 DEFINE_CLASS_ID(Call, SafePoint, 0) | |
531 DEFINE_CLASS_ID(CallJava, Call, 0) | |
532 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) | |
533 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) | |
534 DEFINE_CLASS_ID(CallRuntime, Call, 1) | |
535 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) | |
536 DEFINE_CLASS_ID(Allocate, Call, 2) | |
537 DEFINE_CLASS_ID(AllocateArray, Allocate, 0) | |
538 DEFINE_CLASS_ID(AbstractLock, Call, 3) | |
539 DEFINE_CLASS_ID(Lock, AbstractLock, 0) | |
540 DEFINE_CLASS_ID(Unlock, AbstractLock, 1) | |
541 DEFINE_CLASS_ID(MultiBranch, Multi, 1) | |
542 DEFINE_CLASS_ID(PCTable, MultiBranch, 0) | |
543 DEFINE_CLASS_ID(Catch, PCTable, 0) | |
544 DEFINE_CLASS_ID(Jump, PCTable, 1) | |
545 DEFINE_CLASS_ID(If, MultiBranch, 1) | |
546 DEFINE_CLASS_ID(CountedLoopEnd, If, 0) | |
547 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) | |
548 DEFINE_CLASS_ID(Start, Multi, 2) | |
549 DEFINE_CLASS_ID(MemBar, Multi, 3) | |
550 DEFINE_CLASS_ID(Initialize, MemBar, 0) | |
551 | |
552 DEFINE_CLASS_ID(Mach, Node, 1) | |
553 DEFINE_CLASS_ID(MachReturn, Mach, 0) | |
554 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) | |
555 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) | |
556 DEFINE_CLASS_ID(MachCallJava, MachCall, 0) | |
557 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) | |
558 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) | |
559 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) | |
560 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) | |
561 DEFINE_CLASS_ID(MachSpillCopy, Mach, 1) | |
562 DEFINE_CLASS_ID(MachNullCheck, Mach, 2) | |
563 DEFINE_CLASS_ID(MachIf, Mach, 3) | |
564 DEFINE_CLASS_ID(MachTemp, Mach, 4) | |
565 | |
566 DEFINE_CLASS_ID(Proj, Node, 2) | |
567 DEFINE_CLASS_ID(CatchProj, Proj, 0) | |
568 DEFINE_CLASS_ID(JumpProj, Proj, 1) | |
569 DEFINE_CLASS_ID(IfTrue, Proj, 2) | |
570 DEFINE_CLASS_ID(IfFalse, Proj, 3) | |
33 | 571 DEFINE_CLASS_ID(Parm, Proj, 4) |
0 | 572 |
573 DEFINE_CLASS_ID(Region, Node, 3) | |
574 DEFINE_CLASS_ID(Loop, Region, 0) | |
575 DEFINE_CLASS_ID(Root, Loop, 0) | |
576 DEFINE_CLASS_ID(CountedLoop, Loop, 1) | |
577 | |
578 DEFINE_CLASS_ID(Sub, Node, 4) | |
579 DEFINE_CLASS_ID(Cmp, Sub, 0) | |
580 DEFINE_CLASS_ID(FastLock, Cmp, 0) | |
581 DEFINE_CLASS_ID(FastUnlock, Cmp, 1) | |
582 | |
583 DEFINE_CLASS_ID(Type, Node, 5) | |
584 DEFINE_CLASS_ID(Phi, Type, 0) | |
585 DEFINE_CLASS_ID(ConstraintCast, Type, 1) | |
586 DEFINE_CLASS_ID(CheckCastPP, Type, 2) | |
587 DEFINE_CLASS_ID(CMove, Type, 3) | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
588 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
589 DEFINE_CLASS_ID(DecodeN, Type, 5) |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
590 DEFINE_CLASS_ID(EncodeP, Type, 6) |
0 | 591 |
592 DEFINE_CLASS_ID(Mem, Node, 6) | |
593 DEFINE_CLASS_ID(Load, Mem, 0) | |
594 DEFINE_CLASS_ID(Store, Mem, 1) | |
595 DEFINE_CLASS_ID(LoadStore, Mem, 2) | |
596 | |
597 DEFINE_CLASS_ID(MergeMem, Node, 7) | |
598 DEFINE_CLASS_ID(Bool, Node, 8) | |
599 DEFINE_CLASS_ID(AddP, Node, 9) | |
600 DEFINE_CLASS_ID(BoxLock, Node, 10) | |
601 DEFINE_CLASS_ID(Add, Node, 11) | |
602 DEFINE_CLASS_ID(Mul, Node, 12) | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
603 DEFINE_CLASS_ID(ClearArray, Node, 13) |
0 | 604 |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
605 _max_classes = ClassMask_ClearArray |
0 | 606 }; |
607 #undef DEFINE_CLASS_ID | |
608 | |
609 // Flags are sorted by usage frequency. | |
610 enum NodeFlags { | |
611 Flag_is_Copy = 0x01, // should be first bit to avoid shift | |
612 Flag_is_Call = Flag_is_Copy << 1, | |
613 Flag_rematerialize = Flag_is_Call << 1, | |
614 Flag_needs_anti_dependence_check = Flag_rematerialize << 1, | |
615 Flag_is_macro = Flag_needs_anti_dependence_check << 1, | |
616 Flag_is_Con = Flag_is_macro << 1, | |
617 Flag_is_cisc_alternate = Flag_is_Con << 1, | |
618 Flag_is_Branch = Flag_is_cisc_alternate << 1, | |
619 Flag_is_block_start = Flag_is_Branch << 1, | |
620 Flag_is_Goto = Flag_is_block_start << 1, | |
621 Flag_is_dead_loop_safe = Flag_is_Goto << 1, | |
622 Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1, | |
623 Flag_is_safepoint_node = Flag_may_be_short_branch << 1, | |
624 Flag_is_pc_relative = Flag_is_safepoint_node << 1, | |
625 Flag_is_Vector = Flag_is_pc_relative << 1, | |
626 _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination | |
627 }; | |
628 | |
629 private: | |
630 jushort _class_id; | |
631 jushort _flags; | |
632 | |
633 protected: | |
634 // These methods should be called from constructors only. | |
635 void init_class_id(jushort c) { | |
636 assert(c <= _max_classes, "invalid node class"); | |
637 _class_id = c; // cast out const | |
638 } | |
639 void init_flags(jushort fl) { | |
640 assert(fl <= _max_flags, "invalid node flag"); | |
641 _flags |= fl; | |
642 } | |
643 void clear_flag(jushort fl) { | |
644 assert(fl <= _max_flags, "invalid node flag"); | |
645 _flags &= ~fl; | |
646 } | |
647 | |
648 public: | |
649 const jushort class_id() const { return _class_id; } | |
650 | |
651 const jushort flags() const { return _flags; } | |
652 | |
653 // Return a dense integer opcode number | |
654 virtual int Opcode() const; | |
655 | |
656 // Virtual inherited Node size | |
657 virtual uint size_of() const; | |
658 | |
659 // Other interesting Node properties | |
660 | |
661 // Special case: is_Call() returns true for both CallNode and MachCallNode. | |
662 bool is_Call() const { | |
663 return (_flags & Flag_is_Call) != 0; | |
664 } | |
665 | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
666 CallNode* isa_Call() const { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
667 return is_Call() ? as_Call() : NULL; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
668 } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
669 |
0 | 670 CallNode *as_Call() const { // Only for CallNode (not for MachCallNode) |
671 assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class"); | |
672 return (CallNode*)this; | |
673 } | |
674 | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
675 #define DEFINE_CLASS_QUERY(type) \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
676 bool is_##type() const { \ |
0 | 677 return ((_class_id & ClassMask_##type) == Class_##type); \ |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
678 } \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
679 type##Node *as_##type() const { \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
680 assert(is_##type(), "invalid node class"); \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
681 return (type##Node*)this; \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
682 } \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
683 type##Node* isa_##type() const { \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
684 return (is_##type()) ? as_##type() : NULL; \ |
0 | 685 } |
686 | |
687 DEFINE_CLASS_QUERY(AbstractLock) | |
688 DEFINE_CLASS_QUERY(Add) | |
689 DEFINE_CLASS_QUERY(AddP) | |
690 DEFINE_CLASS_QUERY(Allocate) | |
691 DEFINE_CLASS_QUERY(AllocateArray) | |
692 DEFINE_CLASS_QUERY(Bool) | |
693 DEFINE_CLASS_QUERY(BoxLock) | |
694 DEFINE_CLASS_QUERY(CallDynamicJava) | |
695 DEFINE_CLASS_QUERY(CallJava) | |
696 DEFINE_CLASS_QUERY(CallLeaf) | |
697 DEFINE_CLASS_QUERY(CallRuntime) | |
698 DEFINE_CLASS_QUERY(CallStaticJava) | |
699 DEFINE_CLASS_QUERY(Catch) | |
700 DEFINE_CLASS_QUERY(CatchProj) | |
701 DEFINE_CLASS_QUERY(CheckCastPP) | |
702 DEFINE_CLASS_QUERY(ConstraintCast) | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
703 DEFINE_CLASS_QUERY(ClearArray) |
0 | 704 DEFINE_CLASS_QUERY(CMove) |
705 DEFINE_CLASS_QUERY(Cmp) | |
706 DEFINE_CLASS_QUERY(CountedLoop) | |
707 DEFINE_CLASS_QUERY(CountedLoopEnd) | |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
708 DEFINE_CLASS_QUERY(DecodeN) |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
709 DEFINE_CLASS_QUERY(EncodeP) |
0 | 710 DEFINE_CLASS_QUERY(FastLock) |
711 DEFINE_CLASS_QUERY(FastUnlock) | |
712 DEFINE_CLASS_QUERY(If) | |
713 DEFINE_CLASS_QUERY(IfFalse) | |
714 DEFINE_CLASS_QUERY(IfTrue) | |
715 DEFINE_CLASS_QUERY(Initialize) | |
716 DEFINE_CLASS_QUERY(Jump) | |
717 DEFINE_CLASS_QUERY(JumpProj) | |
718 DEFINE_CLASS_QUERY(Load) | |
719 DEFINE_CLASS_QUERY(LoadStore) | |
720 DEFINE_CLASS_QUERY(Lock) | |
721 DEFINE_CLASS_QUERY(Loop) | |
722 DEFINE_CLASS_QUERY(Mach) | |
723 DEFINE_CLASS_QUERY(MachCall) | |
724 DEFINE_CLASS_QUERY(MachCallDynamicJava) | |
725 DEFINE_CLASS_QUERY(MachCallJava) | |
726 DEFINE_CLASS_QUERY(MachCallLeaf) | |
727 DEFINE_CLASS_QUERY(MachCallRuntime) | |
728 DEFINE_CLASS_QUERY(MachCallStaticJava) | |
729 DEFINE_CLASS_QUERY(MachIf) | |
730 DEFINE_CLASS_QUERY(MachNullCheck) | |
731 DEFINE_CLASS_QUERY(MachReturn) | |
732 DEFINE_CLASS_QUERY(MachSafePoint) | |
733 DEFINE_CLASS_QUERY(MachSpillCopy) | |
734 DEFINE_CLASS_QUERY(MachTemp) | |
735 DEFINE_CLASS_QUERY(Mem) | |
736 DEFINE_CLASS_QUERY(MemBar) | |
737 DEFINE_CLASS_QUERY(MergeMem) | |
738 DEFINE_CLASS_QUERY(Mul) | |
739 DEFINE_CLASS_QUERY(Multi) | |
740 DEFINE_CLASS_QUERY(MultiBranch) | |
33 | 741 DEFINE_CLASS_QUERY(Parm) |
0 | 742 DEFINE_CLASS_QUERY(PCTable) |
743 DEFINE_CLASS_QUERY(Phi) | |
744 DEFINE_CLASS_QUERY(Proj) | |
745 DEFINE_CLASS_QUERY(Region) | |
746 DEFINE_CLASS_QUERY(Root) | |
747 DEFINE_CLASS_QUERY(SafePoint) | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
748 DEFINE_CLASS_QUERY(SafePointScalarObject) |
0 | 749 DEFINE_CLASS_QUERY(Start) |
750 DEFINE_CLASS_QUERY(Store) | |
751 DEFINE_CLASS_QUERY(Sub) | |
752 DEFINE_CLASS_QUERY(Type) | |
753 DEFINE_CLASS_QUERY(Unlock) | |
754 | |
755 #undef DEFINE_CLASS_QUERY | |
756 | |
757 // duplicate of is_MachSpillCopy() | |
758 bool is_SpillCopy () const { | |
759 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); | |
760 } | |
761 | |
762 bool is_Con () const { return (_flags & Flag_is_Con) != 0; } | |
763 bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; } | |
764 // The data node which is safe to leave in dead loop during IGVN optimization. | |
765 bool is_dead_loop_safe() const { | |
126
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
766 return is_Phi() || (is_Proj() && in(0) == NULL) || |
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
767 ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 && |
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
768 (!is_Proj() || !in(0)->is_Allocate())); |
0 | 769 } |
770 | |
771 // is_Copy() returns copied edge index (0 or 1) | |
772 uint is_Copy() const { return (_flags & Flag_is_Copy); } | |
773 | |
774 virtual bool is_CFG() const { return false; } | |
775 | |
776 // If this node is control-dependent on a test, can it be | |
777 // rerouted to a dominating equivalent test? This is usually | |
778 // true of non-CFG nodes, but can be false for operations which | |
779 // depend for their correct sequencing on more than one test. | |
780 // (In that case, hoisting to a dominating test may silently | |
781 // skip some other important test.) | |
782 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; | |
783 | |
784 // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd' | |
785 bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; } | |
786 | |
787 // When building basic blocks, I need to have a notion of block beginning | |
788 // Nodes, next block selector Nodes (block enders), and next block | |
789 // projections. These calls need to work on their machine equivalents. The | |
790 // Ideal beginning Nodes are RootNode, RegionNode and StartNode. | |
791 bool is_block_start() const { | |
792 if ( is_Region() ) | |
793 return this == (const Node*)in(0); | |
794 else | |
795 return (_flags & Flag_is_block_start) != 0; | |
796 } | |
797 | |
798 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, | |
799 // Goto and Return. This call also returns the block ending Node. | |
800 virtual const Node *is_block_proj() const; | |
801 | |
802 // The node is a "macro" node which needs to be expanded before matching | |
803 bool is_macro() const { return (_flags & Flag_is_macro) != 0; } | |
804 | |
805 // Value is a vector of primitive values | |
806 bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; } | |
807 | |
808 //----------------- Optimization | |
809 | |
810 // Get the worst-case Type output for this Node. | |
811 virtual const class Type *bottom_type() const; | |
812 | |
813 // If we find a better type for a node, try to record it permanently. | |
814 // Return true if this node actually changed. | |
815 // Be sure to do the hash_delete game in the "rehash" variant. | |
816 void raise_bottom_type(const Type* new_type); | |
817 | |
818 // Get the address type with which this node uses and/or defs memory, | |
819 // or NULL if none. The address type is conservatively wide. | |
820 // Returns non-null for calls, membars, loads, stores, etc. | |
821 // Returns TypePtr::BOTTOM if the node touches memory "broadly". | |
822 virtual const class TypePtr *adr_type() const { return NULL; } | |
823 | |
824 // Return an existing node which computes the same function as this node. | |
825 // The optimistic combined algorithm requires this to return a Node which | |
826 // is a small number of steps away (e.g., one of my inputs). | |
827 virtual Node *Identity( PhaseTransform *phase ); | |
828 | |
829 // Return the set of values this Node can take on at runtime. | |
830 virtual const Type *Value( PhaseTransform *phase ) const; | |
831 | |
832 // Return a node which is more "ideal" than the current node. | |
833 // The invariants on this call are subtle. If in doubt, read the | |
834 // treatise in node.cpp above the default implemention AND TEST WITH | |
835 // +VerifyIterativeGVN! | |
836 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
837 | |
838 // Some nodes have specific Ideal subgraph transformations only if they are | |
839 // unique users of specific nodes. Such nodes should be put on IGVN worklist | |
840 // for the transformations to happen. | |
841 bool has_special_unique_user() const; | |
842 | |
119
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
843 // Skip Proj and CatchProj nodes chains. Check for Null and Top. |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
844 Node* find_exact_control(Node* ctrl); |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
845 |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
846 // Check if 'this' node dominates or equal to 'sub'. |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
847 bool dominates(Node* sub, Node_List &nlist); |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
848 |
0 | 849 protected: |
850 bool remove_dead_region(PhaseGVN *phase, bool can_reshape); | |
851 public: | |
852 | |
853 // Idealize graph, using DU info. Done after constant propagation | |
854 virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); | |
855 | |
856 // See if there is valid pipeline info | |
857 static const Pipeline *pipeline_class(); | |
858 virtual const Pipeline *pipeline() const; | |
859 | |
860 // Compute the latency from the def to this instruction of the ith input node | |
861 uint latency(uint i); | |
862 | |
863 // Hash & compare functions, for pessimistic value numbering | |
864 | |
865 // If the hash function returns the special sentinel value NO_HASH, | |
866 // the node is guaranteed never to compare equal to any other node. | |
605 | 867 // If we accidentally generate a hash with value NO_HASH the node |
0 | 868 // won't go into the table and we'll lose a little optimization. |
869 enum { NO_HASH = 0 }; | |
870 virtual uint hash() const; | |
871 virtual uint cmp( const Node &n ) const; | |
872 | |
873 // Operation appears to be iteratively computed (such as an induction variable) | |
874 // It is possible for this operation to return false for a loop-varying | |
875 // value, if it appears (by local graph inspection) to be computed by a simple conditional. | |
876 bool is_iteratively_computed(); | |
877 | |
878 // Determine if a node is Counted loop induction variable. | |
879 // The method is defined in loopnode.cpp. | |
880 const Node* is_loop_iv() const; | |
881 | |
882 // Return a node with opcode "opc" and same inputs as "this" if one can | |
883 // be found; Otherwise return NULL; | |
884 Node* find_similar(int opc); | |
885 | |
886 // Return the unique control out if only one. Null if none or more than one. | |
887 Node* unique_ctrl_out(); | |
888 | |
889 //----------------- Code Generation | |
890 | |
891 // Ideal register class for Matching. Zero means unmatched instruction | |
892 // (these are cloned instead of converted to machine nodes). | |
893 virtual uint ideal_reg() const; | |
894 | |
895 static const uint NotAMachineReg; // must be > max. machine register | |
896 | |
897 // Do we Match on this edge index or not? Generally false for Control | |
898 // and true for everything else. Weird for calls & returns. | |
899 virtual uint match_edge(uint idx) const; | |
900 | |
901 // Register class output is returned in | |
902 virtual const RegMask &out_RegMask() const; | |
903 // Register class input is expected in | |
904 virtual const RegMask &in_RegMask(uint) const; | |
905 // Should we clone rather than spill this instruction? | |
906 bool rematerialize() const; | |
907 | |
908 // Return JVM State Object if this Node carries debug info, or NULL otherwise | |
909 virtual JVMState* jvms() const; | |
910 | |
911 // Print as assembly | |
912 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; | |
913 // Emit bytes starting at parameter 'ptr' | |
914 // Bump 'ptr' by the number of output bytes | |
915 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; | |
916 // Size of instruction in bytes | |
917 virtual uint size(PhaseRegAlloc *ra_) const; | |
918 | |
919 // Convenience function to extract an integer constant from a node. | |
920 // If it is not an integer constant (either Con, CastII, or Mach), | |
921 // return value_if_unknown. | |
922 jint find_int_con(jint value_if_unknown) const { | |
923 const TypeInt* t = find_int_type(); | |
924 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
925 } | |
926 // Return the constant, knowing it is an integer constant already | |
927 jint get_int() const { | |
928 const TypeInt* t = find_int_type(); | |
929 guarantee(t != NULL, "must be con"); | |
930 return t->get_con(); | |
931 } | |
932 // Here's where the work is done. Can produce non-constant int types too. | |
933 const TypeInt* find_int_type() const; | |
934 | |
935 // Same thing for long (and intptr_t, via type.hpp): | |
936 jlong get_long() const { | |
937 const TypeLong* t = find_long_type(); | |
938 guarantee(t != NULL, "must be con"); | |
939 return t->get_con(); | |
940 } | |
941 jlong find_long_con(jint value_if_unknown) const { | |
942 const TypeLong* t = find_long_type(); | |
943 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
944 } | |
945 const TypeLong* find_long_type() const; | |
946 | |
947 // These guys are called by code generated by ADLC: | |
948 intptr_t get_ptr() const; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
65
diff
changeset
|
949 intptr_t get_narrowcon() const; |
0 | 950 jdouble getd() const; |
951 jfloat getf() const; | |
952 | |
953 // Nodes which are pinned into basic blocks | |
954 virtual bool pinned() const { return false; } | |
955 | |
956 // Nodes which use memory without consuming it, hence need antidependences | |
957 // More specifically, needs_anti_dependence_check returns true iff the node | |
958 // (a) does a load, and (b) does not perform a store (except perhaps to a | |
959 // stack slot or some other unaliased location). | |
960 bool needs_anti_dependence_check() const; | |
961 | |
962 // Return which operand this instruction may cisc-spill. In other words, | |
963 // return operand position that can convert from reg to memory access | |
964 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } | |
965 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } | |
966 | |
967 //----------------- Graph walking | |
968 public: | |
969 // Walk and apply member functions recursively. | |
970 // Supplied (this) pointer is root. | |
971 void walk(NFunc pre, NFunc post, void *env); | |
972 static void nop(Node &, void*); // Dummy empty function | |
973 static void packregion( Node &n, void* ); | |
974 private: | |
975 void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); | |
976 | |
977 //----------------- Printing, etc | |
978 public: | |
979 #ifndef PRODUCT | |
980 Node* find(int idx) const; // Search the graph for the given idx. | |
981 Node* find_ctrl(int idx) const; // Search control ancestors for the given idx. | |
982 void dump() const; // Print this node, | |
983 void dump(int depth) const; // Print this node, recursively to depth d | |
984 void dump_ctrl(int depth) const; // Print control nodes, to depth d | |
985 virtual void dump_req() const; // Print required-edge info | |
986 virtual void dump_prec() const; // Print precedence-edge info | |
987 virtual void dump_out() const; // Print the output edge info | |
988 virtual void dump_spec(outputStream *st) const {}; // Print per-node info | |
989 void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges | |
990 void verify() const; // Check Def-Use info for my subgraph | |
991 static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space); | |
992 | |
993 // This call defines a class-unique string used to identify class instances | |
994 virtual const char *Name() const; | |
995 | |
996 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) | |
997 // RegMask Print Functions | |
998 void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } | |
999 void dump_out_regmask() { out_RegMask().dump(); } | |
1000 static int _in_dump_cnt; | |
1001 static bool in_dump() { return _in_dump_cnt > 0; } | |
1002 void fast_dump() const { | |
1003 tty->print("%4d: %-17s", _idx, Name()); | |
1004 for (uint i = 0; i < len(); i++) | |
1005 if (in(i)) | |
1006 tty->print(" %4d", in(i)->_idx); | |
1007 else | |
1008 tty->print(" NULL"); | |
1009 tty->print("\n"); | |
1010 } | |
1011 #endif | |
1012 #ifdef ASSERT | |
1013 void verify_construction(); | |
1014 bool verify_jvms(const JVMState* jvms) const; | |
1015 int _debug_idx; // Unique value assigned to every node. | |
1016 int debug_idx() const { return _debug_idx; } | |
1017 void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } | |
1018 | |
1019 Node* _debug_orig; // Original version of this, if any. | |
1020 Node* debug_orig() const { return _debug_orig; } | |
1021 void set_debug_orig(Node* orig); // _debug_orig = orig | |
1022 | |
1023 int _hash_lock; // Barrier to modifications of nodes in the hash table | |
1024 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } | |
1025 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } | |
1026 | |
1027 static void init_NodeProperty(); | |
1028 | |
1029 #if OPTO_DU_ITERATOR_ASSERT | |
1030 const Node* _last_del; // The last deleted node. | |
1031 uint _del_tick; // Bumped when a deletion happens.. | |
1032 #endif | |
1033 #endif | |
1034 }; | |
1035 | |
1036 //----------------------------------------------------------------------------- | |
1037 // Iterators over DU info, and associated Node functions. | |
1038 | |
1039 #if OPTO_DU_ITERATOR_ASSERT | |
1040 | |
1041 // Common code for assertion checking on DU iterators. | |
1042 class DUIterator_Common VALUE_OBJ_CLASS_SPEC { | |
1043 #ifdef ASSERT | |
1044 protected: | |
1045 bool _vdui; // cached value of VerifyDUIterators | |
1046 const Node* _node; // the node containing the _out array | |
1047 uint _outcnt; // cached node->_outcnt | |
1048 uint _del_tick; // cached node->_del_tick | |
1049 Node* _last; // last value produced by the iterator | |
1050 | |
1051 void sample(const Node* node); // used by c'tor to set up for verifies | |
1052 void verify(const Node* node, bool at_end_ok = false); | |
1053 void verify_resync(); | |
1054 void reset(const DUIterator_Common& that); | |
1055 | |
1056 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators | |
1057 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } | |
1058 #else | |
1059 #define I_VDUI_ONLY(i,x) { } | |
1060 #endif //ASSERT | |
1061 }; | |
1062 | |
1063 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) | |
1064 | |
1065 // Default DU iterator. Allows appends onto the out array. | |
1066 // Allows deletion from the out array only at the current point. | |
1067 // Usage: | |
1068 // for (DUIterator i = x->outs(); x->has_out(i); i++) { | |
1069 // Node* y = x->out(i); | |
1070 // ... | |
1071 // } | |
1072 // Compiles in product mode to a unsigned integer index, which indexes | |
1073 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate | |
1074 // also reloads x->_outcnt. If you delete, you must perform "--i" just | |
1075 // before continuing the loop. You must delete only the last-produced | |
1076 // edge. You must delete only a single copy of the last-produced edge, | |
1077 // or else you must delete all copies at once (the first time the edge | |
1078 // is produced by the iterator). | |
1079 class DUIterator : public DUIterator_Common { | |
1080 friend class Node; | |
1081 | |
1082 // This is the index which provides the product-mode behavior. | |
1083 // Whatever the product-mode version of the system does to the | |
1084 // DUI index is done to this index. All other fields in | |
1085 // this class are used only for assertion checking. | |
1086 uint _idx; | |
1087 | |
1088 #ifdef ASSERT | |
1089 uint _refresh_tick; // Records the refresh activity. | |
1090 | |
1091 void sample(const Node* node); // Initialize _refresh_tick etc. | |
1092 void verify(const Node* node, bool at_end_ok = false); | |
1093 void verify_increment(); // Verify an increment operation. | |
1094 void verify_resync(); // Verify that we can back up over a deletion. | |
1095 void verify_finish(); // Verify that the loop terminated properly. | |
1096 void refresh(); // Resample verification info. | |
1097 void reset(const DUIterator& that); // Resample after assignment. | |
1098 #endif | |
1099 | |
1100 DUIterator(const Node* node, int dummy_to_avoid_conversion) | |
1101 { _idx = 0; debug_only(sample(node)); } | |
1102 | |
1103 public: | |
1104 // initialize to garbage; clear _vdui to disable asserts | |
1105 DUIterator() | |
1106 { /*initialize to garbage*/ debug_only(_vdui = false); } | |
1107 | |
1108 void operator++(int dummy_to_specify_postfix_op) | |
1109 { _idx++; VDUI_ONLY(verify_increment()); } | |
1110 | |
1111 void operator--() | |
1112 { VDUI_ONLY(verify_resync()); --_idx; } | |
1113 | |
1114 ~DUIterator() | |
1115 { VDUI_ONLY(verify_finish()); } | |
1116 | |
1117 void operator=(const DUIterator& that) | |
1118 { _idx = that._idx; debug_only(reset(that)); } | |
1119 }; | |
1120 | |
1121 DUIterator Node::outs() const | |
1122 { return DUIterator(this, 0); } | |
1123 DUIterator& Node::refresh_out_pos(DUIterator& i) const | |
1124 { I_VDUI_ONLY(i, i.refresh()); return i; } | |
1125 bool Node::has_out(DUIterator& i) const | |
1126 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } | |
1127 Node* Node::out(DUIterator& i) const | |
1128 { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } | |
1129 | |
1130 | |
1131 // Faster DU iterator. Disallows insertions into the out array. | |
1132 // Allows deletion from the out array only at the current point. | |
1133 // Usage: | |
1134 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { | |
1135 // Node* y = x->fast_out(i); | |
1136 // ... | |
1137 // } | |
1138 // Compiles in product mode to raw Node** pointer arithmetic, with | |
1139 // no reloading of pointers from the original node x. If you delete, | |
1140 // you must perform "--i; --imax" just before continuing the loop. | |
1141 // If you delete multiple copies of the same edge, you must decrement | |
1142 // imax, but not i, multiple times: "--i, imax -= num_edges". | |
1143 class DUIterator_Fast : public DUIterator_Common { | |
1144 friend class Node; | |
1145 friend class DUIterator_Last; | |
1146 | |
1147 // This is the pointer which provides the product-mode behavior. | |
1148 // Whatever the product-mode version of the system does to the | |
1149 // DUI pointer is done to this pointer. All other fields in | |
1150 // this class are used only for assertion checking. | |
1151 Node** _outp; | |
1152 | |
1153 #ifdef ASSERT | |
1154 void verify(const Node* node, bool at_end_ok = false); | |
1155 void verify_limit(); | |
1156 void verify_resync(); | |
1157 void verify_relimit(uint n); | |
1158 void reset(const DUIterator_Fast& that); | |
1159 #endif | |
1160 | |
1161 // Note: offset must be signed, since -1 is sometimes passed | |
1162 DUIterator_Fast(const Node* node, ptrdiff_t offset) | |
1163 { _outp = node->_out + offset; debug_only(sample(node)); } | |
1164 | |
1165 public: | |
1166 // initialize to garbage; clear _vdui to disable asserts | |
1167 DUIterator_Fast() | |
1168 { /*initialize to garbage*/ debug_only(_vdui = false); } | |
1169 | |
1170 void operator++(int dummy_to_specify_postfix_op) | |
1171 { _outp++; VDUI_ONLY(verify(_node, true)); } | |
1172 | |
1173 void operator--() | |
1174 { VDUI_ONLY(verify_resync()); --_outp; } | |
1175 | |
1176 void operator-=(uint n) // applied to the limit only | |
1177 { _outp -= n; VDUI_ONLY(verify_relimit(n)); } | |
1178 | |
1179 bool operator<(DUIterator_Fast& limit) { | |
1180 I_VDUI_ONLY(*this, this->verify(_node, true)); | |
1181 I_VDUI_ONLY(limit, limit.verify_limit()); | |
1182 return _outp < limit._outp; | |
1183 } | |
1184 | |
1185 void operator=(const DUIterator_Fast& that) | |
1186 { _outp = that._outp; debug_only(reset(that)); } | |
1187 }; | |
1188 | |
1189 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { | |
1190 // Assign a limit pointer to the reference argument: | |
1191 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); | |
1192 // Return the base pointer: | |
1193 return DUIterator_Fast(this, 0); | |
1194 } | |
1195 Node* Node::fast_out(DUIterator_Fast& i) const { | |
1196 I_VDUI_ONLY(i, i.verify(this)); | |
1197 return debug_only(i._last=) *i._outp; | |
1198 } | |
1199 | |
1200 | |
1201 // Faster DU iterator. Requires each successive edge to be removed. | |
1202 // Does not allow insertion of any edges. | |
1203 // Usage: | |
1204 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { | |
1205 // Node* y = x->last_out(i); | |
1206 // ... | |
1207 // } | |
1208 // Compiles in product mode to raw Node** pointer arithmetic, with | |
1209 // no reloading of pointers from the original node x. | |
1210 class DUIterator_Last : private DUIterator_Fast { | |
1211 friend class Node; | |
1212 | |
1213 #ifdef ASSERT | |
1214 void verify(const Node* node, bool at_end_ok = false); | |
1215 void verify_limit(); | |
1216 void verify_step(uint num_edges); | |
1217 #endif | |
1218 | |
1219 // Note: offset must be signed, since -1 is sometimes passed | |
1220 DUIterator_Last(const Node* node, ptrdiff_t offset) | |
1221 : DUIterator_Fast(node, offset) { } | |
1222 | |
1223 void operator++(int dummy_to_specify_postfix_op) {} // do not use | |
1224 void operator<(int) {} // do not use | |
1225 | |
1226 public: | |
1227 DUIterator_Last() { } | |
1228 // initialize to garbage | |
1229 | |
1230 void operator--() | |
1231 { _outp--; VDUI_ONLY(verify_step(1)); } | |
1232 | |
1233 void operator-=(uint n) | |
1234 { _outp -= n; VDUI_ONLY(verify_step(n)); } | |
1235 | |
1236 bool operator>=(DUIterator_Last& limit) { | |
1237 I_VDUI_ONLY(*this, this->verify(_node, true)); | |
1238 I_VDUI_ONLY(limit, limit.verify_limit()); | |
1239 return _outp >= limit._outp; | |
1240 } | |
1241 | |
1242 void operator=(const DUIterator_Last& that) | |
1243 { DUIterator_Fast::operator=(that); } | |
1244 }; | |
1245 | |
1246 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { | |
1247 // Assign a limit pointer to the reference argument: | |
1248 imin = DUIterator_Last(this, 0); | |
1249 // Return the initial pointer: | |
1250 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); | |
1251 } | |
1252 Node* Node::last_out(DUIterator_Last& i) const { | |
1253 I_VDUI_ONLY(i, i.verify(this)); | |
1254 return debug_only(i._last=) *i._outp; | |
1255 } | |
1256 | |
1257 #endif //OPTO_DU_ITERATOR_ASSERT | |
1258 | |
1259 #undef I_VDUI_ONLY | |
1260 #undef VDUI_ONLY | |
1261 | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1262 // An Iterator that truly follows the iterator pattern. Doesn't |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1263 // support deletion but could be made to. |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1264 // |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1265 // for (SimpleDUIterator i(n); i.has_next(); i.next()) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1266 // Node* m = i.get(); |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1267 // |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1268 class SimpleDUIterator : public StackObj { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1269 private: |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1270 Node* node; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1271 DUIterator_Fast i; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1272 DUIterator_Fast imax; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1273 public: |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1274 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1275 bool has_next() { return i < imax; } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1276 void next() { i++; } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1277 Node* get() { return node->fast_out(i); } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1278 }; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1279 |
0 | 1280 |
1281 //----------------------------------------------------------------------------- | |
1282 // Map dense integer indices to Nodes. Uses classic doubling-array trick. | |
1283 // Abstractly provides an infinite array of Node*'s, initialized to NULL. | |
1284 // Note that the constructor just zeros things, and since I use Arena | |
1285 // allocation I do not need a destructor to reclaim storage. | |
1286 class Node_Array : public ResourceObj { | |
1287 protected: | |
1288 Arena *_a; // Arena to allocate in | |
1289 uint _max; | |
1290 Node **_nodes; | |
1291 void grow( uint i ); // Grow array node to fit | |
1292 public: | |
1293 Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) { | |
1294 _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize ); | |
1295 for( int i = 0; i < OptoNodeListSize; i++ ) { | |
1296 _nodes[i] = NULL; | |
1297 } | |
1298 } | |
1299 | |
1300 Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} | |
1301 Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped | |
1302 { return (i<_max) ? _nodes[i] : (Node*)NULL; } | |
1303 Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; } | |
1304 Node **adr() { return _nodes; } | |
1305 // Extend the mapping: index i maps to Node *n. | |
1306 void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } | |
1307 void insert( uint i, Node *n ); | |
1308 void remove( uint i ); // Remove, preserving order | |
1309 void sort( C_sort_func_t func); | |
1310 void reset( Arena *new_a ); // Zap mapping to empty; reclaim storage | |
1311 void clear(); // Set all entries to NULL, keep storage | |
1312 uint Size() const { return _max; } | |
1313 void dump() const; | |
1314 }; | |
1315 | |
1316 class Node_List : public Node_Array { | |
1317 uint _cnt; | |
1318 public: | |
1319 Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} | |
1320 Node_List(Arena *a) : Node_Array(a), _cnt(0) {} | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1321 bool contains(Node* n) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1322 for (uint e = 0; e < size(); e++) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1323 if (at(e) == n) return true; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1324 } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1325 return false; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1326 } |
0 | 1327 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } |
1328 void remove( uint i ) { Node_Array::remove(i); _cnt--; } | |
1329 void push( Node *b ) { map(_cnt++,b); } | |
1330 void yank( Node *n ); // Find and remove | |
1331 Node *pop() { return _nodes[--_cnt]; } | |
1332 Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;} | |
1333 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage | |
1334 uint size() const { return _cnt; } | |
1335 void dump() const; | |
1336 }; | |
1337 | |
//------------------------------Unique_Node_List-------------------------------
// A Node_List that rejects duplicates: membership is tracked by a VectorSet
// bit per node _idx.  pop() uses a round-robin "clock" index so repeated
// push/pop cycles visit entries fairly rather than LIFO.
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;       // One bit per node _idx: currently in the list?
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Push only if not already present (test_set reports the prior bit).
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Round-robin pop: take the element at the clock hand, back-fill its slot
  // with the last element, advance the hand, and clear the membership bit.
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;  // wrap the clock hand
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;        // clear membership bit
    return b;
  }
  // Remove the element at index i by back-filling with the last element
  // (order not preserved); returns the removed node.
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
1382 | |
// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);  // queue n on the worklist consumed by iterative GVN
}
1387 | |
//------------------------------Node_Stack-------------------------------------
// Stack of (Node*, index) pairs, typically used for iterative graph walks
// where 'indx' records which child of 'node' to visit next.  Storage is
// arena-allocated and grows on demand; _inode_top points AT the top element
// (so an empty stack has _inode_top == _inodes - 1).
class Node_Stack {
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  // Allocate in the current thread's resource area; capacity is at least
  // OptoNodeListSize regardless of the requested size.
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  // Same, but allocate in the caller-supplied arena.
  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    // grow() presumably relocates storage and fixes up the three pointers
    // so _inode_top stays valid -- its body is not visible here.
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  // Top-of-stack accessors: no underflow checks -- callers must ensure
  // the stack is non-empty.
  Node *node() const {
    return _inode_top->node;
  }
  // Random access to the i-th element from the bottom (0-based).
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};
1453 | |
1454 | |
1455 //-----------------------------Node_Notes-------------------------------------- | |
1456 // Debugging or profiling annotations loosely and sparsely associated | |
1457 // with some nodes. See Compile::node_notes_at for the accessor. | |
1458 class Node_Notes VALUE_OBJ_CLASS_SPEC { | |
1459 JVMState* _jvms; | |
1460 | |
1461 public: | |
1462 Node_Notes(JVMState* jvms = NULL) { | |
1463 _jvms = jvms; | |
1464 } | |
1465 | |
1466 JVMState* jvms() { return _jvms; } | |
1467 void set_jvms(JVMState* x) { _jvms = x; } | |
1468 | |
1469 // True if there is nothing here. | |
1470 bool is_clear() { | |
1471 return (_jvms == NULL); | |
1472 } | |
1473 | |
1474 // Make there be nothing here. | |
1475 void clear() { | |
1476 _jvms = NULL; | |
1477 } | |
1478 | |
1479 // Make a new, clean node notes. | |
1480 static Node_Notes* make(Compile* C) { | |
1481 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | |
1482 nn->clear(); | |
1483 return nn; | |
1484 } | |
1485 | |
1486 Node_Notes* clone(Compile* C) { | |
1487 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | |
1488 (*nn) = (*this); | |
1489 return nn; | |
1490 } | |
1491 | |
1492 // Absorb any information from source. | |
1493 bool update_from(Node_Notes* source) { | |
1494 bool changed = false; | |
1495 if (source != NULL) { | |
1496 if (source->jvms() != NULL) { | |
1497 set_jvms(source->jvms()); | |
1498 changed = true; | |
1499 } | |
1500 } | |
1501 return changed; | |
1502 } | |
1503 }; | |
1504 | |
// Inlined accessors for Compile::node_notes that require the preceding class:
// Notes are stored in fixed-size blocks of 2^_log2_node_notes_block_size
// entries; 'arr' holds one block pointer per allocated block.
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  // Which block holds idx, and how many blocks (if any) are missing.
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;  // block not allocated; caller forbade growth
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
1519 | |
1520 inline bool | |
1521 Compile::set_node_notes_at(int idx, Node_Notes* value) { | |
1522 if (value == NULL || value->is_clear()) | |
1523 return false; // nothing to write => write nothing | |
1524 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); | |
1525 assert(loc != NULL, ""); | |
1526 return loc->update_from(value); | |
1527 } | |
1528 | |
1529 | |
//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // declared const to discourage casual mutation
public:
  // Replace the type in place.  The const-ness of _type is deliberately
  // cast away here; in debug builds, assert that the change does not
  // perturb the node's hash while it may be sitting in a hash table.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};