Mercurial > hg > truffle
annotate src/share/vm/opto/node.hpp @ 6972:bd7a7ce2e264
6830717: replay of compilations would help with debugging
Summary: When java process crashed in compiler thread, repeat the compilation process will help finding root cause. This is done with using SA dump application class data and replay data from core dump, then use debug version of jvm to recompile the problematic java method.
Reviewed-by: kvn, twisti, sspitsyn
Contributed-by: yumin.qi@oracle.com
author | minqi |
---|---|
date | Mon, 12 Nov 2012 14:03:53 -0800 |
parents | 8e47bac5643a |
children | 2aff40cb4703 |
rev | line source |
---|---|
0 | 1 /* |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1100
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_OPTO_NODE_HPP |
26 #define SHARE_VM_OPTO_NODE_HPP | |
27 | |
28 #include "libadt/port.hpp" | |
29 #include "libadt/vectset.hpp" | |
30 #include "opto/compile.hpp" | |
31 #include "opto/type.hpp" | |
32 | |
0 | 33 // Portions of code courtesy of Clifford Click |
34 | |
35 // Optimization - Graph Style | |
36 | |
37 | |
38 class AbstractLockNode; | |
39 class AddNode; | |
40 class AddPNode; | |
41 class AliasInfo; | |
42 class AllocateArrayNode; | |
43 class AllocateNode; | |
44 class Block; | |
45 class Block_Array; | |
46 class BoolNode; | |
47 class BoxLockNode; | |
48 class CMoveNode; | |
49 class CallDynamicJavaNode; | |
50 class CallJavaNode; | |
51 class CallLeafNode; | |
52 class CallNode; | |
53 class CallRuntimeNode; | |
54 class CallStaticJavaNode; | |
55 class CatchNode; | |
56 class CatchProjNode; | |
57 class CheckCastPPNode; | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
58 class ClearArrayNode; |
0 | 59 class CmpNode; |
60 class CodeBuffer; | |
61 class ConstraintCastNode; | |
62 class ConNode; | |
63 class CountedLoopNode; | |
64 class CountedLoopEndNode; | |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
65 class DecodeNarrowPtrNode; |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
66 class DecodeNNode; |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
67 class DecodeNKlassNode; |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
68 class EncodeNarrowPtrNode; |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
69 class EncodePNode; |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
70 class EncodePKlassNode; |
0 | 71 class FastLockNode; |
72 class FastUnlockNode; | |
73 class IfNode; | |
3842 | 74 class IfFalseNode; |
75 class IfTrueNode; | |
0 | 76 class InitializeNode; |
77 class JVMState; | |
78 class JumpNode; | |
79 class JumpProjNode; | |
80 class LoadNode; | |
81 class LoadStoreNode; | |
82 class LockNode; | |
83 class LoopNode; | |
3853
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
84 class MachBranchNode; |
0 | 85 class MachCallDynamicJavaNode; |
86 class MachCallJavaNode; | |
87 class MachCallLeafNode; | |
88 class MachCallNode; | |
89 class MachCallRuntimeNode; | |
90 class MachCallStaticJavaNode; | |
2008 | 91 class MachConstantBaseNode; |
92 class MachConstantNode; | |
3842 | 93 class MachGotoNode; |
0 | 94 class MachIfNode; |
95 class MachNode; | |
96 class MachNullCheckNode; | |
3842 | 97 class MachProjNode; |
0 | 98 class MachReturnNode; |
99 class MachSafePointNode; | |
100 class MachSpillCopyNode; | |
101 class MachTempNode; | |
102 class Matcher; | |
103 class MemBarNode; | |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4120
diff
changeset
|
104 class MemBarStoreStoreNode; |
0 | 105 class MemNode; |
106 class MergeMemNode; | |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
107 class MulNode; |
0 | 108 class MultiNode; |
109 class MultiBranchNode; | |
110 class NeverBranchNode; | |
111 class Node; | |
112 class Node_Array; | |
113 class Node_List; | |
114 class Node_Stack; | |
115 class NullCheckNode; | |
116 class OopMap; | |
33 | 117 class ParmNode; |
0 | 118 class PCTableNode; |
119 class PhaseCCP; | |
120 class PhaseGVN; | |
121 class PhaseIterGVN; | |
122 class PhaseRegAlloc; | |
123 class PhaseTransform; | |
124 class PhaseValues; | |
125 class PhiNode; | |
126 class Pipeline; | |
127 class ProjNode; | |
128 class RegMask; | |
129 class RegionNode; | |
130 class RootNode; | |
131 class SafePointNode; | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
132 class SafePointScalarObjectNode; |
0 | 133 class StartNode; |
134 class State; | |
135 class StoreNode; | |
136 class SubNode; | |
137 class Type; | |
138 class TypeNode; | |
139 class UnlockNode; | |
3842 | 140 class VectorNode; |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
141 class LoadVectorNode; |
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
142 class StoreVectorNode; |
0 | 143 class VectorSet; |
144 typedef void (*NFunc)(Node&,void*); | |
145 extern "C" { | |
146 typedef int (*C_sort_func_t)(const void *, const void *); | |
147 } | |
148 | |
149 // The type of all node counts and indexes. | |
150 // It must hold at least 16 bits, but must also be fast to load and store. | |
151 // This type, if less than 32 bits, could limit the number of possible nodes. | |
152 // (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) | |
153 typedef unsigned int node_idx_t; | |
154 | |
155 | |
156 #ifndef OPTO_DU_ITERATOR_ASSERT | |
157 #ifdef ASSERT | |
158 #define OPTO_DU_ITERATOR_ASSERT 1 | |
159 #else | |
160 #define OPTO_DU_ITERATOR_ASSERT 0 | |
161 #endif | |
162 #endif //OPTO_DU_ITERATOR_ASSERT | |
163 | |
164 #if OPTO_DU_ITERATOR_ASSERT | |
165 class DUIterator; | |
166 class DUIterator_Fast; | |
167 class DUIterator_Last; | |
168 #else | |
169 typedef uint DUIterator; | |
170 typedef Node** DUIterator_Fast; | |
171 typedef Node** DUIterator_Last; | |
172 #endif | |
173 | |
174 // Node Sentinel | |
175 #define NodeSentinel (Node*)-1 | |
176 | |
177 // Unknown count frequency | |
178 #define COUNT_UNKNOWN (-1.0f) | |
179 | |
180 //------------------------------Node------------------------------------------- | |
181 // Nodes define actions in the program. They create values, which have types. | |
182 // They are both vertices in a directed graph and program primitives. Nodes | |
183 // are labeled; the label is the "opcode", the primitive function in the lambda | |
184 // calculus sense that gives meaning to the Node. Node inputs are ordered (so | |
185 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to | |
186 // the Node's function. These inputs also define a Type equation for the Node. | |
187 // Solving these Type equations amounts to doing dataflow analysis. | |
188 // Control and data are uniformly represented in the graph. Finally, Nodes | |
189 // have a unique dense integer index which is used to index into side arrays | |
190 // whenever I have phase-specific information. | |
191 | |
192 class Node { | |
3939 | 193 friend class VMStructs; |
194 | |
0 | 195 // Lots of restrictions on cloning Nodes |
196 Node(const Node&); // not defined; linker error to use these | |
197 Node &operator=(const Node &rhs); | |
198 | |
199 public: | |
200 friend class Compile; | |
201 #if OPTO_DU_ITERATOR_ASSERT | |
202 friend class DUIterator_Common; | |
203 friend class DUIterator; | |
204 friend class DUIterator_Fast; | |
205 friend class DUIterator_Last; | |
206 #endif | |
207 | |
208 // Because Nodes come and go, I define an Arena of Node structures to pull | |
209 // from. This should allow fast access to node creation & deletion. This | |
210 // field is a local cache of a value defined in some "program fragment" for | |
211 // which these Nodes are just a part of. | |
212 | |
213 // New Operator that takes a Compile pointer, this will eventually | |
214 // be the "new" New operator. | |
215 inline void* operator new( size_t x, Compile* C) { | |
216 Node* n = (Node*)C->node_arena()->Amalloc_D(x); | |
217 #ifdef ASSERT | |
218 n->_in = (Node**)n; // magic cookie for assertion check | |
219 #endif | |
220 n->_out = (Node**)C; | |
221 return (void*)n; | |
222 } | |
223 | |
224 // Delete is a NOP | |
225 void operator delete( void *ptr ) {} | |
226 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage | |
227 void destruct(); | |
228 | |
229 // Create a new Node. Required is the number is of inputs required for | |
230 // semantic correctness. | |
231 Node( uint required ); | |
232 | |
233 // Create a new Node with given input edges. | |
234 // This version requires use of the "edge-count" new. | |
235 // E.g. new (C,3) FooNode( C, NULL, left, right ); | |
236 Node( Node *n0 ); | |
237 Node( Node *n0, Node *n1 ); | |
238 Node( Node *n0, Node *n1, Node *n2 ); | |
239 Node( Node *n0, Node *n1, Node *n2, Node *n3 ); | |
240 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); | |
241 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); | |
242 Node( Node *n0, Node *n1, Node *n2, Node *n3, | |
243 Node *n4, Node *n5, Node *n6 ); | |
244 | |
245 // Clone an inherited Node given only the base Node type. | |
246 Node* clone() const; | |
247 | |
248 // Clone a Node, immediately supplying one or two new edges. | |
249 // The first and second arguments, if non-null, replace in(1) and in(2), | |
250 // respectively. | |
251 Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { | |
252 Node* nn = clone(); | |
253 if (in1 != NULL) nn->set_req(1, in1); | |
254 if (in2 != NULL) nn->set_req(2, in2); | |
255 return nn; | |
256 } | |
257 | |
258 private: | |
259 // Shared setup for the above constructors. | |
260 // Handles all interactions with Compile::current. | |
261 // Puts initial values in all Node fields except _idx. | |
262 // Returns the initial value for _idx, which cannot | |
263 // be initialized by assignment. | |
264 inline int Init(int req, Compile* C); | |
265 | |
266 //----------------- input edge handling | |
267 protected: | |
268 friend class PhaseCFG; // Access to address of _in array elements | |
269 Node **_in; // Array of use-def references to Nodes | |
270 Node **_out; // Array of def-use references to Nodes | |
271 | |
605 | 272 // Input edges are split into two categories. Required edges are required |
0 | 273 // for semantic correctness; order is important and NULLs are allowed. |
274 // Precedence edges are used to help determine execution order and are | |
275 // added, e.g., for scheduling purposes. They are unordered and not | |
276 // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 | |
277 // are required, from _cnt to _max-1 are precedence edges. | |
278 node_idx_t _cnt; // Total number of required Node inputs. | |
279 | |
280 node_idx_t _max; // Actual length of input array. | |
281 | |
282 // Output edges are an unordered list of def-use edges which exactly | |
283 // correspond to required input edges which point from other nodes | |
284 // to this one. Thus the count of the output edges is the number of | |
285 // users of this node. | |
286 node_idx_t _outcnt; // Total number of Node outputs. | |
287 | |
288 node_idx_t _outmax; // Actual length of output array. | |
289 | |
290 // Grow the actual input array to the next larger power-of-2 bigger than len. | |
291 void grow( uint len ); | |
292 // Grow the output array to the next larger power-of-2 bigger than len. | |
293 void out_grow( uint len ); | |
294 | |
295 public: | |
296 // Each Node is assigned a unique small/dense number. This number is used | |
297 // to index into auxiliary arrays of data and bitvectors. | |
298 // It is declared const to defend against inadvertant assignment, | |
299 // since it is used by clients as a naked field. | |
300 const node_idx_t _idx; | |
301 | |
302 // Get the (read-only) number of input edges | |
303 uint req() const { return _cnt; } | |
304 uint len() const { return _max; } | |
305 // Get the (read-only) number of output edges | |
306 uint outcnt() const { return _outcnt; } | |
307 | |
308 #if OPTO_DU_ITERATOR_ASSERT | |
309 // Iterate over the out-edges of this node. Deletions are illegal. | |
310 inline DUIterator outs() const; | |
311 // Use this when the out array might have changed to suppress asserts. | |
312 inline DUIterator& refresh_out_pos(DUIterator& i) const; | |
313 // Does the node have an out at this position? (Used for iteration.) | |
314 inline bool has_out(DUIterator& i) const; | |
315 inline Node* out(DUIterator& i) const; | |
316 // Iterate over the out-edges of this node. All changes are illegal. | |
317 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; | |
318 inline Node* fast_out(DUIterator_Fast& i) const; | |
319 // Iterate over the out-edges of this node, deleting one at a time. | |
320 inline DUIterator_Last last_outs(DUIterator_Last& min) const; | |
321 inline Node* last_out(DUIterator_Last& i) const; | |
322 // The inline bodies of all these methods are after the iterator definitions. | |
323 #else | |
324 // Iterate over the out-edges of this node. Deletions are illegal. | |
325 // This iteration uses integral indexes, to decouple from array reallocations. | |
326 DUIterator outs() const { return 0; } | |
327 // Use this when the out array might have changed to suppress asserts. | |
328 DUIterator refresh_out_pos(DUIterator i) const { return i; } | |
329 | |
330 // Reference to the i'th output Node. Error if out of bounds. | |
331 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } | |
332 // Does the node have an out at this position? (Used for iteration.) | |
333 bool has_out(DUIterator i) const { return i < _outcnt; } | |
334 | |
335 // Iterate over the out-edges of this node. All changes are illegal. | |
336 // This iteration uses a pointer internal to the out array. | |
337 DUIterator_Fast fast_outs(DUIterator_Fast& max) const { | |
338 Node** out = _out; | |
339 // Assign a limit pointer to the reference argument: | |
340 max = out + (ptrdiff_t)_outcnt; | |
341 // Return the base pointer: | |
342 return out; | |
343 } | |
344 Node* fast_out(DUIterator_Fast i) const { return *i; } | |
345 // Iterate over the out-edges of this node, deleting one at a time. | |
346 // This iteration uses a pointer internal to the out array. | |
347 DUIterator_Last last_outs(DUIterator_Last& min) const { | |
348 Node** out = _out; | |
349 // Assign a limit pointer to the reference argument: | |
350 min = out; | |
351 // Return the pointer to the start of the iteration: | |
352 return out + (ptrdiff_t)_outcnt - 1; | |
353 } | |
354 Node* last_out(DUIterator_Last i) const { return *i; } | |
355 #endif | |
356 | |
357 // Reference to the i'th input Node. Error if out of bounds. | |
6268
6c5b7a6becc8
7187454: stack overflow in C2 compiler thread on Solaris x86
kvn
parents:
6266
diff
changeset
|
358 Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; } |
0 | 359 // Reference to the i'th output Node. Error if out of bounds. |
360 // Use this accessor sparingly. We are going trying to use iterators instead. | |
361 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } | |
362 // Return the unique out edge. | |
363 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } | |
364 // Delete out edge at position 'i' by moving last out edge to position 'i' | |
365 void raw_del_out(uint i) { | |
366 assert(i < _outcnt,"oob"); | |
367 assert(_outcnt > 0,"oob"); | |
368 #if OPTO_DU_ITERATOR_ASSERT | |
369 // Record that a change happened here. | |
370 debug_only(_last_del = _out[i]; ++_del_tick); | |
371 #endif | |
372 _out[i] = _out[--_outcnt]; | |
373 // Smash the old edge so it can't be used accidentally. | |
374 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); | |
375 } | |
376 | |
377 #ifdef ASSERT | |
378 bool is_dead() const; | |
379 #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) | |
380 #endif | |
381 | |
382 // Set a required input edge, also updates corresponding output edge | |
383 void add_req( Node *n ); // Append a NEW required input | |
384 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). | |
385 void del_req( uint idx ); // Delete required edge & compact | |
386 void ins_req( uint i, Node *n ); // Insert a NEW required input | |
387 void set_req( uint i, Node *n ) { | |
388 assert( is_not_dead(n), "can not use dead node"); | |
6268
6c5b7a6becc8
7187454: stack overflow in C2 compiler thread on Solaris x86
kvn
parents:
6266
diff
changeset
|
389 assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt)); |
0 | 390 assert( !VerifyHashTableKeys || _hash_lock == 0, |
391 "remove node from hash table before modifying it"); | |
392 Node** p = &_in[i]; // cache this._in, across the del_out call | |
393 if (*p != NULL) (*p)->del_out((Node *)this); | |
394 (*p) = n; | |
395 if (n != NULL) n->add_out((Node *)this); | |
396 } | |
397 // Light version of set_req() to init inputs after node creation. | |
398 void init_req( uint i, Node *n ) { | |
399 assert( i == 0 && this == n || | |
400 is_not_dead(n), "can not use dead node"); | |
401 assert( i < _cnt, "oob"); | |
402 assert( !VerifyHashTableKeys || _hash_lock == 0, | |
403 "remove node from hash table before modifying it"); | |
404 assert( _in[i] == NULL, "sanity"); | |
405 _in[i] = n; | |
406 if (n != NULL) n->add_out((Node *)this); | |
407 } | |
408 // Find first occurrence of n among my edges: | |
409 int find_edge(Node* n); | |
410 int replace_edge(Node* old, Node* neww); | |
411 // NULL out all inputs to eliminate incoming Def-Use edges. | |
412 // Return the number of edges between 'n' and 'this' | |
413 int disconnect_inputs(Node *n); | |
414 | |
415 // Quickly, return true if and only if I am Compile::current()->top(). | |
416 bool is_top() const { | |
417 assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); | |
418 return (_out == NULL); | |
419 } | |
420 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) | |
421 void setup_is_top(); | |
422 | |
423 // Strip away casting. (It is depth-limited.) | |
424 Node* uncast() const; | |
4778 | 425 // Return whether two Nodes are equivalent, after stripping casting. |
426 bool eqv_uncast(const Node* n) const { | |
427 return (this->uncast() == n->uncast()); | |
428 } | |
0 | 429 |
430 private: | |
431 static Node* uncast_helper(const Node* n); | |
432 | |
433 // Add an output edge to the end of the list | |
434 void add_out( Node *n ) { | |
435 if (is_top()) return; | |
436 if( _outcnt == _outmax ) out_grow(_outcnt); | |
437 _out[_outcnt++] = n; | |
438 } | |
439 // Delete an output edge | |
440 void del_out( Node *n ) { | |
441 if (is_top()) return; | |
442 Node** outp = &_out[_outcnt]; | |
443 // Find and remove n | |
444 do { | |
445 assert(outp > _out, "Missing Def-Use edge"); | |
446 } while (*--outp != n); | |
447 *outp = _out[--_outcnt]; | |
448 // Smash the old edge so it can't be used accidentally. | |
449 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); | |
450 // Record that a change happened here. | |
451 #if OPTO_DU_ITERATOR_ASSERT | |
452 debug_only(_last_del = n; ++_del_tick); | |
453 #endif | |
454 } | |
455 | |
456 public: | |
457 // Globally replace this node by a given new node, updating all uses. | |
458 void replace_by(Node* new_node); | |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
459 // Globally replace this node by a given new node, updating all uses |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
460 // and cutting input edges of old node. |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
461 void subsume_by(Node* new_node) { |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
462 replace_by(new_node); |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
463 disconnect_inputs(NULL); |
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
464 } |
0 | 465 void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); |
466 // Find the one non-null required input. RegionNode only | |
467 Node *nonnull_req() const; | |
468 // Add or remove precedence edges | |
469 void add_prec( Node *n ); | |
470 void rm_prec( uint i ); | |
471 void set_prec( uint i, Node *n ) { | |
472 assert( is_not_dead(n), "can not use dead node"); | |
473 assert( i >= _cnt, "not a precedence edge"); | |
474 if (_in[i] != NULL) _in[i]->del_out((Node *)this); | |
475 _in[i] = n; | |
476 if (n != NULL) n->add_out((Node *)this); | |
477 } | |
478 // Set this node's index, used by cisc_version to replace current node | |
479 void set_idx(uint new_idx) { | |
480 const node_idx_t* ref = &_idx; | |
481 *(node_idx_t*)ref = new_idx; | |
482 } | |
483 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) | |
484 void swap_edges(uint i1, uint i2) { | |
485 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); | |
486 // Def-Use info is unchanged | |
487 Node* n1 = in(i1); | |
488 Node* n2 = in(i2); | |
489 _in[i1] = n2; | |
490 _in[i2] = n1; | |
491 // If this node is in the hash table, make sure it doesn't need a rehash. | |
492 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); | |
493 } | |
494 | |
495 // Iterators over input Nodes for a Node X are written as: | |
496 // for( i = 0; i < X.req(); i++ ) ... X[i] ... | |
497 // NOTE: Required edges can contain embedded NULL pointers. | |
498 | |
499 //----------------- Other Node Properties | |
500 | |
501 // Generate class id for some ideal nodes to avoid virtual query | |
502 // methods is_<Node>(). | |
503 // Class id is the set of bits corresponded to the node class and all its | |
504 // super classes so that queries for super classes are also valid. | |
505 // Subclasses of the same super class have different assigned bit | |
506 // (the third parameter in the macro DEFINE_CLASS_ID). | |
507 // Classes with deeper hierarchy are declared first. | |
508 // Classes with the same hierarchy depth are sorted by usage frequency. | |
509 // | |
510 // The query method masks the bits to cut off bits of subclasses | |
511 // and then compare the result with the class id | |
512 // (see the macro DEFINE_CLASS_QUERY below). | |
513 // | |
514 // Class_MachCall=30, ClassMask_MachCall=31 | |
515 // 12 8 4 0 | |
516 // 0 0 0 0 0 0 0 0 1 1 1 1 0 | |
517 // | | | | | |
518 // | | | Bit_Mach=2 | |
519 // | | Bit_MachReturn=4 | |
520 // | Bit_MachSafePoint=8 | |
521 // Bit_MachCall=16 | |
522 // | |
523 // Class_CountedLoop=56, ClassMask_CountedLoop=63 | |
524 // 12 8 4 0 | |
525 // 0 0 0 0 0 0 0 1 1 1 0 0 0 | |
526 // | | | | |
527 // | | Bit_Region=8 | |
528 // | Bit_Loop=16 | |
529 // Bit_CountedLoop=32 | |
530 | |
531 #define DEFINE_CLASS_ID(cl, supcl, subn) \ | |
532 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ | |
533 Class_##cl = Class_##supcl + Bit_##cl , \ | |
534 ClassMask_##cl = ((Bit_##cl << 1) - 1) , | |
535 | |
536 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods | |
537 // so that it's values fits into 16 bits. | |
538 enum NodeClasses { | |
539 Bit_Node = 0x0000, | |
540 Class_Node = 0x0000, | |
541 ClassMask_Node = 0xFFFF, | |
542 | |
543 DEFINE_CLASS_ID(Multi, Node, 0) | |
544 DEFINE_CLASS_ID(SafePoint, Multi, 0) | |
545 DEFINE_CLASS_ID(Call, SafePoint, 0) | |
546 DEFINE_CLASS_ID(CallJava, Call, 0) | |
547 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) | |
548 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) | |
549 DEFINE_CLASS_ID(CallRuntime, Call, 1) | |
550 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) | |
551 DEFINE_CLASS_ID(Allocate, Call, 2) | |
552 DEFINE_CLASS_ID(AllocateArray, Allocate, 0) | |
553 DEFINE_CLASS_ID(AbstractLock, Call, 3) | |
554 DEFINE_CLASS_ID(Lock, AbstractLock, 0) | |
555 DEFINE_CLASS_ID(Unlock, AbstractLock, 1) | |
556 DEFINE_CLASS_ID(MultiBranch, Multi, 1) | |
557 DEFINE_CLASS_ID(PCTable, MultiBranch, 0) | |
558 DEFINE_CLASS_ID(Catch, PCTable, 0) | |
559 DEFINE_CLASS_ID(Jump, PCTable, 1) | |
560 DEFINE_CLASS_ID(If, MultiBranch, 1) | |
561 DEFINE_CLASS_ID(CountedLoopEnd, If, 0) | |
562 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) | |
563 DEFINE_CLASS_ID(Start, Multi, 2) | |
564 DEFINE_CLASS_ID(MemBar, Multi, 3) | |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4120
diff
changeset
|
565 DEFINE_CLASS_ID(Initialize, MemBar, 0) |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4120
diff
changeset
|
566 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) |
0 | 567 |
568 DEFINE_CLASS_ID(Mach, Node, 1) | |
569 DEFINE_CLASS_ID(MachReturn, Mach, 0) | |
570 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) | |
571 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) | |
572 DEFINE_CLASS_ID(MachCallJava, MachCall, 0) | |
573 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) | |
574 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) | |
575 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) | |
576 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) | |
3853
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
577 DEFINE_CLASS_ID(MachBranch, Mach, 1) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
578 DEFINE_CLASS_ID(MachIf, MachBranch, 0) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
579 DEFINE_CLASS_ID(MachGoto, MachBranch, 1) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
580 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
581 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
582 DEFINE_CLASS_ID(MachTemp, Mach, 3) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
583 DEFINE_CLASS_ID(MachConstantBase, Mach, 4) |
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
584 DEFINE_CLASS_ID(MachConstant, Mach, 5) |
0 | 585 |
3842 | 586 DEFINE_CLASS_ID(Type, Node, 2) |
0 | 587 DEFINE_CLASS_ID(Phi, Type, 0) |
588 DEFINE_CLASS_ID(ConstraintCast, Type, 1) | |
589 DEFINE_CLASS_ID(CheckCastPP, Type, 2) | |
590 DEFINE_CLASS_ID(CMove, Type, 3) | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
591 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
592 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
593 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
594 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
595 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
596 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
597 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) |
0 | 598 |
3842 | 599 DEFINE_CLASS_ID(Proj, Node, 3) |
600 DEFINE_CLASS_ID(CatchProj, Proj, 0) | |
601 DEFINE_CLASS_ID(JumpProj, Proj, 1) | |
602 DEFINE_CLASS_ID(IfTrue, Proj, 2) | |
603 DEFINE_CLASS_ID(IfFalse, Proj, 3) | |
604 DEFINE_CLASS_ID(Parm, Proj, 4) | |
605 DEFINE_CLASS_ID(MachProj, Proj, 5) | |
606 | |
607 DEFINE_CLASS_ID(Mem, Node, 4) | |
0 | 608 DEFINE_CLASS_ID(Load, Mem, 0) |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
609 DEFINE_CLASS_ID(LoadVector, Load, 0) |
0 | 610 DEFINE_CLASS_ID(Store, Mem, 1) |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
611 DEFINE_CLASS_ID(StoreVector, Store, 0) |
0 | 612 DEFINE_CLASS_ID(LoadStore, Mem, 2) |
613 | |
3842 | 614 DEFINE_CLASS_ID(Region, Node, 5) |
615 DEFINE_CLASS_ID(Loop, Region, 0) | |
616 DEFINE_CLASS_ID(Root, Loop, 0) | |
617 DEFINE_CLASS_ID(CountedLoop, Loop, 1) | |
618 | |
619 DEFINE_CLASS_ID(Sub, Node, 6) | |
620 DEFINE_CLASS_ID(Cmp, Sub, 0) | |
621 DEFINE_CLASS_ID(FastLock, Cmp, 0) | |
622 DEFINE_CLASS_ID(FastUnlock, Cmp, 1) | |
623 | |
0 | 624 DEFINE_CLASS_ID(MergeMem, Node, 7) |
625 DEFINE_CLASS_ID(Bool, Node, 8) | |
626 DEFINE_CLASS_ID(AddP, Node, 9) | |
627 DEFINE_CLASS_ID(BoxLock, Node, 10) | |
628 DEFINE_CLASS_ID(Add, Node, 11) | |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
629 DEFINE_CLASS_ID(Mul, Node, 12) |
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
630 DEFINE_CLASS_ID(Vector, Node, 13) |
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
631 DEFINE_CLASS_ID(ClearArray, Node, 14) |
0 | 632 |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
633 _max_classes = ClassMask_ClearArray |
0 | 634 }; |
635 #undef DEFINE_CLASS_ID | |
636 | |
637 // Flags are sorted by usage frequency. | |
638 enum NodeFlags { | |
639 Flag_is_Copy = 0x01, // should be first bit to avoid shift | |
3842 | 640 Flag_rematerialize = Flag_is_Copy << 1, |
0 | 641 Flag_needs_anti_dependence_check = Flag_rematerialize << 1, |
642 Flag_is_macro = Flag_needs_anti_dependence_check << 1, | |
643 Flag_is_Con = Flag_is_macro << 1, | |
644 Flag_is_cisc_alternate = Flag_is_Con << 1, | |
3853
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
645 Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1, |
0 | 646 Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1, |
3851 | 647 Flag_avoid_back_to_back = Flag_may_be_short_branch << 1, |
4120
f03a3c8bd5e5
7077312: Provide a CALL effect for instruct declaration in the ad file
roland
parents:
3939
diff
changeset
|
648 Flag_has_call = Flag_avoid_back_to_back << 1, |
f03a3c8bd5e5
7077312: Provide a CALL effect for instruct declaration in the ad file
roland
parents:
3939
diff
changeset
|
649 _max_flags = (Flag_has_call << 1) - 1 // allow flags combination |
0 | 650 }; |
651 | |
652 private: | |
653 jushort _class_id; | |
654 jushort _flags; | |
655 | |
656 protected: | |
657 // These methods should be called from constructors only. | |
658 void init_class_id(jushort c) { | |
659 assert(c <= _max_classes, "invalid node class"); | |
660 _class_id = c; // cast out const | |
661 } | |
662 void init_flags(jushort fl) { | |
663 assert(fl <= _max_flags, "invalid node flag"); | |
664 _flags |= fl; | |
665 } | |
666 void clear_flag(jushort fl) { | |
667 assert(fl <= _max_flags, "invalid node flag"); | |
668 _flags &= ~fl; | |
669 } | |
670 | |
671 public: | |
672 const jushort class_id() const { return _class_id; } | |
673 | |
674 const jushort flags() const { return _flags; } | |
675 | |
676 // Return a dense integer opcode number | |
677 virtual int Opcode() const; | |
678 | |
679 // Virtual inherited Node size | |
680 virtual uint size_of() const; | |
681 | |
682 // Other interesting Node properties | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
683 #define DEFINE_CLASS_QUERY(type) \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
684 bool is_##type() const { \ |
0 | 685 return ((_class_id & ClassMask_##type) == Class_##type); \ |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
686 } \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
687 type##Node *as_##type() const { \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
688 assert(is_##type(), "invalid node class"); \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
689 return (type##Node*)this; \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
690 } \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
691 type##Node* isa_##type() const { \ |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
692 return (is_##type()) ? as_##type() : NULL; \ |
0 | 693 } |
694 | |
695 DEFINE_CLASS_QUERY(AbstractLock) | |
696 DEFINE_CLASS_QUERY(Add) | |
697 DEFINE_CLASS_QUERY(AddP) | |
698 DEFINE_CLASS_QUERY(Allocate) | |
699 DEFINE_CLASS_QUERY(AllocateArray) | |
700 DEFINE_CLASS_QUERY(Bool) | |
701 DEFINE_CLASS_QUERY(BoxLock) | |
3842 | 702 DEFINE_CLASS_QUERY(Call) |
0 | 703 DEFINE_CLASS_QUERY(CallDynamicJava) |
704 DEFINE_CLASS_QUERY(CallJava) | |
705 DEFINE_CLASS_QUERY(CallLeaf) | |
706 DEFINE_CLASS_QUERY(CallRuntime) | |
707 DEFINE_CLASS_QUERY(CallStaticJava) | |
708 DEFINE_CLASS_QUERY(Catch) | |
709 DEFINE_CLASS_QUERY(CatchProj) | |
710 DEFINE_CLASS_QUERY(CheckCastPP) | |
711 DEFINE_CLASS_QUERY(ConstraintCast) | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
1080
diff
changeset
|
712 DEFINE_CLASS_QUERY(ClearArray) |
0 | 713 DEFINE_CLASS_QUERY(CMove) |
714 DEFINE_CLASS_QUERY(Cmp) | |
715 DEFINE_CLASS_QUERY(CountedLoop) | |
716 DEFINE_CLASS_QUERY(CountedLoopEnd) | |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
717 DEFINE_CLASS_QUERY(DecodeNarrowPtr) |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
718 DEFINE_CLASS_QUERY(DecodeN) |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
719 DEFINE_CLASS_QUERY(DecodeNKlass) |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
720 DEFINE_CLASS_QUERY(EncodeNarrowPtr) |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
128
diff
changeset
|
721 DEFINE_CLASS_QUERY(EncodeP) |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6804
diff
changeset
|
722 DEFINE_CLASS_QUERY(EncodePKlass) |
0 | 723 DEFINE_CLASS_QUERY(FastLock) |
724 DEFINE_CLASS_QUERY(FastUnlock) | |
725 DEFINE_CLASS_QUERY(If) | |
726 DEFINE_CLASS_QUERY(IfFalse) | |
727 DEFINE_CLASS_QUERY(IfTrue) | |
728 DEFINE_CLASS_QUERY(Initialize) | |
729 DEFINE_CLASS_QUERY(Jump) | |
730 DEFINE_CLASS_QUERY(JumpProj) | |
731 DEFINE_CLASS_QUERY(Load) | |
732 DEFINE_CLASS_QUERY(LoadStore) | |
733 DEFINE_CLASS_QUERY(Lock) | |
734 DEFINE_CLASS_QUERY(Loop) | |
735 DEFINE_CLASS_QUERY(Mach) | |
3853
11211f7cb5a0
7079317: Incorrect branch's destination block in PrintoOptoAssembly output
kvn
parents:
3851
diff
changeset
|
736 DEFINE_CLASS_QUERY(MachBranch) |
0 | 737 DEFINE_CLASS_QUERY(MachCall) |
738 DEFINE_CLASS_QUERY(MachCallDynamicJava) | |
739 DEFINE_CLASS_QUERY(MachCallJava) | |
740 DEFINE_CLASS_QUERY(MachCallLeaf) | |
741 DEFINE_CLASS_QUERY(MachCallRuntime) | |
742 DEFINE_CLASS_QUERY(MachCallStaticJava) | |
2008 | 743 DEFINE_CLASS_QUERY(MachConstantBase) |
744 DEFINE_CLASS_QUERY(MachConstant) | |
3842 | 745 DEFINE_CLASS_QUERY(MachGoto) |
0 | 746 DEFINE_CLASS_QUERY(MachIf) |
747 DEFINE_CLASS_QUERY(MachNullCheck) | |
3842 | 748 DEFINE_CLASS_QUERY(MachProj) |
0 | 749 DEFINE_CLASS_QUERY(MachReturn) |
750 DEFINE_CLASS_QUERY(MachSafePoint) | |
751 DEFINE_CLASS_QUERY(MachSpillCopy) | |
752 DEFINE_CLASS_QUERY(MachTemp) | |
753 DEFINE_CLASS_QUERY(Mem) | |
754 DEFINE_CLASS_QUERY(MemBar) | |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4120
diff
changeset
|
755 DEFINE_CLASS_QUERY(MemBarStoreStore) |
0 | 756 DEFINE_CLASS_QUERY(MergeMem) |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
757 DEFINE_CLASS_QUERY(Mul) |
0 | 758 DEFINE_CLASS_QUERY(Multi) |
759 DEFINE_CLASS_QUERY(MultiBranch) | |
33 | 760 DEFINE_CLASS_QUERY(Parm) |
0 | 761 DEFINE_CLASS_QUERY(PCTable) |
762 DEFINE_CLASS_QUERY(Phi) | |
763 DEFINE_CLASS_QUERY(Proj) | |
764 DEFINE_CLASS_QUERY(Region) | |
765 DEFINE_CLASS_QUERY(Root) | |
766 DEFINE_CLASS_QUERY(SafePoint) | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
40
diff
changeset
|
767 DEFINE_CLASS_QUERY(SafePointScalarObject) |
0 | 768 DEFINE_CLASS_QUERY(Start) |
769 DEFINE_CLASS_QUERY(Store) | |
770 DEFINE_CLASS_QUERY(Sub) | |
771 DEFINE_CLASS_QUERY(Type) | |
3842 | 772 DEFINE_CLASS_QUERY(Vector) |
6179
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
773 DEFINE_CLASS_QUERY(LoadVector) |
8c92982cbbc4
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
4778
diff
changeset
|
774 DEFINE_CLASS_QUERY(StoreVector) |
0 | 775 DEFINE_CLASS_QUERY(Unlock) |
776 | |
777 #undef DEFINE_CLASS_QUERY | |
778 | |
779 // duplicate of is_MachSpillCopy() | |
780 bool is_SpillCopy () const { | |
781 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); | |
782 } | |
783 | |
784 bool is_Con () const { return (_flags & Flag_is_Con) != 0; } | |
785 // The data node which is safe to leave in dead loop during IGVN optimization. | |
786 bool is_dead_loop_safe() const { | |
126
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
787 return is_Phi() || (is_Proj() && in(0) == NULL) || |
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
788 ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 && |
72f4a668df19
6625997: CastPP, CheckCastPP and Proj nodes are not dead loop safe
kvn
parents:
119
diff
changeset
|
789 (!is_Proj() || !in(0)->is_Allocate())); |
0 | 790 } |
791 | |
792 // is_Copy() returns copied edge index (0 or 1) | |
793 uint is_Copy() const { return (_flags & Flag_is_Copy); } | |
794 | |
795 virtual bool is_CFG() const { return false; } | |
796 | |
797 // If this node is control-dependent on a test, can it be | |
798 // rerouted to a dominating equivalent test? This is usually | |
799 // true of non-CFG nodes, but can be false for operations which | |
800 // depend for their correct sequencing on more than one test. | |
801 // (In that case, hoisting to a dominating test may silently | |
802 // skip some other important test.) | |
803 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; | |
804 | |
805 // When building basic blocks, I need to have a notion of block beginning | |
806 // Nodes, next block selector Nodes (block enders), and next block | |
807 // projections. These calls need to work on their machine equivalents. The | |
808 // Ideal beginning Nodes are RootNode, RegionNode and StartNode. | |
809 bool is_block_start() const { | |
810 if ( is_Region() ) | |
811 return this == (const Node*)in(0); | |
812 else | |
3842 | 813 return is_Start(); |
0 | 814 } |
815 | |
816 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, | |
817 // Goto and Return. This call also returns the block ending Node. | |
818 virtual const Node *is_block_proj() const; | |
819 | |
820 // The node is a "macro" node which needs to be expanded before matching | |
821 bool is_macro() const { return (_flags & Flag_is_macro) != 0; } | |
822 | |
823 //----------------- Optimization | |
824 | |
825 // Get the worst-case Type output for this Node. | |
826 virtual const class Type *bottom_type() const; | |
827 | |
828 // If we find a better type for a node, try to record it permanently. | |
829 // Return true if this node actually changed. | |
830 // Be sure to do the hash_delete game in the "rehash" variant. | |
831 void raise_bottom_type(const Type* new_type); | |
832 | |
833 // Get the address type with which this node uses and/or defs memory, | |
834 // or NULL if none. The address type is conservatively wide. | |
835 // Returns non-null for calls, membars, loads, stores, etc. | |
836 // Returns TypePtr::BOTTOM if the node touches memory "broadly". | |
837 virtual const class TypePtr *adr_type() const { return NULL; } | |
838 | |
839 // Return an existing node which computes the same function as this node. | |
840 // The optimistic combined algorithm requires this to return a Node which | |
841 // is a small number of steps away (e.g., one of my inputs). | |
842 virtual Node *Identity( PhaseTransform *phase ); | |
843 | |
844 // Return the set of values this Node can take on at runtime. | |
845 virtual const Type *Value( PhaseTransform *phase ) const; | |
846 | |
847 // Return a node which is more "ideal" than the current node. | |
848 // The invariants on this call are subtle. If in doubt, read the | |
849 // treatise in node.cpp above the default implemention AND TEST WITH | |
850 // +VerifyIterativeGVN! | |
851 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
852 | |
853 // Some nodes have specific Ideal subgraph transformations only if they are | |
854 // unique users of specific nodes. Such nodes should be put on IGVN worklist | |
855 // for the transformations to happen. | |
856 bool has_special_unique_user() const; | |
857 | |
119
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
858 // Skip Proj and CatchProj nodes chains. Check for Null and Top. |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
859 Node* find_exact_control(Node* ctrl); |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
860 |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
861 // Check if 'this' node dominates or equal to 'sub'. |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
862 bool dominates(Node* sub, Node_List &nlist); |
d1a5218d7eaf
6686791: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
113
diff
changeset
|
863 |
0 | 864 protected: |
865 bool remove_dead_region(PhaseGVN *phase, bool can_reshape); | |
866 public: | |
867 | |
868 // Idealize graph, using DU info. Done after constant propagation | |
869 virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); | |
870 | |
871 // See if there is valid pipeline info | |
872 static const Pipeline *pipeline_class(); | |
873 virtual const Pipeline *pipeline() const; | |
874 | |
875 // Compute the latency from the def to this instruction of the ith input node | |
876 uint latency(uint i); | |
877 | |
878 // Hash & compare functions, for pessimistic value numbering | |
879 | |
880 // If the hash function returns the special sentinel value NO_HASH, | |
881 // the node is guaranteed never to compare equal to any other node. | |
605 | 882 // If we accidentally generate a hash with value NO_HASH the node |
0 | 883 // won't go into the table and we'll lose a little optimization. |
884 enum { NO_HASH = 0 }; | |
885 virtual uint hash() const; | |
886 virtual uint cmp( const Node &n ) const; | |
887 | |
888 // Operation appears to be iteratively computed (such as an induction variable) | |
889 // It is possible for this operation to return false for a loop-varying | |
890 // value, if it appears (by local graph inspection) to be computed by a simple conditional. | |
891 bool is_iteratively_computed(); | |
892 | |
893 // Determine if a node is Counted loop induction variable. | |
894 // The method is defined in loopnode.cpp. | |
895 const Node* is_loop_iv() const; | |
896 | |
897 // Return a node with opcode "opc" and same inputs as "this" if one can | |
898 // be found; Otherwise return NULL; | |
899 Node* find_similar(int opc); | |
900 | |
901 // Return the unique control out if only one. Null if none or more than one. | |
902 Node* unique_ctrl_out(); | |
903 | |
904 //----------------- Code Generation | |
905 | |
906 // Ideal register class for Matching. Zero means unmatched instruction | |
907 // (these are cloned instead of converted to machine nodes). | |
908 virtual uint ideal_reg() const; | |
909 | |
910 static const uint NotAMachineReg; // must be > max. machine register | |
911 | |
912 // Do we Match on this edge index or not? Generally false for Control | |
913 // and true for everything else. Weird for calls & returns. | |
914 virtual uint match_edge(uint idx) const; | |
915 | |
916 // Register class output is returned in | |
917 virtual const RegMask &out_RegMask() const; | |
918 // Register class input is expected in | |
919 virtual const RegMask &in_RegMask(uint) const; | |
920 // Should we clone rather than spill this instruction? | |
921 bool rematerialize() const; | |
922 | |
923 // Return JVM State Object if this Node carries debug info, or NULL otherwise | |
924 virtual JVMState* jvms() const; | |
925 | |
926 // Print as assembly | |
927 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; | |
928 // Emit bytes starting at parameter 'ptr' | |
929 // Bump 'ptr' by the number of output bytes | |
930 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; | |
931 // Size of instruction in bytes | |
932 virtual uint size(PhaseRegAlloc *ra_) const; | |
933 | |
934 // Convenience function to extract an integer constant from a node. | |
935 // If it is not an integer constant (either Con, CastII, or Mach), | |
936 // return value_if_unknown. | |
937 jint find_int_con(jint value_if_unknown) const { | |
938 const TypeInt* t = find_int_type(); | |
939 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
940 } | |
941 // Return the constant, knowing it is an integer constant already | |
942 jint get_int() const { | |
943 const TypeInt* t = find_int_type(); | |
944 guarantee(t != NULL, "must be con"); | |
945 return t->get_con(); | |
946 } | |
947 // Here's where the work is done. Can produce non-constant int types too. | |
948 const TypeInt* find_int_type() const; | |
949 | |
950 // Same thing for long (and intptr_t, via type.hpp): | |
951 jlong get_long() const { | |
952 const TypeLong* t = find_long_type(); | |
953 guarantee(t != NULL, "must be con"); | |
954 return t->get_con(); | |
955 } | |
956 jlong find_long_con(jint value_if_unknown) const { | |
957 const TypeLong* t = find_long_type(); | |
958 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | |
959 } | |
960 const TypeLong* find_long_type() const; | |
961 | |
962 // These guys are called by code generated by ADLC: | |
963 intptr_t get_ptr() const; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
65
diff
changeset
|
964 intptr_t get_narrowcon() const; |
0 | 965 jdouble getd() const; |
966 jfloat getf() const; | |
967 | |
968 // Nodes which are pinned into basic blocks | |
969 virtual bool pinned() const { return false; } | |
970 | |
971 // Nodes which use memory without consuming it, hence need antidependences | |
972 // More specifically, needs_anti_dependence_check returns true iff the node | |
973 // (a) does a load, and (b) does not perform a store (except perhaps to a | |
974 // stack slot or some other unaliased location). | |
975 bool needs_anti_dependence_check() const; | |
976 | |
977 // Return which operand this instruction may cisc-spill. In other words, | |
978 // return operand position that can convert from reg to memory access | |
979 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } | |
980 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } | |
981 | |
982 //----------------- Graph walking | |
983 public: | |
984 // Walk and apply member functions recursively. | |
985 // Supplied (this) pointer is root. | |
986 void walk(NFunc pre, NFunc post, void *env); | |
987 static void nop(Node &, void*); // Dummy empty function | |
988 static void packregion( Node &n, void* ); | |
989 private: | |
990 void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); | |
991 | |
992 //----------------- Printing, etc | |
993 public: | |
994 #ifndef PRODUCT | |
995 Node* find(int idx) const; // Search the graph for the given idx. | |
996 Node* find_ctrl(int idx) const; // Search control ancestors for the given idx. | |
997 void dump() const; // Print this node, | |
998 void dump(int depth) const; // Print this node, recursively to depth d | |
999 void dump_ctrl(int depth) const; // Print control nodes, to depth d | |
1000 virtual void dump_req() const; // Print required-edge info | |
1001 virtual void dump_prec() const; // Print precedence-edge info | |
1002 virtual void dump_out() const; // Print the output edge info | |
1003 virtual void dump_spec(outputStream *st) const {}; // Print per-node info | |
1004 void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges | |
1005 void verify() const; // Check Def-Use info for my subgraph | |
1006 static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space); | |
1007 | |
1008 // This call defines a class-unique string used to identify class instances | |
1009 virtual const char *Name() const; | |
1010 | |
1011 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) | |
1012 // RegMask Print Functions | |
1013 void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } | |
1014 void dump_out_regmask() { out_RegMask().dump(); } | |
1015 static int _in_dump_cnt; | |
1016 static bool in_dump() { return _in_dump_cnt > 0; } | |
1017 void fast_dump() const { | |
1018 tty->print("%4d: %-17s", _idx, Name()); | |
1019 for (uint i = 0; i < len(); i++) | |
1020 if (in(i)) | |
1021 tty->print(" %4d", in(i)->_idx); | |
1022 else | |
1023 tty->print(" NULL"); | |
1024 tty->print("\n"); | |
1025 } | |
1026 #endif | |
1027 #ifdef ASSERT | |
1028 void verify_construction(); | |
1029 bool verify_jvms(const JVMState* jvms) const; | |
1030 int _debug_idx; // Unique value assigned to every node. | |
1031 int debug_idx() const { return _debug_idx; } | |
1032 void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } | |
1033 | |
1034 Node* _debug_orig; // Original version of this, if any. | |
1035 Node* debug_orig() const { return _debug_orig; } | |
1036 void set_debug_orig(Node* orig); // _debug_orig = orig | |
1037 | |
1038 int _hash_lock; // Barrier to modifications of nodes in the hash table | |
1039 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } | |
1040 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } | |
1041 | |
1042 static void init_NodeProperty(); | |
1043 | |
1044 #if OPTO_DU_ITERATOR_ASSERT | |
1045 const Node* _last_del; // The last deleted node. | |
1046 uint _del_tick; // Bumped when a deletion happens.. | |
1047 #endif | |
1048 #endif | |
1049 }; | |
1050 | |
1051 //----------------------------------------------------------------------------- | |
1052 // Iterators over DU info, and associated Node functions. | |
1053 | |
1054 #if OPTO_DU_ITERATOR_ASSERT | |
1055 | |
1056 // Common code for assertion checking on DU iterators. | |
1057 class DUIterator_Common VALUE_OBJ_CLASS_SPEC { | |
1058 #ifdef ASSERT | |
1059 protected: | |
1060 bool _vdui; // cached value of VerifyDUIterators | |
1061 const Node* _node; // the node containing the _out array | |
1062 uint _outcnt; // cached node->_outcnt | |
1063 uint _del_tick; // cached node->_del_tick | |
1064 Node* _last; // last value produced by the iterator | |
1065 | |
1066 void sample(const Node* node); // used by c'tor to set up for verifies | |
1067 void verify(const Node* node, bool at_end_ok = false); | |
1068 void verify_resync(); | |
1069 void reset(const DUIterator_Common& that); | |
1070 | |
1071 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators | |
1072 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } | |
1073 #else | |
1074 #define I_VDUI_ONLY(i,x) { } | |
1075 #endif //ASSERT | |
1076 }; | |
1077 | |
1078 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) | |
1079 | |
1080 // Default DU iterator. Allows appends onto the out array. | |
1081 // Allows deletion from the out array only at the current point. | |
1082 // Usage: | |
1083 // for (DUIterator i = x->outs(); x->has_out(i); i++) { | |
1084 // Node* y = x->out(i); | |
1085 // ... | |
1086 // } | |
1087 // Compiles in product mode to a unsigned integer index, which indexes | |
1088 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate | |
1089 // also reloads x->_outcnt. If you delete, you must perform "--i" just | |
1090 // before continuing the loop. You must delete only the last-produced | |
1091 // edge. You must delete only a single copy of the last-produced edge, | |
1092 // or else you must delete all copies at once (the first time the edge | |
1093 // is produced by the iterator). | |
1094 class DUIterator : public DUIterator_Common { | |
1095 friend class Node; | |
1096 | |
1097 // This is the index which provides the product-mode behavior. | |
1098 // Whatever the product-mode version of the system does to the | |
1099 // DUI index is done to this index. All other fields in | |
1100 // this class are used only for assertion checking. | |
1101 uint _idx; | |
1102 | |
1103 #ifdef ASSERT | |
1104 uint _refresh_tick; // Records the refresh activity. | |
1105 | |
1106 void sample(const Node* node); // Initialize _refresh_tick etc. | |
1107 void verify(const Node* node, bool at_end_ok = false); | |
1108 void verify_increment(); // Verify an increment operation. | |
1109 void verify_resync(); // Verify that we can back up over a deletion. | |
1110 void verify_finish(); // Verify that the loop terminated properly. | |
1111 void refresh(); // Resample verification info. | |
1112 void reset(const DUIterator& that); // Resample after assignment. | |
1113 #endif | |
1114 | |
1115 DUIterator(const Node* node, int dummy_to_avoid_conversion) | |
1116 { _idx = 0; debug_only(sample(node)); } | |
1117 | |
1118 public: | |
1119 // initialize to garbage; clear _vdui to disable asserts | |
1120 DUIterator() | |
1121 { /*initialize to garbage*/ debug_only(_vdui = false); } | |
1122 | |
1123 void operator++(int dummy_to_specify_postfix_op) | |
1124 { _idx++; VDUI_ONLY(verify_increment()); } | |
1125 | |
1126 void operator--() | |
1127 { VDUI_ONLY(verify_resync()); --_idx; } | |
1128 | |
1129 ~DUIterator() | |
1130 { VDUI_ONLY(verify_finish()); } | |
1131 | |
1132 void operator=(const DUIterator& that) | |
1133 { _idx = that._idx; debug_only(reset(that)); } | |
1134 }; | |
1135 | |
1136 DUIterator Node::outs() const | |
1137 { return DUIterator(this, 0); } | |
1138 DUIterator& Node::refresh_out_pos(DUIterator& i) const | |
1139 { I_VDUI_ONLY(i, i.refresh()); return i; } | |
1140 bool Node::has_out(DUIterator& i) const | |
1141 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } | |
1142 Node* Node::out(DUIterator& i) const | |
1143 { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } | |
1144 | |
1145 | |
1146 // Faster DU iterator. Disallows insertions into the out array. | |
1147 // Allows deletion from the out array only at the current point. | |
1148 // Usage: | |
1149 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { | |
1150 // Node* y = x->fast_out(i); | |
1151 // ... | |
1152 // } | |
1153 // Compiles in product mode to raw Node** pointer arithmetic, with | |
1154 // no reloading of pointers from the original node x. If you delete, | |
1155 // you must perform "--i; --imax" just before continuing the loop. | |
1156 // If you delete multiple copies of the same edge, you must decrement | |
1157 // imax, but not i, multiple times: "--i, imax -= num_edges". | |
1158 class DUIterator_Fast : public DUIterator_Common { | |
1159 friend class Node; | |
1160 friend class DUIterator_Last; | |
1161 | |
1162 // This is the pointer which provides the product-mode behavior. | |
1163 // Whatever the product-mode version of the system does to the | |
1164 // DUI pointer is done to this pointer. All other fields in | |
1165 // this class are used only for assertion checking. | |
1166 Node** _outp; | |
1167 | |
1168 #ifdef ASSERT | |
1169 void verify(const Node* node, bool at_end_ok = false); | |
1170 void verify_limit(); | |
1171 void verify_resync(); | |
1172 void verify_relimit(uint n); | |
1173 void reset(const DUIterator_Fast& that); | |
1174 #endif | |
1175 | |
1176 // Note: offset must be signed, since -1 is sometimes passed | |
1177 DUIterator_Fast(const Node* node, ptrdiff_t offset) | |
1178 { _outp = node->_out + offset; debug_only(sample(node)); } | |
1179 | |
1180 public: | |
1181 // initialize to garbage; clear _vdui to disable asserts | |
1182 DUIterator_Fast() | |
1183 { /*initialize to garbage*/ debug_only(_vdui = false); } | |
1184 | |
1185 void operator++(int dummy_to_specify_postfix_op) | |
1186 { _outp++; VDUI_ONLY(verify(_node, true)); } | |
1187 | |
1188 void operator--() | |
1189 { VDUI_ONLY(verify_resync()); --_outp; } | |
1190 | |
1191 void operator-=(uint n) // applied to the limit only | |
1192 { _outp -= n; VDUI_ONLY(verify_relimit(n)); } | |
1193 | |
1194 bool operator<(DUIterator_Fast& limit) { | |
1195 I_VDUI_ONLY(*this, this->verify(_node, true)); | |
1196 I_VDUI_ONLY(limit, limit.verify_limit()); | |
1197 return _outp < limit._outp; | |
1198 } | |
1199 | |
1200 void operator=(const DUIterator_Fast& that) | |
1201 { _outp = that._outp; debug_only(reset(that)); } | |
1202 }; | |
1203 | |
1204 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { | |
1205 // Assign a limit pointer to the reference argument: | |
1206 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); | |
1207 // Return the base pointer: | |
1208 return DUIterator_Fast(this, 0); | |
1209 } | |
1210 Node* Node::fast_out(DUIterator_Fast& i) const { | |
1211 I_VDUI_ONLY(i, i.verify(this)); | |
1212 return debug_only(i._last=) *i._outp; | |
1213 } | |
1214 | |
1215 | |
1216 // Faster DU iterator. Requires each successive edge to be removed. | |
1217 // Does not allow insertion of any edges. | |
1218 // Usage: | |
1219 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { | |
1220 // Node* y = x->last_out(i); | |
1221 // ... | |
1222 // } | |
1223 // Compiles in product mode to raw Node** pointer arithmetic, with | |
1224 // no reloading of pointers from the original node x. | |
1225 class DUIterator_Last : private DUIterator_Fast { | |
1226 friend class Node; | |
1227 | |
1228 #ifdef ASSERT | |
1229 void verify(const Node* node, bool at_end_ok = false); | |
1230 void verify_limit(); | |
1231 void verify_step(uint num_edges); | |
1232 #endif | |
1233 | |
1234 // Note: offset must be signed, since -1 is sometimes passed | |
1235 DUIterator_Last(const Node* node, ptrdiff_t offset) | |
1236 : DUIterator_Fast(node, offset) { } | |
1237 | |
1238 void operator++(int dummy_to_specify_postfix_op) {} // do not use | |
1239 void operator<(int) {} // do not use | |
1240 | |
1241 public: | |
1242 DUIterator_Last() { } | |
1243 // initialize to garbage | |
1244 | |
1245 void operator--() | |
1246 { _outp--; VDUI_ONLY(verify_step(1)); } | |
1247 | |
1248 void operator-=(uint n) | |
1249 { _outp -= n; VDUI_ONLY(verify_step(n)); } | |
1250 | |
1251 bool operator>=(DUIterator_Last& limit) { | |
1252 I_VDUI_ONLY(*this, this->verify(_node, true)); | |
1253 I_VDUI_ONLY(limit, limit.verify_limit()); | |
1254 return _outp >= limit._outp; | |
1255 } | |
1256 | |
1257 void operator=(const DUIterator_Last& that) | |
1258 { DUIterator_Fast::operator=(that); } | |
1259 }; | |
1260 | |
1261 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { | |
1262 // Assign a limit pointer to the reference argument: | |
1263 imin = DUIterator_Last(this, 0); | |
1264 // Return the initial pointer: | |
1265 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); | |
1266 } | |
// Fetch the use (out-edge) at the iterator's current position.
// In debug builds, the fetched pointer is also recorded in i._last
// so that the iterator's verify() machinery can check it later.
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
1271 | |
1272 #endif //OPTO_DU_ITERATOR_ASSERT | |
1273 | |
1274 #undef I_VDUI_ONLY | |
1275 #undef VDUI_ONLY | |
1276 | |
// An Iterator that truly follows the iterator pattern.  Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//
class SimpleDUIterator : public StackObj {
 private:
  Node* node;             // the node whose uses (out-edges) are being walked
  DUIterator_Fast i;      // current position
  DUIterator_Fast imax;   // one-past-the-end limit, filled in by fast_outs()
 public:
  // NOTE(review): 'i' is declared before 'imax', so its initializer runs
  // first and passes the not-yet-constructed 'imax' to fast_outs(), which
  // fills it in by reference.  This relies on DUIterator_Fast's default
  // constructor leaving its members untouched (it "initializes to garbage")
  // so the value written by fast_outs() survives — confirm before
  // reordering these fields.
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1294 |
0 | 1295 |
1296 //----------------------------------------------------------------------------- | |
1297 // Map dense integer indices to Nodes. Uses classic doubling-array trick. | |
1298 // Abstractly provides an infinite array of Node*'s, initialized to NULL. | |
1299 // Note that the constructor just zeros things, and since I use Arena | |
1300 // allocation I do not need a destructor to reclaim storage. | |
1301 class Node_Array : public ResourceObj { | |
3939 | 1302 friend class VMStructs; |
0 | 1303 protected: |
1304 Arena *_a; // Arena to allocate in | |
1305 uint _max; | |
1306 Node **_nodes; | |
1307 void grow( uint i ); // Grow array node to fit | |
1308 public: | |
1309 Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) { | |
1310 _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize ); | |
1311 for( int i = 0; i < OptoNodeListSize; i++ ) { | |
1312 _nodes[i] = NULL; | |
1313 } | |
1314 } | |
1315 | |
1316 Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} | |
1317 Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped | |
1318 { return (i<_max) ? _nodes[i] : (Node*)NULL; } | |
1319 Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; } | |
1320 Node **adr() { return _nodes; } | |
1321 // Extend the mapping: index i maps to Node *n. | |
1322 void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } | |
1323 void insert( uint i, Node *n ); | |
1324 void remove( uint i ); // Remove, preserving order | |
1325 void sort( C_sort_func_t func); | |
1326 void reset( Arena *new_a ); // Zap mapping to empty; reclaim storage | |
1327 void clear(); // Set all entries to NULL, keep storage | |
1328 uint Size() const { return _max; } | |
1329 void dump() const; | |
1330 }; | |
1331 | |
1332 class Node_List : public Node_Array { | |
3939 | 1333 friend class VMStructs; |
0 | 1334 uint _cnt; |
1335 public: | |
1336 Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} | |
1337 Node_List(Arena *a) : Node_Array(a), _cnt(0) {} | |
1080
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1338 bool contains(Node* n) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1339 for (uint e = 0; e < size(); e++) { |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1340 if (at(e) == n) return true; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1341 } |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1342 return false; |
7c57aead6d3e
6892658: C2 should optimize some stringbuilder patterns
never
parents:
605
diff
changeset
|
1343 } |
0 | 1344 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } |
1345 void remove( uint i ) { Node_Array::remove(i); _cnt--; } | |
1346 void push( Node *b ) { map(_cnt++,b); } | |
1347 void yank( Node *n ); // Find and remove | |
1348 Node *pop() { return _nodes[--_cnt]; } | |
1349 Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;} | |
1350 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage | |
1351 uint size() const { return _cnt; } | |
1352 void dump() const; | |
1353 }; | |
1354 | |
//------------------------------Unique_Node_List-------------------------------
// A Node_List that admits each node at most once: membership is tracked by
// a VectorSet of node indices (_idx) kept alongside the list storage.
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;   // One bit per node _idx currently in the list
  uint _clock_index;        // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Push only if not already present (test_set reports prior membership).
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Round-robin pop: takes the element at _clock_index (wrapping to 0 when
  // past the end), back-fills the hole with the tail element, and drops the
  // popped node's index from the membership set.
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  // Remove entry i by back-filling with the tail element (order not kept).
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;  // drop membership bit
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
1400 | |
// Inline definition of Compile::record_for_igvn must be deferred to this point.
// Queue n on the compile-wide IGVN worklist; the worklist is a
// Unique_Node_List (hence the deferral), so duplicate pushes are ignored.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}
1405 | |
//------------------------------Node_Stack-------------------------------------
// A stack of (node, index) pairs, arena-allocated and grown on demand.
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  // Allocate from the current thread's resource area; initial capacity is
  // max(size, OptoNodeListSize).
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  // Same, but allocating from a caller-supplied arena.
  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  // Push the pair (n, i); grows the backing array first if full.
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  // Top-of-stack accessors.  No emptiness check here — presumably callers
  // guard with is_nonempty(); dereferencing when empty is out of bounds.
  Node *node() const {
    return _inode_top->node;
  }
  // Random access to slot i (0 = bottom of stack).
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  // Overwrite the top-of-stack pair in place.
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
1475 | |
1476 | |
1477 //-----------------------------Node_Notes-------------------------------------- | |
1478 // Debugging or profiling annotations loosely and sparsely associated | |
1479 // with some nodes. See Compile::node_notes_at for the accessor. | |
1480 class Node_Notes VALUE_OBJ_CLASS_SPEC { | |
3939 | 1481 friend class VMStructs; |
0 | 1482 JVMState* _jvms; |
1483 | |
1484 public: | |
1485 Node_Notes(JVMState* jvms = NULL) { | |
1486 _jvms = jvms; | |
1487 } | |
1488 | |
1489 JVMState* jvms() { return _jvms; } | |
1490 void set_jvms(JVMState* x) { _jvms = x; } | |
1491 | |
1492 // True if there is nothing here. | |
1493 bool is_clear() { | |
1494 return (_jvms == NULL); | |
1495 } | |
1496 | |
1497 // Make there be nothing here. | |
1498 void clear() { | |
1499 _jvms = NULL; | |
1500 } | |
1501 | |
1502 // Make a new, clean node notes. | |
1503 static Node_Notes* make(Compile* C) { | |
1504 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | |
1505 nn->clear(); | |
1506 return nn; | |
1507 } | |
1508 | |
1509 Node_Notes* clone(Compile* C) { | |
1510 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | |
1511 (*nn) = (*this); | |
1512 return nn; | |
1513 } | |
1514 | |
1515 // Absorb any information from source. | |
1516 bool update_from(Node_Notes* source) { | |
1517 bool changed = false; | |
1518 if (source != NULL) { | |
1519 if (source->jvms() != NULL) { | |
1520 set_jvms(source->jvms()); | |
1521 changed = true; | |
1522 } | |
1523 } | |
1524 return changed; | |
1525 } | |
1526 }; | |
1527 | |
// Inlined accessors for Compile::node_nodes that require the preceding class:
// Find the Node_Notes slot for node index 'idx' inside the blocked array
// 'arr'.  Each element of arr is a sub-array ("block") of
// _node_notes_block_size notes, so the lookup splits idx into a block
// index (high bits) and an offset within the block (low bits).
// If the covering block does not exist yet: returns NULL when can_grow is
// false, otherwise grows the array first via grow_node_notes().
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
1542 | |
// Store notes for node index 'idx'.  Clear values are never written:
// passing NULL or an is_clear() note is a no-op returning false.
// Otherwise locates (growing if needed) the slot and merges 'value' into
// it, returning true if the stored note actually changed.
inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}
1551 | |
1552 | |
//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // declared const; mutated only via set_type()
public:
  // Replace _type in place.  The field is const to discourage casual
  // mutation, so the write goes through a pointer cast.  In debug builds,
  // if the node may be sitting in a hash table (_hash_lock set and
  // VerifyHashTableKeys on), the asserts check the hash code is unchanged.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
1972 | 1580 |
1581 #endif // SHARE_VM_OPTO_NODE_HPP |