Mercurial > hg > graal-compiler
annotate src/share/vm/opto/memnode.hpp @ 1941:79d04223b8a5
Added caching for resolved types and resolved fields.
This is crucial because the local load elimination will lead to wrong results if field equality (of two RiField objects with the same object and the same RiType) is not given. The caching makes sure that the default equals implementation is sufficient.
author | Thomas Wuerthinger <wuerthinger@ssw.jku.at> |
---|---|
date | Tue, 28 Dec 2010 18:33:26 +0100 |
parents | 4311f23817fd |
children | f95d63e2154a |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
2 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
25 // Portions of code courtesy of Clifford Click | |
26 | |
27 class MultiNode; | |
28 class PhaseCCP; | |
29 class PhaseTransform; | |
30 | |
31 //------------------------------MemNode---------------------------------------- | |
32 // Load or Store, possibly throwing a NULL pointer exception | |
33 class MemNode : public Node { | |
34 protected: | |
35 #ifdef ASSERT | |
36 const TypePtr* _adr_type; // What kind of memory is being addressed? | |
37 #endif | |
38 virtual uint size_of() const; // Size is bigger (ASSERT only) | |
39 public: | |
40 enum { Control, // When is it safe to do this load? | |
41 Memory, // Chunk of memory is being loaded from | |
42 Address, // Actually address, derived from base | |
43 ValueIn, // Value to store | |
44 OopStore // Preceeding oop store, only in StoreCM | |
45 }; | |
46 protected: | |
47 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) | |
48 : Node(c0,c1,c2 ) { | |
49 init_class_id(Class_Mem); | |
50 debug_only(_adr_type=at; adr_type();) | |
51 } | |
52 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) | |
53 : Node(c0,c1,c2,c3) { | |
54 init_class_id(Class_Mem); | |
55 debug_only(_adr_type=at; adr_type();) | |
56 } | |
57 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) | |
58 : Node(c0,c1,c2,c3,c4) { | |
59 init_class_id(Class_Mem); | |
60 debug_only(_adr_type=at; adr_type();) | |
61 } | |
62 | |
33 | 63 public: |
0 | 64 // Helpers for the optimizer. Documented in memnode.cpp. |
65 static bool detect_ptr_independence(Node* p1, AllocateNode* a1, | |
66 Node* p2, AllocateNode* a2, | |
67 PhaseTransform* phase); | |
68 static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); | |
69 | |
74
2a9af0b9cb1c
6674600: (Escape Analysis) Optimize memory graph for instance's fields
kvn
parents:
64
diff
changeset
|
70 static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); |
2a9af0b9cb1c
6674600: (Escape Analysis) Optimize memory graph for instance's fields
kvn
parents:
64
diff
changeset
|
71 static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase); |
0 | 72 // This one should probably be a phase-specific function: |
85
f3b3fe64f59f
6692301: Side effect in NumberFormat tests with -server -Xcomp
kvn
parents:
74
diff
changeset
|
73 static bool all_controls_dominate(Node* dom, Node* sub); |
0 | 74 |
163 | 75 // Find any cast-away of null-ness and keep its control. |
76 static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ); | |
0 | 77 virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); |
78 | |
79 virtual const class TypePtr *adr_type() const; // returns bottom_type of address | |
80 | |
81 // Shared code for Ideal methods: | |
82 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. | |
83 | |
84 // Helper function for adr_type() implementations. | |
85 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); | |
86 | |
87 // Raw access function, to allow copying of adr_type efficiently in | |
88 // product builds and retain the debug info for debug builds. | |
89 const TypePtr *raw_adr_type() const { | |
90 #ifdef ASSERT | |
91 return _adr_type; | |
92 #else | |
93 return 0; | |
94 #endif | |
95 } | |
96 | |
97 // Map a load or store opcode to its corresponding store opcode. | |
98 // (Return -1 if unknown.) | |
99 virtual int store_Opcode() const { return -1; } | |
100 | |
101 // What is the type of the value in memory? (T_VOID mean "unspecified".) | |
102 virtual BasicType memory_type() const = 0; | |
29
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
103 virtual int memory_size() const { |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
104 #ifdef ASSERT |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
105 return type2aelembytes(memory_type(), true); |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
106 #else |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
107 return type2aelembytes(memory_type()); |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
108 #endif |
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
17
diff
changeset
|
109 } |
0 | 110 |
111 // Search through memory states which precede this node (load or store). | |
112 // Look for an exact match for the address, with no intervening | |
113 // aliased stores. | |
114 Node* find_previous_store(PhaseTransform* phase); | |
115 | |
116 // Can this node (load or store) accurately see a stored value in | |
117 // the given memory state? (The state may or may not be in(Memory).) | |
118 Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; | |
119 | |
120 #ifndef PRODUCT | |
121 static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); | |
122 virtual void dump_spec(outputStream *st) const; | |
123 #endif | |
124 }; | |
125 | |
126 //------------------------------LoadNode--------------------------------------- | |
127 // Load value; requires Memory and Address | |
128 class LoadNode : public MemNode { | |
129 protected: | |
130 virtual uint cmp( const Node &n ) const; | |
131 virtual uint size_of() const; // Size is bigger | |
132 const Type* const _type; // What kind of value is loaded? | |
133 public: | |
134 | |
135 LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt ) | |
136 : MemNode(c,mem,adr,at), _type(rt) { | |
137 init_class_id(Class_Load); | |
138 } | |
139 | |
140 // Polymorphic factory method: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
141 static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
142 const TypePtr* at, const Type *rt, BasicType bt ); |
0 | 143 |
144 virtual uint hash() const; // Check the type | |
145 | |
146 // Handle algebraic identities here. If we have an identity, return the Node | |
147 // we are equivalent to. We look for Load of a Store. | |
148 virtual Node *Identity( PhaseTransform *phase ); | |
149 | |
150 // If the load is from Field memory and the pointer is non-null, we can | |
151 // zero out the control input. | |
152 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
153 | |
163 | 154 // Split instance field load through Phi. |
155 Node* split_through_phi(PhaseGVN *phase); | |
156 | |
17
ff5961f4c095
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
0
diff
changeset
|
157 // Recover original value from boxed values |
ff5961f4c095
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
0
diff
changeset
|
158 Node *eliminate_autobox(PhaseGVN *phase); |
ff5961f4c095
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
never
parents:
0
diff
changeset
|
159 |
0 | 160 // Compute a new Type for this node. Basically we just do the pre-check, |
161 // then call the virtual add() to set the type. | |
162 virtual const Type *Value( PhaseTransform *phase ) const; | |
163 | |
164
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
164 // Common methods for LoadKlass and LoadNKlass nodes. |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
165 const Type *klass_value_common( PhaseTransform *phase ) const; |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
166 Node *klass_identity_common( PhaseTransform *phase ); |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
167 |
0 | 168 virtual uint ideal_reg() const; |
169 virtual const Type *bottom_type() const; | |
170 // Following method is copied from TypeNode: | |
171 void set_type(const Type* t) { | |
172 assert(t != NULL, "sanity"); | |
173 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); | |
174 *(const Type**)&_type = t; // cast away const-ness | |
175 // If this node is in the hash table, make sure it doesn't need a rehash. | |
176 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); | |
177 } | |
178 const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; | |
179 | |
180 // Do not match memory edge | |
181 virtual uint match_edge(uint idx) const; | |
182 | |
183 // Map a load opcode to its corresponding store opcode. | |
184 virtual int store_Opcode() const = 0; | |
185 | |
64
b8f5ba577b02
6673473: (Escape Analysis) Add the instance's field information to PhiNode
kvn
parents:
33
diff
changeset
|
186 // Check if the load's memory input is a Phi node with the same control. |
b8f5ba577b02
6673473: (Escape Analysis) Add the instance's field information to PhiNode
kvn
parents:
33
diff
changeset
|
187 bool is_instance_field_load_with_local_phi(Node* ctrl); |
b8f5ba577b02
6673473: (Escape Analysis) Add the instance's field information to PhiNode
kvn
parents:
33
diff
changeset
|
188 |
0 | 189 #ifndef PRODUCT |
190 virtual void dump_spec(outputStream *st) const; | |
191 #endif | |
1609 | 192 #ifdef ASSERT |
193 // Helper function to allow a raw load without control edge for some cases | |
194 static bool is_immutable_value(Node* adr); | |
195 #endif | |
0 | 196 protected: |
197 const Type* load_array_final_field(const TypeKlassPtr *tkls, | |
198 ciKlass* klass) const; | |
199 }; | |
200 | |
201 //------------------------------LoadBNode-------------------------------------- | |
202 // Load a byte (8bits signed) from memory | |
203 class LoadBNode : public LoadNode { | |
204 public: | |
205 LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE ) | |
206 : LoadNode(c,mem,adr,at,ti) {} | |
207 virtual int Opcode() const; | |
208 virtual uint ideal_reg() const { return Op_RegI; } | |
209 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
210 virtual int store_Opcode() const { return Op_StoreB; } | |
211 virtual BasicType memory_type() const { return T_BYTE; } | |
212 }; | |
213 | |
624 | 214 //------------------------------LoadUBNode------------------------------------- |
215 // Load a unsigned byte (8bits unsigned) from memory | |
216 class LoadUBNode : public LoadNode { | |
217 public: | |
218 LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE ) | |
219 : LoadNode(c, mem, adr, at, ti) {} | |
220 virtual int Opcode() const; | |
221 virtual uint ideal_reg() const { return Op_RegI; } | |
222 virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); | |
223 virtual int store_Opcode() const { return Op_StoreB; } | |
224 virtual BasicType memory_type() const { return T_BYTE; } | |
225 }; | |
226 | |
558
3b5ac9e7e6ea
6796746: rename LoadC (char) opcode class to LoadUS (unsigned short)
twisti
parents:
420
diff
changeset
|
227 //------------------------------LoadUSNode------------------------------------- |
3b5ac9e7e6ea
6796746: rename LoadC (char) opcode class to LoadUS (unsigned short)
twisti
parents:
420
diff
changeset
|
228 // Load an unsigned short/char (16bits unsigned) from memory |
3b5ac9e7e6ea
6796746: rename LoadC (char) opcode class to LoadUS (unsigned short)
twisti
parents:
420
diff
changeset
|
229 class LoadUSNode : public LoadNode { |
0 | 230 public: |
558
3b5ac9e7e6ea
6796746: rename LoadC (char) opcode class to LoadUS (unsigned short)
twisti
parents:
420
diff
changeset
|
231 LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR ) |
0 | 232 : LoadNode(c,mem,adr,at,ti) {} |
233 virtual int Opcode() const; | |
234 virtual uint ideal_reg() const { return Op_RegI; } | |
235 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
236 virtual int store_Opcode() const { return Op_StoreC; } | |
237 virtual BasicType memory_type() const { return T_CHAR; } | |
238 }; | |
239 | |
240 //------------------------------LoadINode-------------------------------------- | |
241 // Load an integer from memory | |
242 class LoadINode : public LoadNode { | |
243 public: | |
244 LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT ) | |
245 : LoadNode(c,mem,adr,at,ti) {} | |
246 virtual int Opcode() const; | |
247 virtual uint ideal_reg() const { return Op_RegI; } | |
248 virtual int store_Opcode() const { return Op_StoreI; } | |
249 virtual BasicType memory_type() const { return T_INT; } | |
250 }; | |
251 | |
624 | 252 //------------------------------LoadUI2LNode----------------------------------- |
253 // Load an unsigned integer into long from memory | |
254 class LoadUI2LNode : public LoadNode { | |
255 public: | |
256 LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT) | |
257 : LoadNode(c, mem, adr, at, t) {} | |
258 virtual int Opcode() const; | |
259 virtual uint ideal_reg() const { return Op_RegL; } | |
260 virtual int store_Opcode() const { return Op_StoreL; } | |
261 virtual BasicType memory_type() const { return T_LONG; } | |
262 }; | |
263 | |
0 | 264 //------------------------------LoadRangeNode---------------------------------- |
265 // Load an array length from the array | |
266 class LoadRangeNode : public LoadINode { | |
267 public: | |
268 LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS ) | |
269 : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {} | |
270 virtual int Opcode() const; | |
271 virtual const Type *Value( PhaseTransform *phase ) const; | |
272 virtual Node *Identity( PhaseTransform *phase ); | |
366
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
253
diff
changeset
|
273 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
0 | 274 }; |
275 | |
276 //------------------------------LoadLNode-------------------------------------- | |
277 // Load a long from memory | |
278 class LoadLNode : public LoadNode { | |
279 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } | |
280 virtual uint cmp( const Node &n ) const { | |
281 return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access | |
282 && LoadNode::cmp(n); | |
283 } | |
284 virtual uint size_of() const { return sizeof(*this); } | |
285 const bool _require_atomic_access; // is piecewise load forbidden? | |
286 | |
287 public: | |
288 LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, | |
289 const TypeLong *tl = TypeLong::LONG, | |
290 bool require_atomic_access = false ) | |
291 : LoadNode(c,mem,adr,at,tl) | |
292 , _require_atomic_access(require_atomic_access) | |
293 {} | |
294 virtual int Opcode() const; | |
295 virtual uint ideal_reg() const { return Op_RegL; } | |
296 virtual int store_Opcode() const { return Op_StoreL; } | |
297 virtual BasicType memory_type() const { return T_LONG; } | |
298 bool require_atomic_access() { return _require_atomic_access; } | |
299 static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt); | |
300 #ifndef PRODUCT | |
301 virtual void dump_spec(outputStream *st) const { | |
302 LoadNode::dump_spec(st); | |
303 if (_require_atomic_access) st->print(" Atomic!"); | |
304 } | |
305 #endif | |
306 }; | |
307 | |
308 //------------------------------LoadL_unalignedNode---------------------------- | |
309 // Load a long from unaligned memory | |
310 class LoadL_unalignedNode : public LoadLNode { | |
311 public: | |
312 LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at ) | |
313 : LoadLNode(c,mem,adr,at) {} | |
314 virtual int Opcode() const; | |
315 }; | |
316 | |
317 //------------------------------LoadFNode-------------------------------------- | |
318 // Load a float (64 bits) from memory | |
319 class LoadFNode : public LoadNode { | |
320 public: | |
321 LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT ) | |
322 : LoadNode(c,mem,adr,at,t) {} | |
323 virtual int Opcode() const; | |
324 virtual uint ideal_reg() const { return Op_RegF; } | |
325 virtual int store_Opcode() const { return Op_StoreF; } | |
326 virtual BasicType memory_type() const { return T_FLOAT; } | |
327 }; | |
328 | |
329 //------------------------------LoadDNode-------------------------------------- | |
330 // Load a double (64 bits) from memory | |
331 class LoadDNode : public LoadNode { | |
332 public: | |
333 LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE ) | |
334 : LoadNode(c,mem,adr,at,t) {} | |
335 virtual int Opcode() const; | |
336 virtual uint ideal_reg() const { return Op_RegD; } | |
337 virtual int store_Opcode() const { return Op_StoreD; } | |
338 virtual BasicType memory_type() const { return T_DOUBLE; } | |
339 }; | |
340 | |
341 //------------------------------LoadD_unalignedNode---------------------------- | |
342 // Load a double from unaligned memory | |
343 class LoadD_unalignedNode : public LoadDNode { | |
344 public: | |
345 LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at ) | |
346 : LoadDNode(c,mem,adr,at) {} | |
347 virtual int Opcode() const; | |
348 }; | |
349 | |
350 //------------------------------LoadPNode-------------------------------------- | |
351 // Load a pointer from memory (either object or array) | |
352 class LoadPNode : public LoadNode { | |
353 public: | |
354 LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t ) | |
355 : LoadNode(c,mem,adr,at,t) {} | |
356 virtual int Opcode() const; | |
357 virtual uint ideal_reg() const { return Op_RegP; } | |
358 virtual int store_Opcode() const { return Op_StoreP; } | |
359 virtual BasicType memory_type() const { return T_ADDRESS; } | |
360 // depends_only_on_test is almost always true, and needs to be almost always | |
361 // true to enable key hoisting & commoning optimizations. However, for the | |
362 // special case of RawPtr loads from TLS top & end, the control edge carries | |
363 // the dependence preventing hoisting past a Safepoint instead of the memory | |
364 // edge. (An unfortunate consequence of having Safepoints not set Raw | |
365 // Memory; itself an unfortunate consequence of having Nodes which produce | |
366 // results (new raw memory state) inside of loops preventing all manner of | |
367 // other optimizations). Basically, it's ugly but so is the alternative. | |
368 // See comment in macro.cpp, around line 125 expand_allocate_common(). | |
369 virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } | |
370 }; | |
371 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
372 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
373 //------------------------------LoadNNode-------------------------------------- |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
374 // Load a narrow oop from memory (either object or array) |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
375 class LoadNNode : public LoadNode { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
376 public: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
377 LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t ) |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
378 : LoadNode(c,mem,adr,at,t) {} |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
379 virtual int Opcode() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
380 virtual uint ideal_reg() const { return Op_RegN; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
381 virtual int store_Opcode() const { return Op_StoreN; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
382 virtual BasicType memory_type() const { return T_NARROWOOP; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
383 // depends_only_on_test is almost always true, and needs to be almost always |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
384 // true to enable key hoisting & commoning optimizations. However, for the |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
385 // special case of RawPtr loads from TLS top & end, the control edge carries |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
386 // the dependence preventing hoisting past a Safepoint instead of the memory |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
387 // edge. (An unfortunate consequence of having Safepoints not set Raw |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
388 // Memory; itself an unfortunate consequence of having Nodes which produce |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
389 // results (new raw memory state) inside of loops preventing all manner of |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
390 // other optimizations). Basically, it's ugly but so is the alternative. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
391 // See comment in macro.cpp, around line 125 expand_allocate_common(). |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
392 virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
393 }; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
394 |
0 | 395 //------------------------------LoadKlassNode---------------------------------- |
396 // Load a Klass from an object | |
397 class LoadKlassNode : public LoadPNode { | |
398 public: | |
164
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
399 LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk ) |
0 | 400 : LoadPNode(c,mem,adr,at,tk) {} |
401 virtual int Opcode() const; | |
402 virtual const Type *Value( PhaseTransform *phase ) const; | |
403 virtual Node *Identity( PhaseTransform *phase ); | |
404 virtual bool depends_only_on_test() const { return true; } | |
164
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
405 |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
406 // Polymorphic factory method: |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
407 static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
408 const TypeKlassPtr *tk = TypeKlassPtr::OBJECT ); |
0 | 409 }; |
410 | |
164
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
411 //------------------------------LoadNKlassNode--------------------------------- |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
412 // Load a narrow Klass from an object. |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
413 class LoadNKlassNode : public LoadNNode { |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
414 public: |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
415 LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk ) |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
416 : LoadNNode(c,mem,adr,at,tk) {} |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
417 virtual int Opcode() const; |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
418 virtual uint ideal_reg() const { return Op_RegN; } |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
419 virtual int store_Opcode() const { return Op_StoreN; } |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
420 virtual BasicType memory_type() const { return T_NARROWOOP; } |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
421 |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
422 virtual const Type *Value( PhaseTransform *phase ) const; |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
423 virtual Node *Identity( PhaseTransform *phase ); |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
424 virtual bool depends_only_on_test() const { return true; } |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
425 }; |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
426 |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
427 |
0 | 428 //------------------------------LoadSNode-------------------------------------- |
429 // Load a short (16bits signed) from memory | |
430 class LoadSNode : public LoadNode { | |
431 public: | |
432 LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT ) | |
433 : LoadNode(c,mem,adr,at,ti) {} | |
434 virtual int Opcode() const; | |
435 virtual uint ideal_reg() const { return Op_RegI; } | |
436 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
437 virtual int store_Opcode() const { return Op_StoreC; } | |
438 virtual BasicType memory_type() const { return T_SHORT; } | |
439 }; | |
440 | |
441 //------------------------------StoreNode-------------------------------------- | |
442 // Store value; requires Store, Address and Value | |
443 class StoreNode : public MemNode { | |
444 protected: | |
445 virtual uint cmp( const Node &n ) const; | |
446 virtual bool depends_only_on_test() const { return false; } | |
447 | |
448 Node *Ideal_masked_input (PhaseGVN *phase, uint mask); | |
449 Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits); | |
450 | |
451 public: | |
452 StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) | |
453 : MemNode(c,mem,adr,at,val) { | |
454 init_class_id(Class_Store); | |
455 } | |
456 StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) | |
457 : MemNode(c,mem,adr,at,val,oop_store) { | |
458 init_class_id(Class_Store); | |
459 } | |
460 | |
461 // Polymorphic factory method: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
462 static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
463 const TypePtr* at, Node *val, BasicType bt ); |
0 | 464 |
465 virtual uint hash() const; // Check the type | |
466 | |
467 // If the store is to Field memory and the pointer is non-null, we can | |
468 // zero out the control input. | |
469 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
470 | |
471 // Compute a new Type for this node. Basically we just do the pre-check, | |
472 // then call the virtual add() to set the type. | |
473 virtual const Type *Value( PhaseTransform *phase ) const; | |
474 | |
475 // Check for identity function on memory (Load then Store at same address) | |
476 virtual Node *Identity( PhaseTransform *phase ); | |
477 | |
478 // Do not match memory edge | |
479 virtual uint match_edge(uint idx) const; | |
480 | |
481 virtual const Type *bottom_type() const; // returns Type::MEMORY | |
482 | |
483 // Map a store opcode to its corresponding own opcode, trivially. | |
484 virtual int store_Opcode() const { return Opcode(); } | |
485 | |
486 // have all possible loads of the value stored been optimized away? | |
487 bool value_never_loaded(PhaseTransform *phase) const; | |
488 }; | |
489 | |
490 //------------------------------StoreBNode------------------------------------- | |
491 // Store byte to memory | |
492 class StoreBNode : public StoreNode { | |
493 public: | |
494 StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
495 virtual int Opcode() const; | |
496 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
497 virtual BasicType memory_type() const { return T_BYTE; } | |
498 }; | |
499 | |
500 //------------------------------StoreCNode------------------------------------- | |
501 // Store char/short to memory | |
502 class StoreCNode : public StoreNode { | |
503 public: | |
504 StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
505 virtual int Opcode() const; | |
506 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
507 virtual BasicType memory_type() const { return T_CHAR; } | |
508 }; | |
509 | |
510 //------------------------------StoreINode------------------------------------- | |
511 // Store int to memory | |
512 class StoreINode : public StoreNode { | |
513 public: | |
514 StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
515 virtual int Opcode() const; | |
516 virtual BasicType memory_type() const { return T_INT; } | |
517 }; | |
518 | |
519 //------------------------------StoreLNode------------------------------------- | |
520 // Store long to memory | |
521 class StoreLNode : public StoreNode { | |
522 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } | |
523 virtual uint cmp( const Node &n ) const { | |
524 return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access | |
525 && StoreNode::cmp(n); | |
526 } | |
527 virtual uint size_of() const { return sizeof(*this); } | |
528 const bool _require_atomic_access; // is piecewise store forbidden? | |
529 | |
530 public: | |
531 StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, | |
532 bool require_atomic_access = false ) | |
533 : StoreNode(c,mem,adr,at,val) | |
534 , _require_atomic_access(require_atomic_access) | |
535 {} | |
536 virtual int Opcode() const; | |
537 virtual BasicType memory_type() const { return T_LONG; } | |
538 bool require_atomic_access() { return _require_atomic_access; } | |
539 static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val); | |
540 #ifndef PRODUCT | |
541 virtual void dump_spec(outputStream *st) const { | |
542 StoreNode::dump_spec(st); | |
543 if (_require_atomic_access) st->print(" Atomic!"); | |
544 } | |
545 #endif | |
546 }; | |
547 | |
548 //------------------------------StoreFNode------------------------------------- | |
549 // Store float to memory | |
550 class StoreFNode : public StoreNode { | |
551 public: | |
552 StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
553 virtual int Opcode() const; | |
554 virtual BasicType memory_type() const { return T_FLOAT; } | |
555 }; | |
556 | |
557 //------------------------------StoreDNode------------------------------------- | |
558 // Store double to memory | |
559 class StoreDNode : public StoreNode { | |
560 public: | |
561 StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
562 virtual int Opcode() const; | |
563 virtual BasicType memory_type() const { return T_DOUBLE; } | |
564 }; | |
565 | |
566 //------------------------------StorePNode------------------------------------- | |
567 // Store pointer to memory | |
568 class StorePNode : public StoreNode { | |
569 public: | |
570 StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
571 virtual int Opcode() const; | |
572 virtual BasicType memory_type() const { return T_ADDRESS; } | |
573 }; | |
574 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
575 //------------------------------StoreNNode------------------------------------- |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
576 // Store narrow oop to memory |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
577 class StoreNNode : public StoreNode { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
578 public: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
579 StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
580 virtual int Opcode() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
581 virtual BasicType memory_type() const { return T_NARROWOOP; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
582 }; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
583 |
0 | 584 //------------------------------StoreCMNode----------------------------------- |
585 // Store card-mark byte to memory for CM | |
586 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store | |
587 // Preceeding equivalent StoreCMs may be eliminated. | |
588 class StoreCMNode : public StoreNode { | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
589 private: |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
590 virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
591 virtual uint cmp( const Node &n ) const { |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
592 return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
593 && StoreNode::cmp(n); |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
594 } |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
595 virtual uint size_of() const { return sizeof(*this); } |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
596 int _oop_alias_idx; // The alias_idx of OopStore |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
597 |
0 | 598 public: |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
599 StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
600 StoreNode(c,mem,adr,at,val,oop_store), |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
601 _oop_alias_idx(oop_alias_idx) { |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
602 assert(_oop_alias_idx >= Compile::AliasIdxRaw || |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
603 _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0, |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
604 "bad oop alias idx"); |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
605 } |
0 | 606 virtual int Opcode() const; |
607 virtual Node *Identity( PhaseTransform *phase ); | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
608 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
0 | 609 virtual const Type *Value( PhaseTransform *phase ) const; |
610 virtual BasicType memory_type() const { return T_VOID; } // unspecific | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
611 int oop_alias_idx() const { return _oop_alias_idx; } |
0 | 612 }; |
613 | |
614 //------------------------------LoadPLockedNode--------------------------------- | |
615 // Load-locked a pointer from memory (either object or array). | |
616 // On Sparc & Intel this is implemented as a normal pointer load. | |
617 // On PowerPC and friends it's a real load-locked. | |
618 class LoadPLockedNode : public LoadPNode { | |
619 public: | |
620 LoadPLockedNode( Node *c, Node *mem, Node *adr ) | |
621 : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {} | |
622 virtual int Opcode() const; | |
623 virtual int store_Opcode() const { return Op_StorePConditional; } | |
624 virtual bool depends_only_on_test() const { return true; } | |
625 }; | |
626 | |
627 //------------------------------LoadLLockedNode--------------------------------- | |
628 // Load-locked a pointer from memory (either object or array). | |
629 // On Sparc & Intel this is implemented as a normal long load. | |
630 class LoadLLockedNode : public LoadLNode { | |
631 public: | |
632 LoadLLockedNode( Node *c, Node *mem, Node *adr ) | |
633 : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {} | |
634 virtual int Opcode() const; | |
635 virtual int store_Opcode() const { return Op_StoreLConditional; } | |
636 }; | |
637 | |
638 //------------------------------SCMemProjNode--------------------------------------- | |
639 // This class defines a projection of the memory state of a store conditional node. | |
640 // These nodes return a value, but also update memory. | |
641 class SCMemProjNode : public ProjNode { | |
642 public: | |
643 enum {SCMEMPROJCON = (uint)-2}; | |
644 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { } | |
645 virtual int Opcode() const; | |
646 virtual bool is_CFG() const { return false; } | |
647 virtual const Type *bottom_type() const {return Type::MEMORY;} | |
648 virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();} | |
649 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register | |
650 virtual const Type *Value( PhaseTransform *phase ) const; | |
651 #ifndef PRODUCT | |
652 virtual void dump_spec(outputStream *st) const {}; | |
653 #endif | |
654 }; | |
655 | |
656 //------------------------------LoadStoreNode--------------------------- | |
253
b0fe4deeb9fb
6726999: nsk/stress/jck12a/jck12a010 assert(n != null,"Bad immediate dominator info.")
kvn
parents:
196
diff
changeset
|
657 // Note: is_Mem() method returns 'true' for this class. |
0 | 658 class LoadStoreNode : public Node { |
659 public: | |
660 enum { | |
661 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode | |
662 }; | |
663 LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex); | |
664 virtual bool depends_only_on_test() const { return false; } | |
665 virtual const Type *bottom_type() const { return TypeInt::BOOL; } | |
666 virtual uint ideal_reg() const { return Op_RegI; } | |
667 virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; } | |
668 }; | |
669 | |
670 //------------------------------StorePConditionalNode--------------------------- | |
671 // Conditionally store pointer to memory, if no change since prior | |
672 // load-locked. Sets flags for success or failure of the store. | |
673 class StorePConditionalNode : public LoadStoreNode { | |
674 public: | |
675 StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { } | |
676 virtual int Opcode() const; | |
677 // Produces flags | |
678 virtual uint ideal_reg() const { return Op_RegFlags; } | |
679 }; | |
680 | |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
681 //------------------------------StoreIConditionalNode--------------------------- |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
682 // Conditionally store int to memory, if no change since prior |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
683 // load-locked. Sets flags for success or failure of the store. |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
684 class StoreIConditionalNode : public LoadStoreNode { |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
685 public: |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
686 StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { } |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
687 virtual int Opcode() const; |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
688 // Produces flags |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
689 virtual uint ideal_reg() const { return Op_RegFlags; } |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
690 }; |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
691 |
0 | 692 //------------------------------StoreLConditionalNode--------------------------- |
693 // Conditionally store long to memory, if no change since prior | |
694 // load-locked. Sets flags for success or failure of the store. | |
695 class StoreLConditionalNode : public LoadStoreNode { | |
696 public: | |
697 StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { } | |
698 virtual int Opcode() const; | |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
699 // Produces flags |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
700 virtual uint ideal_reg() const { return Op_RegFlags; } |
0 | 701 }; |
702 | |
703 | |
704 //------------------------------CompareAndSwapLNode--------------------------- | |
705 class CompareAndSwapLNode : public LoadStoreNode { | |
706 public: | |
707 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { } | |
708 virtual int Opcode() const; | |
709 }; | |
710 | |
711 | |
712 //------------------------------CompareAndSwapINode--------------------------- | |
713 class CompareAndSwapINode : public LoadStoreNode { | |
714 public: | |
715 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { } | |
716 virtual int Opcode() const; | |
717 }; | |
718 | |
719 | |
720 //------------------------------CompareAndSwapPNode--------------------------- | |
721 class CompareAndSwapPNode : public LoadStoreNode { | |
722 public: | |
723 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { } | |
724 virtual int Opcode() const; | |
725 }; | |
726 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
727 //------------------------------CompareAndSwapNNode--------------------------- |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
728 class CompareAndSwapNNode : public LoadStoreNode { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
729 public: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
730 CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
731 virtual int Opcode() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
732 }; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
733 |
0 | 734 //------------------------------ClearArray------------------------------------- |
735 class ClearArrayNode: public Node { | |
736 public: | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
737 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
738 : Node(ctrl,arymem,word_cnt,base) { |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
739 init_class_id(Class_ClearArray); |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
740 } |
0 | 741 virtual int Opcode() const; |
742 virtual const Type *bottom_type() const { return Type::MEMORY; } | |
743 // ClearArray modifies array elements, and so affects only the | |
744 // array memory addressed by the bottom_type of its base address. | |
745 virtual const class TypePtr *adr_type() const; | |
746 virtual Node *Identity( PhaseTransform *phase ); | |
747 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
748 virtual uint match_edge(uint idx) const; | |
749 | |
750 // Clear the given area of an object or array. | |
751 // The start offset must always be aligned mod BytesPerInt. | |
752 // The end offset must always be aligned mod BytesPerLong. | |
753 // Return the new memory. | |
754 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
755 intptr_t start_offset, | |
756 intptr_t end_offset, | |
757 PhaseGVN* phase); | |
758 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
759 intptr_t start_offset, | |
760 Node* end_offset, | |
761 PhaseGVN* phase); | |
762 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
763 Node* start_offset, | |
764 Node* end_offset, | |
765 PhaseGVN* phase); | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
766 // Return allocation input memory edge if it is different instance |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
767 // or itself if it is the one we are looking for. |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
768 static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); |
0 | 769 }; |
770 | |
771 //------------------------------StrComp------------------------------------- | |
772 class StrCompNode: public Node { | |
773 public: | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
774 StrCompNode(Node* control, Node* char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
775 Node* s1, Node* c1, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
776 Node* s2, Node* c2): Node(control, char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
777 s1, c1, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
778 s2, c2) {}; |
0 | 779 virtual int Opcode() const; |
780 virtual bool depends_only_on_test() const { return false; } | |
781 virtual const Type* bottom_type() const { return TypeInt::INT; } | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
782 virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
0 | 783 virtual uint match_edge(uint idx) const; |
784 virtual uint ideal_reg() const { return Op_RegI; } | |
785 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
786 }; | |
787 | |
681 | 788 //------------------------------StrEquals------------------------------------- |
789 class StrEqualsNode: public Node { | |
790 public: | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
791 StrEqualsNode(Node* control, Node* char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
792 Node* s1, Node* s2, Node* c): Node(control, char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
793 s1, s2, c) {}; |
681 | 794 virtual int Opcode() const; |
795 virtual bool depends_only_on_test() const { return false; } | |
796 virtual const Type* bottom_type() const { return TypeInt::BOOL; } | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
797 virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
681 | 798 virtual uint match_edge(uint idx) const; |
799 virtual uint ideal_reg() const { return Op_RegI; } | |
800 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
801 }; | |
802 | |
803 //------------------------------StrIndexOf------------------------------------- | |
804 class StrIndexOfNode: public Node { | |
805 public: | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
806 StrIndexOfNode(Node* control, Node* char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
807 Node* s1, Node* c1, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
808 Node* s2, Node* c2): Node(control, char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
809 s1, c1, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
810 s2, c2) {}; |
681 | 811 virtual int Opcode() const; |
812 virtual bool depends_only_on_test() const { return false; } | |
813 virtual const Type* bottom_type() const { return TypeInt::INT; } | |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
814 virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
681 | 815 virtual uint match_edge(uint idx) const; |
816 virtual uint ideal_reg() const { return Op_RegI; } | |
817 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
818 }; | |
819 | |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
820 //------------------------------AryEq--------------------------------------- |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
821 class AryEqNode: public Node { |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
822 public: |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
823 AryEqNode(Node* control, Node* char_array_mem, |
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
824 Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {}; |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
825 virtual int Opcode() const; |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
826 virtual bool depends_only_on_test() const { return false; } |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
827 virtual const Type* bottom_type() const { return TypeInt::BOOL; } |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
828 virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
829 virtual uint match_edge(uint idx) const; |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
830 virtual uint ideal_reg() const { return Op_RegI; } |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
831 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
832 }; |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
833 |
0 | 834 //------------------------------MemBar----------------------------------------- |
835 // There are different flavors of Memory Barriers to match the Java Memory | |
836 // Model. Monitor-enter and volatile-load act as Aquires: no following ref | |
837 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or | |
838 // volatile-load. Monitor-exit and volatile-store act as Release: no | |
605 | 839 // preceding ref can be moved to after them. We insert a MemBar-Release |
0 | 840 // before a FastUnlock or volatile-store. All volatiles need to be |
841 // serialized, so we follow all volatile-stores with a MemBar-Volatile to | |
605 | 842 // separate it from any following volatile-load. |
0 | 843 class MemBarNode: public MultiNode { |
844 virtual uint hash() const ; // { return NO_HASH; } | |
845 virtual uint cmp( const Node &n ) const ; // Always fail, except on self | |
846 | |
847 virtual uint size_of() const { return sizeof(*this); } | |
848 // Memory type this node is serializing. Usually either rawptr or bottom. | |
849 const TypePtr* _adr_type; | |
850 | |
851 public: | |
852 enum { | |
853 Precedent = TypeFunc::Parms // optional edge to force precedence | |
854 }; | |
855 MemBarNode(Compile* C, int alias_idx, Node* precedent); | |
856 virtual int Opcode() const = 0; | |
857 virtual const class TypePtr *adr_type() const { return _adr_type; } | |
858 virtual const Type *Value( PhaseTransform *phase ) const; | |
859 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
860 virtual uint match_edge(uint idx) const { return 0; } | |
861 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } | |
862 virtual Node *match( const ProjNode *proj, const Matcher *m ); | |
863 // Factory method. Builds a wide or narrow membar. | |
864 // Optional 'precedent' becomes an extra edge if not null. | |
865 static MemBarNode* make(Compile* C, int opcode, | |
866 int alias_idx = Compile::AliasIdxBot, | |
867 Node* precedent = NULL); | |
868 }; | |
869 | |
870 // "Acquire" - no following ref can move before (but earlier refs can | |
871 // follow, like an early Load stalled in cache). Requires multi-cpu | |
872 // visibility. Inserted after a volatile load or FastLock. | |
873 class MemBarAcquireNode: public MemBarNode { | |
874 public: | |
875 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent) | |
876 : MemBarNode(C, alias_idx, precedent) {} | |
877 virtual int Opcode() const; | |
878 }; | |
879 | |
880 // "Release" - no earlier ref can move after (but later refs can move | |
881 // up, like a speculative pipelined cache-hitting Load). Requires | |
882 // multi-cpu visibility. Inserted before a volatile store or FastUnLock. | |
883 class MemBarReleaseNode: public MemBarNode { | |
884 public: | |
885 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent) | |
886 : MemBarNode(C, alias_idx, precedent) {} | |
887 virtual int Opcode() const; | |
888 }; | |
889 | |
890 // Ordering between a volatile store and a following volatile load. | |
891 // Requires multi-CPU visibility? | |
892 class MemBarVolatileNode: public MemBarNode { | |
893 public: | |
894 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent) | |
895 : MemBarNode(C, alias_idx, precedent) {} | |
896 virtual int Opcode() const; | |
897 }; | |
898 | |
899 // Ordering within the same CPU. Used to order unsafe memory references | |
900 // inside the compiler when we lack alias info. Not needed "outside" the | |
901 // compiler because the CPU does all the ordering for us. | |
902 class MemBarCPUOrderNode: public MemBarNode { | |
903 public: | |
904 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent) | |
905 : MemBarNode(C, alias_idx, precedent) {} | |
906 virtual int Opcode() const; | |
907 virtual uint ideal_reg() const { return 0; } // not matched in the AD file | |
908 }; | |
909 | |
910 // Isolation of object setup after an AllocateNode and before next safepoint. | |
911 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.) | |
912 class InitializeNode: public MemBarNode { | |
913 friend class AllocateNode; | |
914 | |
915 bool _is_complete; | |
916 | |
917 public: | |
918 enum { | |
919 Control = TypeFunc::Control, | |
920 Memory = TypeFunc::Memory, // MergeMem for states affected by this op | |
921 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address | |
922 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP) | |
923 }; | |
924 | |
925 InitializeNode(Compile* C, int adr_type, Node* rawoop); | |
926 virtual int Opcode() const; | |
927 virtual uint size_of() const { return sizeof(*this); } | |
928 virtual uint ideal_reg() const { return 0; } // not matched in the AD file | |
929 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress | |
930 | |
931 // Manage incoming memory edges via a MergeMem on in(Memory): | |
932 Node* memory(uint alias_idx); | |
933 | |
934 // The raw memory edge coming directly from the Allocation. | |
935 // The contents of this memory are *always* all-zero-bits. | |
936 Node* zero_memory() { return memory(Compile::AliasIdxRaw); } | |
937 | |
938 // Return the corresponding allocation for this initialization (or null if none). | |
939 // (Note: Both InitializeNode::allocation and AllocateNode::initialization | |
940 // are defined in graphKit.cpp, which sets up the bidirectional relation.) | |
941 AllocateNode* allocation(); | |
942 | |
943 // Anything other than zeroing in this init? | |
944 bool is_non_zero(); | |
945 | |
946 // An InitializeNode must completed before macro expansion is done. | |
947 // Completion requires that the AllocateNode must be followed by | |
948 // initialization of the new memory to zero, then to any initializers. | |
949 bool is_complete() { return _is_complete; } | |
950 | |
951 // Mark complete. (Must not yet be complete.) | |
952 void set_complete(PhaseGVN* phase); | |
953 | |
954 #ifdef ASSERT | |
955 // ensure all non-degenerate stores are ordered and non-overlapping | |
956 bool stores_are_sane(PhaseTransform* phase); | |
957 #endif //ASSERT | |
958 | |
959 // See if this store can be captured; return offset where it initializes. | |
960 // Return 0 if the store cannot be moved (any sort of problem). | |
961 intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase); | |
962 | |
963 // Capture another store; reformat it to write my internal raw memory. | |
964 // Return the captured copy, else NULL if there is some sort of problem. | |
965 Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase); | |
966 | |
967 // Find captured store which corresponds to the range [start..start+size). | |
968 // Return my own memory projection (meaning the initial zero bits) | |
969 // if there is no such store. Return NULL if there is a problem. | |
970 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase); | |
971 | |
972 // Called when the associated AllocateNode is expanded into CFG. | |
973 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, | |
974 intptr_t header_size, Node* size_in_bytes, | |
975 PhaseGVN* phase); | |
976 | |
977 private: | |
978 void remove_extra_zeroes(); | |
979 | |
980 // Find out where a captured store should be placed (or already is placed). | |
981 int captured_store_insertion_point(intptr_t start, int size_in_bytes, | |
982 PhaseTransform* phase); | |
983 | |
984 static intptr_t get_store_offset(Node* st, PhaseTransform* phase); | |
985 | |
986 Node* make_raw_address(intptr_t offset, PhaseTransform* phase); | |
987 | |
988 bool detect_init_independence(Node* n, bool st_is_pinned, int& count); | |
989 | |
990 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, | |
991 PhaseGVN* phase); | |
992 | |
993 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase); | |
994 }; | |
995 | |
996 //------------------------------MergeMem--------------------------------------- | |
997 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.) | |
998 class MergeMemNode: public Node { | |
999 virtual uint hash() const ; // { return NO_HASH; } | |
1000 virtual uint cmp( const Node &n ) const ; // Always fail, except on self | |
1001 friend class MergeMemStream; | |
1002 MergeMemNode(Node* def); // clients use MergeMemNode::make | |
1003 | |
1004 public: | |
1005 // If the input is a whole memory state, clone it with all its slices intact. | |
1006 // Otherwise, make a new memory state with just that base memory input. | |
1007 // In either case, the result is a newly created MergeMem. | |
1008 static MergeMemNode* make(Compile* C, Node* base_memory); | |
1009 | |
1010 virtual int Opcode() const; | |
1011 virtual Node *Identity( PhaseTransform *phase ); | |
1012 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
1013 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1014 virtual uint match_edge(uint idx) const { return 0; } | |
1015 virtual const RegMask &out_RegMask() const; | |
1016 virtual const Type *bottom_type() const { return Type::MEMORY; } | |
1017 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; } | |
1018 // sparse accessors | |
1019 // Fetch the previously stored "set_memory_at", or else the base memory. | |
1020 // (Caller should clone it if it is a phi-nest.) | |
1021 Node* memory_at(uint alias_idx) const; | |
1022 // set the memory, regardless of its previous value | |
1023 void set_memory_at(uint alias_idx, Node* n); | |
1024 // the "base" is the memory that provides the non-finite support | |
1025 Node* base_memory() const { return in(Compile::AliasIdxBot); } | |
1026 // warning: setting the base can implicitly set any of the other slices too | |
1027 void set_base_memory(Node* def); | |
1028 // sentinel value which denotes a copy of the base memory: | |
1029 Node* empty_memory() const { return in(Compile::AliasIdxTop); } | |
1030 static Node* make_empty_memory(); // where the sentinel comes from | |
1031 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); } | |
1032 // hook for the iterator, to perform any necessary setup | |
1033 void iteration_setup(const MergeMemNode* other = NULL); | |
1034 // push sentinels until I am at least as long as the other (semantic no-op) | |
1035 void grow_to_match(const MergeMemNode* other); | |
1036 bool verify_sparse() const PRODUCT_RETURN0; | |
1037 #ifndef PRODUCT | |
1038 virtual void dump_spec(outputStream *st) const; | |
1039 #endif | |
1040 }; | |
1041 | |
1042 class MergeMemStream : public StackObj { | |
1043 private: | |
1044 MergeMemNode* _mm; | |
1045 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations | |
1046 Node* _mm_base; // loop-invariant base memory of _mm | |
1047 int _idx; | |
1048 int _cnt; | |
1049 Node* _mem; | |
1050 Node* _mem2; | |
1051 int _cnt2; | |
1052 | |
1053 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) { | |
1054 // subsume_node will break sparseness at times, whenever a memory slice | |
1055 // folds down to a copy of the base ("fat") memory. In such a case, | |
1056 // the raw edge will update to base, although it should be top. | |
1057 // This iterator will recognize either top or base_memory as an | |
1058 // "empty" slice. See is_empty, is_empty2, and next below. | |
1059 // | |
1060 // The sparseness property is repaired in MergeMemNode::Ideal. | |
1061 // As long as access to a MergeMem goes through this iterator | |
1062 // or the memory_at accessor, flaws in the sparseness will | |
1063 // never be observed. | |
1064 // | |
1065 // Also, iteration_setup repairs sparseness. | |
1066 assert(mm->verify_sparse(), "please, no dups of base"); | |
1067 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base"); | |
1068 | |
1069 _mm = mm; | |
1070 _mm_base = mm->base_memory(); | |
1071 _mm2 = mm2; | |
1072 _cnt = mm->req(); | |
1073 _idx = Compile::AliasIdxBot-1; // start at the base memory | |
1074 _mem = NULL; | |
1075 _mem2 = NULL; | |
1076 } | |
1077 | |
1078 #ifdef ASSERT | |
1079 Node* check_memory() const { | |
1080 if (at_base_memory()) | |
1081 return _mm->base_memory(); | |
1082 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top()) | |
1083 return _mm->memory_at(_idx); | |
1084 else | |
1085 return _mm_base; | |
1086 } | |
1087 Node* check_memory2() const { | |
1088 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx); | |
1089 } | |
1090 #endif | |
1091 | |
1092 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0; | |
1093 void assert_synch() const { | |
1094 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx), | |
1095 "no side-effects except through the stream"); | |
1096 } | |
1097 | |
1098 public: | |
1099 | |
1100 // expected usages: | |
1101 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... } | |
1102 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... } | |
1103 | |
1104 // iterate over one merge | |
1105 MergeMemStream(MergeMemNode* mm) { | |
1106 mm->iteration_setup(); | |
1107 init(mm); | |
1108 debug_only(_cnt2 = 999); | |
1109 } | |
1110 // iterate in parallel over two merges | |
1111 // only iterates through non-empty elements of mm2 | |
1112 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) { | |
1113 assert(mm2, "second argument must be a MergeMem also"); | |
1114 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state | |
1115 mm->iteration_setup(mm2); | |
1116 init(mm, mm2); | |
1117 _cnt2 = mm2->req(); | |
1118 } | |
1119 #ifdef ASSERT | |
1120 ~MergeMemStream() { | |
1121 assert_synch(); | |
1122 } | |
1123 #endif | |
1124 | |
1125 MergeMemNode* all_memory() const { | |
1126 return _mm; | |
1127 } | |
1128 Node* base_memory() const { | |
1129 assert(_mm_base == _mm->base_memory(), "no update to base memory, please"); | |
1130 return _mm_base; | |
1131 } | |
1132 const MergeMemNode* all_memory2() const { | |
1133 assert(_mm2 != NULL, ""); | |
1134 return _mm2; | |
1135 } | |
1136 bool at_base_memory() const { | |
1137 return _idx == Compile::AliasIdxBot; | |
1138 } | |
1139 int alias_idx() const { | |
1140 assert(_mem, "must call next 1st"); | |
1141 return _idx; | |
1142 } | |
1143 | |
1144 const TypePtr* adr_type() const { | |
1145 return Compile::current()->get_adr_type(alias_idx()); | |
1146 } | |
1147 | |
1148 const TypePtr* adr_type(Compile* C) const { | |
1149 return C->get_adr_type(alias_idx()); | |
1150 } | |
1151 bool is_empty() const { | |
1152 assert(_mem, "must call next 1st"); | |
1153 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel"); | |
1154 return _mem->is_top(); | |
1155 } | |
1156 bool is_empty2() const { | |
1157 assert(_mem2, "must call next 1st"); | |
1158 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel"); | |
1159 return _mem2->is_top(); | |
1160 } | |
1161 Node* memory() const { | |
1162 assert(!is_empty(), "must not be empty"); | |
1163 assert_synch(); | |
1164 return _mem; | |
1165 } | |
1166 // get the current memory, regardless of empty or non-empty status | |
1167 Node* force_memory() const { | |
1168 assert(!is_empty() || !at_base_memory(), ""); | |
1169 // Use _mm_base to defend against updates to _mem->base_memory(). | |
1170 Node *mem = _mem->is_top() ? _mm_base : _mem; | |
1171 assert(mem == check_memory(), ""); | |
1172 return mem; | |
1173 } | |
1174 Node* memory2() const { | |
1175 assert(_mem2 == check_memory2(), ""); | |
1176 return _mem2; | |
1177 } | |
1178 void set_memory(Node* mem) { | |
1179 if (at_base_memory()) { | |
1180 // Note that this does not change the invariant _mm_base. | |
1181 _mm->set_base_memory(mem); | |
1182 } else { | |
1183 _mm->set_memory_at(_idx, mem); | |
1184 } | |
1185 _mem = mem; | |
1186 assert_synch(); | |
1187 } | |
1188 | |
1189 // Recover from a side effect to the MergeMemNode. | |
1190 void set_memory() { | |
1191 _mem = _mm->in(_idx); | |
1192 } | |
1193 | |
1194 bool next() { return next(false); } | |
1195 bool next2() { return next(true); } | |
1196 | |
1197 bool next_non_empty() { return next_non_empty(false); } | |
1198 bool next_non_empty2() { return next_non_empty(true); } | |
1199 // next_non_empty2 can yield states where is_empty() is true | |
1200 | |
1201 private: | |
1202 // find the next item, which might be empty | |
1203 bool next(bool have_mm2) { | |
1204 assert((_mm2 != NULL) == have_mm2, "use other next"); | |
1205 assert_synch(); | |
1206 if (++_idx < _cnt) { | |
1207 // Note: This iterator allows _mm to be non-sparse. | |
1208 // It behaves the same whether _mem is top or base_memory. | |
1209 _mem = _mm->in(_idx); | |
1210 if (have_mm2) | |
1211 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop); | |
1212 return true; | |
1213 } | |
1214 return false; | |
1215 } | |
1216 | |
1217 // find the next non-empty item | |
1218 bool next_non_empty(bool have_mm2) { | |
1219 while (next(have_mm2)) { | |
1220 if (!is_empty()) { | |
1221 // make sure _mem2 is filled in sensibly | |
1222 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory(); | |
1223 return true; | |
1224 } else if (have_mm2 && !is_empty2()) { | |
1225 return true; // is_empty() == true | |
1226 } | |
1227 } | |
1228 return false; | |
1229 } | |
1230 }; | |
1231 | |
1232 //------------------------------Prefetch--------------------------------------- | |
1233 | |
1234 // Non-faulting prefetch load. Prefetch for many reads. | |
1235 class PrefetchReadNode : public Node { | |
1236 public: | |
1237 PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {} | |
1238 virtual int Opcode() const; | |
1239 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1240 virtual uint match_edge(uint idx) const { return idx==2; } | |
1241 virtual const Type *bottom_type() const { return Type::ABIO; } | |
1242 }; | |
1243 | |
1244 // Non-faulting prefetch load. Prefetch for many reads & many writes. | |
1245 class PrefetchWriteNode : public Node { | |
1246 public: | |
1247 PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {} | |
1248 virtual int Opcode() const; | |
1249 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1250 virtual uint match_edge(uint idx) const { return idx==2; } | |
1367
9e321dcfa5b7
6940726: Use BIS instruction for allocation prefetch on Sparc
kvn
parents:
1198
diff
changeset
|
1251 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } |
0 | 1252 }; |