Mercurial > hg > truffle
annotate src/share/vm/opto/memnode.hpp @ 17716:cdb71841f4bc
6498581: ThreadInterruptTest3 produces wrong output on Windows
Summary: There is race condition between os::interrupt and os::is_interrupted on Windows. In JVM_Sleep(Thread.sleep), check if thread gets interrupted, it may see interrupted but not really interrupted so cause spurious waking up (early return from sleep). Fix by checking if interrupt event really gets set thus prevent false return. For intrinsic of _isInterrupted, on Windows, go fastpath only on bit not set.
Reviewed-by: acorn, kvn
Contributed-by: david.holmes@oracle.com, yumin.qi@oracle.com
author | minqi |
---|---|
date | Wed, 26 Feb 2014 15:20:41 -0800 |
parents | 55fb97c4c58d |
children | abec000618bf |
rev | line source |
---|---|
0 | 1 /* |
17467
55fb97c4c58d
8029233: Update copyright year to match last edit in jdk8 hotspot repository for 2013
mikael
parents:
13045
diff
changeset
|
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1367
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_OPTO_MEMNODE_HPP |
26 #define SHARE_VM_OPTO_MEMNODE_HPP | |
27 | |
28 #include "opto/multnode.hpp" | |
29 #include "opto/node.hpp" | |
30 #include "opto/opcodes.hpp" | |
31 #include "opto/type.hpp" | |
32 | |
// Portions of code courtesy of Clifford Click

// Forward declarations for compiler node/phase types referenced below.
class MultiNode;
class PhaseCCP;
class PhaseTransform;
//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception.
// Common base class for all memory-accessing ideal-graph nodes; fixes the
// input-edge layout (Control/Memory/Address/...) shared by loads and stores.
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  // Well-known input edge indices for memory nodes.
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  // Constructors record the address type in debug builds only (see _adr_type);
  // the adr_type() call in debug_only() sanity-checks it immediately.
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2 ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
133 | |
//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address.
// Abstract base for all typed load nodes (LoadB/LoadI/LoadL/LoadP/...).
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }

};
219 | |
//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
233 | |
//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
247 | |
//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};
261 | |
//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};
275 | |
//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};
287 | |
//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array.
// The address type is fixed to TypeAryPtr::RANGE (the length field slice).
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
299 | |
//------------------------------LoadLNode--------------------------------------
// Load a long from memory.
// _require_atomic_access participates in hash()/cmp() so that GVN never
// commons an atomic long load with a non-atomic one.
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
331 | |
//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};
340 | |
//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};
352 | |
//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};
364 | |
//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};
373 | |
//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};
385 | |

//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
398 |
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};
414 | |
164
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
430 |
c436414a719e
6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
kvn
parents:
163
diff
changeset
|
431 |
//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value.
// Abstract base for all typed store nodes (StoreB/StoreC/StoreI/...).
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  // Stores are never hoisted past their dependent test.
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
480 | |
481 //------------------------------StoreBNode------------------------------------- | |
482 // Store byte to memory | |
483 class StoreBNode : public StoreNode { | |
484 public: | |
485 StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
486 virtual int Opcode() const; | |
487 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
488 virtual BasicType memory_type() const { return T_BYTE; } | |
489 }; | |
490 | |
491 //------------------------------StoreCNode------------------------------------- | |
492 // Store char/short to memory | |
493 class StoreCNode : public StoreNode { | |
494 public: | |
495 StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
496 virtual int Opcode() const; | |
497 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
498 virtual BasicType memory_type() const { return T_CHAR; } | |
499 }; | |
500 | |
501 //------------------------------StoreINode------------------------------------- | |
502 // Store int to memory | |
503 class StoreINode : public StoreNode { | |
504 public: | |
505 StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
506 virtual int Opcode() const; | |
507 virtual BasicType memory_type() const { return T_INT; } | |
508 }; | |
509 | |
510 //------------------------------StoreLNode------------------------------------- | |
511 // Store long to memory | |
512 class StoreLNode : public StoreNode { | |
513 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } | |
514 virtual uint cmp( const Node &n ) const { | |
515 return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access | |
516 && StoreNode::cmp(n); | |
517 } | |
518 virtual uint size_of() const { return sizeof(*this); } | |
519 const bool _require_atomic_access; // is piecewise store forbidden? | |
520 | |
521 public: | |
522 StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, | |
523 bool require_atomic_access = false ) | |
524 : StoreNode(c,mem,adr,at,val) | |
525 , _require_atomic_access(require_atomic_access) | |
526 {} | |
527 virtual int Opcode() const; | |
528 virtual BasicType memory_type() const { return T_LONG; } | |
529 bool require_atomic_access() { return _require_atomic_access; } | |
530 static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val); | |
531 #ifndef PRODUCT | |
532 virtual void dump_spec(outputStream *st) const { | |
533 StoreNode::dump_spec(st); | |
534 if (_require_atomic_access) st->print(" Atomic!"); | |
535 } | |
536 #endif | |
537 }; | |
538 | |
539 //------------------------------StoreFNode------------------------------------- | |
540 // Store float to memory | |
541 class StoreFNode : public StoreNode { | |
542 public: | |
543 StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
544 virtual int Opcode() const; | |
545 virtual BasicType memory_type() const { return T_FLOAT; } | |
546 }; | |
547 | |
548 //------------------------------StoreDNode------------------------------------- | |
549 // Store double to memory | |
550 class StoreDNode : public StoreNode { | |
551 public: | |
552 StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
553 virtual int Opcode() const; | |
554 virtual BasicType memory_type() const { return T_DOUBLE; } | |
555 }; | |
556 | |
557 //------------------------------StorePNode------------------------------------- | |
558 // Store pointer to memory | |
559 class StorePNode : public StoreNode { | |
560 public: | |
561 StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} | |
562 virtual int Opcode() const; | |
563 virtual BasicType memory_type() const { return T_ADDRESS; } | |
564 }; | |
565 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
566 //------------------------------StoreNNode------------------------------------- |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
567 // Store narrow oop to memory |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
568 class StoreNNode : public StoreNode { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
569 public: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
570 StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {} |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
571 virtual int Opcode() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
572 virtual BasicType memory_type() const { return T_NARROWOOP; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
573 }; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
574 |
6848
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
575 //------------------------------StoreNKlassNode-------------------------------------- |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
576 // Store narrow klass to memory |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
577 class StoreNKlassNode : public StoreNNode { |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
578 public: |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
579 StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {} |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
580 virtual int Opcode() const; |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
581 virtual BasicType memory_type() const { return T_NARROWKLASS; } |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
582 }; |
8e47bac5643a
7054512: Compress class pointers after perm gen removal
roland
parents:
6795
diff
changeset
|
583 |
0 | 584 //------------------------------StoreCMNode----------------------------------- |
585 // Store card-mark byte to memory for CM | |
586 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store | |
587 // Preceeding equivalent StoreCMs may be eliminated. | |
588 class StoreCMNode : public StoreNode { | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
589 private: |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
590 virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
591 virtual uint cmp( const Node &n ) const { |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
592 return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
593 && StoreNode::cmp(n); |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
594 } |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
595 virtual uint size_of() const { return sizeof(*this); } |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
596 int _oop_alias_idx; // The alias_idx of OopStore |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
597 |
0 | 598 public: |
1198
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
599 StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
600 StoreNode(c,mem,adr,at,val,oop_store), |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
601 _oop_alias_idx(oop_alias_idx) { |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
602 assert(_oop_alias_idx >= Compile::AliasIdxRaw || |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
603 _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0, |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
604 "bad oop alias idx"); |
8d9bfe6a446b
6920346: G1: "must avoid base_memory and AliasIdxTop"
never
parents:
1100
diff
changeset
|
605 } |
0 | 606 virtual int Opcode() const; |
607 virtual Node *Identity( PhaseTransform *phase ); | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
608 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
0 | 609 virtual const Type *Value( PhaseTransform *phase ) const; |
610 virtual BasicType memory_type() const { return T_VOID; } // unspecific | |
985
685e959d09ea
6877254: Server vm crashes with no branches off of store slice" when run with CMS and UseSuperWord(default)
cfang
parents:
681
diff
changeset
|
611 int oop_alias_idx() const { return _oop_alias_idx; } |
0 | 612 }; |
613 | |
614 //------------------------------LoadPLockedNode--------------------------------- | |
615 // Load-locked a pointer from memory (either object or array). | |
616 // On Sparc & Intel this is implemented as a normal pointer load. | |
617 // On PowerPC and friends it's a real load-locked. | |
618 class LoadPLockedNode : public LoadPNode { | |
619 public: | |
620 LoadPLockedNode( Node *c, Node *mem, Node *adr ) | |
621 : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {} | |
622 virtual int Opcode() const; | |
623 virtual int store_Opcode() const { return Op_StorePConditional; } | |
624 virtual bool depends_only_on_test() const { return true; } | |
625 }; | |
626 | |
627 //------------------------------SCMemProjNode--------------------------------------- | |
628 // This class defines a projection of the memory state of a store conditional node. | |
629 // These nodes return a value, but also update memory. | |
630 class SCMemProjNode : public ProjNode { | |
631 public: | |
632 enum {SCMEMPROJCON = (uint)-2}; | |
633 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { } | |
634 virtual int Opcode() const; | |
635 virtual bool is_CFG() const { return false; } | |
636 virtual const Type *bottom_type() const {return Type::MEMORY;} | |
637 virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();} | |
638 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register | |
639 virtual const Type *Value( PhaseTransform *phase ) const; | |
640 #ifndef PRODUCT | |
641 virtual void dump_spec(outputStream *st) const {}; | |
642 #endif | |
643 }; | |
644 | |
645 //------------------------------LoadStoreNode--------------------------- | |
253
b0fe4deeb9fb
6726999: nsk/stress/jck12a/jck12a010 assert(n != null,"Bad immediate dominator info.")
kvn
parents:
196
diff
changeset
|
646 // Note: is_Mem() method returns 'true' for this class. |
0 | 647 class LoadStoreNode : public Node { |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
648 private: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
649 const Type* const _type; // What kind of value is loaded? |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
650 const TypePtr* _adr_type; // What kind of memory is being addressed? |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
651 virtual uint size_of() const; // Size is bigger |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
652 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
653 LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ); |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
654 virtual bool depends_only_on_test() const { return false; } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
655 virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
656 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
657 virtual const Type *bottom_type() const { return _type; } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
658 virtual uint ideal_reg() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
659 virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
660 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
661 bool result_not_used() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
662 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
663 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
664 class LoadStoreConditionalNode : public LoadStoreNode { |
0 | 665 public: |
666 enum { | |
667 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode | |
668 }; | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
669 LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex); |
0 | 670 }; |
671 | |
672 //------------------------------StorePConditionalNode--------------------------- | |
673 // Conditionally store pointer to memory, if no change since prior | |
674 // load-locked. Sets flags for success or failure of the store. | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
675 class StorePConditionalNode : public LoadStoreConditionalNode { |
0 | 676 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
677 StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } |
0 | 678 virtual int Opcode() const; |
679 // Produces flags | |
680 virtual uint ideal_reg() const { return Op_RegFlags; } | |
681 }; | |
682 | |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
683 //------------------------------StoreIConditionalNode--------------------------- |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
684 // Conditionally store int to memory, if no change since prior |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
685 // load-locked. Sets flags for success or failure of the store. |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
686 class StoreIConditionalNode : public LoadStoreConditionalNode { |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
687 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
688 StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { } |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
689 virtual int Opcode() const; |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
690 // Produces flags |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
691 virtual uint ideal_reg() const { return Op_RegFlags; } |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
692 }; |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
693 |
0 | 694 //------------------------------StoreLConditionalNode--------------------------- |
695 // Conditionally store long to memory, if no change since prior | |
696 // load-locked. Sets flags for success or failure of the store. | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
697 class StoreLConditionalNode : public LoadStoreConditionalNode { |
0 | 698 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
699 StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } |
0 | 700 virtual int Opcode() const; |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
701 // Produces flags |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
702 virtual uint ideal_reg() const { return Op_RegFlags; } |
0 | 703 }; |
704 | |
705 | |
706 //------------------------------CompareAndSwapLNode--------------------------- | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
707 class CompareAndSwapLNode : public LoadStoreConditionalNode { |
0 | 708 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
709 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
0 | 710 virtual int Opcode() const; |
711 }; | |
712 | |
713 | |
714 //------------------------------CompareAndSwapINode--------------------------- | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
715 class CompareAndSwapINode : public LoadStoreConditionalNode { |
0 | 716 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
717 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
0 | 718 virtual int Opcode() const; |
719 }; | |
720 | |
721 | |
722 //------------------------------CompareAndSwapPNode--------------------------- | |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
723 class CompareAndSwapPNode : public LoadStoreConditionalNode { |
0 | 724 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
725 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
0 | 726 virtual int Opcode() const; |
727 }; | |
728 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
729 //------------------------------CompareAndSwapNNode--------------------------- |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
730 class CompareAndSwapNNode : public LoadStoreConditionalNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
731 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
732 CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
733 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
734 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
735 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
736 //------------------------------GetAndAddINode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
737 class GetAndAddINode : public LoadStoreNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
738 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
739 GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
740 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
741 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
742 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
743 //------------------------------GetAndAddLNode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
744 class GetAndAddLNode : public LoadStoreNode { |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
745 public: |
6795
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
746 GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
747 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
748 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
749 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
750 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
751 //------------------------------GetAndSetINode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
752 class GetAndSetINode : public LoadStoreNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
753 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
754 GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
755 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
756 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
757 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
758 //------------------------------GetAndSetINode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
759 class GetAndSetLNode : public LoadStoreNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
760 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
761 GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
762 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
763 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
764 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
765 //------------------------------GetAndSetPNode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
766 class GetAndSetPNode : public LoadStoreNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
767 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
768 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
769 virtual int Opcode() const; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
770 }; |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
771 |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
772 //------------------------------GetAndSetNNode--------------------------- |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
773 class GetAndSetNNode : public LoadStoreNode { |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
774 public: |
7eca5de9e0b6
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
6143
diff
changeset
|
775 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
776 virtual int Opcode() const; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
777 }; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
778 |
0 | 779 //------------------------------ClearArray------------------------------------- |
780 class ClearArrayNode: public Node { | |
781 public: | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
782 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
783 : Node(ctrl,arymem,word_cnt,base) { |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
784 init_class_id(Class_ClearArray); |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
785 } |
0 | 786 virtual int Opcode() const; |
787 virtual const Type *bottom_type() const { return Type::MEMORY; } | |
788 // ClearArray modifies array elements, and so affects only the | |
789 // array memory addressed by the bottom_type of its base address. | |
790 virtual const class TypePtr *adr_type() const; | |
791 virtual Node *Identity( PhaseTransform *phase ); | |
792 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
793 virtual uint match_edge(uint idx) const; | |
794 | |
795 // Clear the given area of an object or array. | |
796 // The start offset must always be aligned mod BytesPerInt. | |
797 // The end offset must always be aligned mod BytesPerLong. | |
798 // Return the new memory. | |
799 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
800 intptr_t start_offset, | |
801 intptr_t end_offset, | |
802 PhaseGVN* phase); | |
803 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
804 intptr_t start_offset, | |
805 Node* end_offset, | |
806 PhaseGVN* phase); | |
807 static Node* clear_memory(Node* control, Node* mem, Node* dest, | |
808 Node* start_offset, | |
809 Node* end_offset, | |
810 PhaseGVN* phase); | |
1100
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
811 // Return allocation input memory edge if it is different instance |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
812 // or itself if it is the one we are looking for. |
f96a1a986f7b
6895383: JCK test throws NPE for method compiled with Escape Analysis
kvn
parents:
986
diff
changeset
|
813 static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); |
0 | 814 }; |
815 | |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
816 //------------------------------StrIntrinsic------------------------------- |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
817 // Base class for Ideal nodes used in String instrinsic code. |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
818 class StrIntrinsicNode: public Node { |
0 | 819 public: |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
820 StrIntrinsicNode(Node* control, Node* char_array_mem, |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
821 Node* s1, Node* c1, Node* s2, Node* c2): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
822 Node(control, char_array_mem, s1, c1, s2, c2) { |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
823 } |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
824 |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
825 StrIntrinsicNode(Node* control, Node* char_array_mem, |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
826 Node* s1, Node* s2, Node* c): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
827 Node(control, char_array_mem, s1, s2, c) { |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
828 } |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
829 |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
830 StrIntrinsicNode(Node* control, Node* char_array_mem, |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
831 Node* s1, Node* s2): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
832 Node(control, char_array_mem, s1, s2) { |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
833 } |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
834 |
0 | 835 virtual bool depends_only_on_test() const { return false; } |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
836 virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
0 | 837 virtual uint match_edge(uint idx) const; |
838 virtual uint ideal_reg() const { return Op_RegI; } | |
839 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
4115 | 840 virtual const Type *Value(PhaseTransform *phase) const; |
0 | 841 }; |
842 | |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
843 //------------------------------StrComp------------------------------------- |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
844 class StrCompNode: public StrIntrinsicNode { |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
845 public: |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
846 StrCompNode(Node* control, Node* char_array_mem, |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
847 Node* s1, Node* c1, Node* s2, Node* c2): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
848 StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}; |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
849 virtual int Opcode() const; |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
850 virtual const Type* bottom_type() const { return TypeInt::INT; } |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
851 }; |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
852 |
681 | 853 //------------------------------StrEquals------------------------------------- |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
854 class StrEqualsNode: public StrIntrinsicNode { |
681 | 855 public: |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
856 StrEqualsNode(Node* control, Node* char_array_mem, |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
857 Node* s1, Node* s2, Node* c): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
858 StrIntrinsicNode(control, char_array_mem, s1, s2, c) {}; |
681 | 859 virtual int Opcode() const; |
860 virtual const Type* bottom_type() const { return TypeInt::BOOL; } | |
861 }; | |
862 | |
863 //------------------------------StrIndexOf------------------------------------- | |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
864 class StrIndexOfNode: public StrIntrinsicNode { |
681 | 865 public: |
986
62001a362ce9
6827605: new String intrinsics may prevent EA scalar replacement
kvn
parents:
985
diff
changeset
|
866 StrIndexOfNode(Node* control, Node* char_array_mem, |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
867 Node* s1, Node* c1, Node* s2, Node* c2): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
868 StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}; |
681 | 869 virtual int Opcode() const; |
870 virtual const Type* bottom_type() const { return TypeInt::INT; } | |
871 }; | |
872 | |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
873 //------------------------------AryEq--------------------------------------- |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
874 class AryEqNode: public StrIntrinsicNode { |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
875 public: |
2412
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
876 AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2): |
f9424955eb18
7029152: Ideal nodes for String intrinsics miss memory edge optimization
kvn
parents:
1972
diff
changeset
|
877 StrIntrinsicNode(control, char_array_mem, s1, s2) {}; |
169
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
878 virtual int Opcode() const; |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
879 virtual const Type* bottom_type() const { return TypeInt::BOOL; } |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
880 }; |
9148c65abefc
6695049: (coll) Create an x86 intrinsic for Arrays.equals
rasbold
parents:
164
diff
changeset
|
881 |
7637
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
882 |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
883 //------------------------------EncodeISOArray-------------------------------- |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
884 // encode char[] to byte[] in ISO_8859_1 |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
885 class EncodeISOArrayNode: public Node { |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
886 public: |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
887 EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {}; |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
888 virtual int Opcode() const; |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
889 virtual bool depends_only_on_test() const { return false; } |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
890 virtual const Type* bottom_type() const { return TypeInt::INT; } |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
891 virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; } |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
892 virtual uint match_edge(uint idx) const; |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
893 virtual uint ideal_reg() const { return Op_RegI; } |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
894 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
895 virtual const Type *Value(PhaseTransform *phase) const; |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
896 }; |
b30b3c2a0cf2
6896617: Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() on x86
kvn
parents:
6853
diff
changeset
|
897 |
0 | 898 //------------------------------MemBar----------------------------------------- |
899 // There are different flavors of Memory Barriers to match the Java Memory | |
900 // Model. Monitor-enter and volatile-load act as Aquires: no following ref | |
901 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or | |
902 // volatile-load. Monitor-exit and volatile-store act as Release: no | |
605 | 903 // preceding ref can be moved to after them. We insert a MemBar-Release |
0 | 904 // before a FastUnlock or volatile-store. All volatiles need to be |
905 // serialized, so we follow all volatile-stores with a MemBar-Volatile to | |
605 | 906 // separate it from any following volatile-load. |
0 | 907 class MemBarNode: public MultiNode { |
908 virtual uint hash() const ; // { return NO_HASH; } | |
909 virtual uint cmp( const Node &n ) const ; // Always fail, except on self | |
910 | |
911 virtual uint size_of() const { return sizeof(*this); } | |
912 // Memory type this node is serializing. Usually either rawptr or bottom. | |
913 const TypePtr* _adr_type; | |
914 | |
915 public: | |
916 enum { | |
917 Precedent = TypeFunc::Parms // optional edge to force precedence | |
918 }; | |
919 MemBarNode(Compile* C, int alias_idx, Node* precedent); | |
920 virtual int Opcode() const = 0; | |
921 virtual const class TypePtr *adr_type() const { return _adr_type; } | |
922 virtual const Type *Value( PhaseTransform *phase ) const; | |
923 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
924 virtual uint match_edge(uint idx) const { return 0; } | |
925 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } | |
926 virtual Node *match( const ProjNode *proj, const Matcher *m ); | |
927 // Factory method. Builds a wide or narrow membar. | |
928 // Optional 'precedent' becomes an extra edge if not null. | |
929 static MemBarNode* make(Compile* C, int opcode, | |
930 int alias_idx = Compile::AliasIdxBot, | |
931 Node* precedent = NULL); | |
932 }; | |
933 | |
934 // "Acquire" - no following ref can move before (but earlier refs can | |
935 // follow, like an early Load stalled in cache). Requires multi-cpu | |
3849
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
936 // visibility. Inserted after a volatile load. |
0 | 937 class MemBarAcquireNode: public MemBarNode { |
938 public: | |
939 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent) | |
940 : MemBarNode(C, alias_idx, precedent) {} | |
941 virtual int Opcode() const; | |
942 }; | |
943 | |
944 // "Release" - no earlier ref can move after (but later refs can move | |
945 // up, like a speculative pipelined cache-hitting Load). Requires | |
3849
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
946 // multi-cpu visibility. Inserted before a volatile store. |
0 | 947 class MemBarReleaseNode: public MemBarNode { |
948 public: | |
949 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent) | |
950 : MemBarNode(C, alias_idx, precedent) {} | |
951 virtual int Opcode() const; | |
952 }; | |
953 | |
3849
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
954 // "Acquire" - no following ref can move before (but earlier refs can |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
955 // follow, like an early Load stalled in cache). Requires multi-cpu |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
956 // visibility. Inserted after a FastLock. |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
957 class MemBarAcquireLockNode: public MemBarNode { |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
958 public: |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
959 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent) |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
960 : MemBarNode(C, alias_idx, precedent) {} |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
961 virtual int Opcode() const; |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
962 }; |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
963 |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
964 // "Release" - no earlier ref can move after (but later refs can move |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
965 // up, like a speculative pipelined cache-hitting Load). Requires |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
966 // multi-cpu visibility. Inserted before a FastUnLock. |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
967 class MemBarReleaseLockNode: public MemBarNode { |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
968 public: |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
969 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent) |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
970 : MemBarNode(C, alias_idx, precedent) {} |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
971 virtual int Opcode() const; |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
972 }; |
f1c12354c3f7
7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
roland
parents:
2426
diff
changeset
|
973 |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
974 class MemBarStoreStoreNode: public MemBarNode { |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
975 public: |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
976 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent) |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
977 : MemBarNode(C, alias_idx, precedent) { |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
978 init_class_id(Class_MemBarStoreStore); |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
979 } |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
980 virtual int Opcode() const; |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
981 }; |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
982 |
0 | 983 // Ordering between a volatile store and a following volatile load. |
984 // Requires multi-CPU visibility? | |
985 class MemBarVolatileNode: public MemBarNode { | |
986 public: | |
987 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent) | |
988 : MemBarNode(C, alias_idx, precedent) {} | |
989 virtual int Opcode() const; | |
990 }; | |
991 | |
992 // Ordering within the same CPU. Used to order unsafe memory references | |
993 // inside the compiler when we lack alias info. Not needed "outside" the | |
994 // compiler because the CPU does all the ordering for us. | |
995 class MemBarCPUOrderNode: public MemBarNode { | |
996 public: | |
997 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent) | |
998 : MemBarNode(C, alias_idx, precedent) {} | |
999 virtual int Opcode() const; | |
1000 virtual uint ideal_reg() const { return 0; } // not matched in the AD file | |
1001 }; | |
1002 | |
1003 // Isolation of object setup after an AllocateNode and before next safepoint. | |
1004 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.) | |
1005 class InitializeNode: public MemBarNode { | |
1006 friend class AllocateNode; | |
1007 | |
3961
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1008 enum { |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1009 Incomplete = 0, |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1010 Complete = 1, |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1011 WithArraycopy = 2 |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1012 }; |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1013 int _is_complete; |
0 | 1014 |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
1015 bool _does_not_escape; |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
1016 |
0 | 1017 public: |
1018 enum { | |
1019 Control = TypeFunc::Control, | |
1020 Memory = TypeFunc::Memory, // MergeMem for states affected by this op | |
1021 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address | |
1022 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP) | |
1023 }; | |
1024 | |
1025 InitializeNode(Compile* C, int adr_type, Node* rawoop); | |
1026 virtual int Opcode() const; | |
1027 virtual uint size_of() const { return sizeof(*this); } | |
1028 virtual uint ideal_reg() const { return 0; } // not matched in the AD file | |
1029 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress | |
1030 | |
1031 // Manage incoming memory edges via a MergeMem on in(Memory): | |
1032 Node* memory(uint alias_idx); | |
1033 | |
1034 // The raw memory edge coming directly from the Allocation. | |
1035 // The contents of this memory are *always* all-zero-bits. | |
1036 Node* zero_memory() { return memory(Compile::AliasIdxRaw); } | |
1037 | |
1038 // Return the corresponding allocation for this initialization (or null if none). | |
1039 // (Note: Both InitializeNode::allocation and AllocateNode::initialization | |
1040 // are defined in graphKit.cpp, which sets up the bidirectional relation.) | |
1041 AllocateNode* allocation(); | |
1042 | |
1043 // Anything other than zeroing in this init? | |
1044 bool is_non_zero(); | |
1045 | |
1046 // An InitializeNode must completed before macro expansion is done. | |
1047 // Completion requires that the AllocateNode must be followed by | |
1048 // initialization of the new memory to zero, then to any initializers. | |
3961
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1049 bool is_complete() { return _is_complete != Incomplete; } |
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1050 bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; } |
0 | 1051 |
1052 // Mark complete. (Must not yet be complete.) | |
1053 void set_complete(PhaseGVN* phase); | |
3961
a92cdbac8b9e
7081933: Use zeroing elimination optimization for large array
kvn
parents:
3854
diff
changeset
|
1054 void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; } |
0 | 1055 |
4763
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
1056 bool does_not_escape() { return _does_not_escape; } |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
1057 void set_does_not_escape() { _does_not_escape = true; } |
1dc233a8c7fe
7121140: Allocation paths require explicit memory synchronization operations for RMO systems
roland
parents:
4115
diff
changeset
|
1058 |
0 | 1059 #ifdef ASSERT |
1060 // ensure all non-degenerate stores are ordered and non-overlapping | |
1061 bool stores_are_sane(PhaseTransform* phase); | |
1062 #endif //ASSERT | |
1063 | |
1064 // See if this store can be captured; return offset where it initializes. | |
1065 // Return 0 if the store cannot be moved (any sort of problem). | |
8116
6931f425c517
8007294: ReduceFieldZeroing doesn't check for dependent load and can lead to incorrect execution
roland
parents:
7637
diff
changeset
|
1066 intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape); |
0 | 1067 |
1068 // Capture another store; reformat it to write my internal raw memory. | |
1069 // Return the captured copy, else NULL if there is some sort of problem. | |
8116
6931f425c517
8007294: ReduceFieldZeroing doesn't check for dependent load and can lead to incorrect execution
roland
parents:
7637
diff
changeset
|
1070 Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape); |
0 | 1071 |
1072 // Find captured store which corresponds to the range [start..start+size). | |
1073 // Return my own memory projection (meaning the initial zero bits) | |
1074 // if there is no such store. Return NULL if there is a problem. | |
1075 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase); | |
1076 | |
1077 // Called when the associated AllocateNode is expanded into CFG. | |
1078 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, | |
1079 intptr_t header_size, Node* size_in_bytes, | |
1080 PhaseGVN* phase); | |
1081 | |
1082 private: | |
1083 void remove_extra_zeroes(); | |
1084 | |
1085 // Find out where a captured store should be placed (or already is placed). | |
1086 int captured_store_insertion_point(intptr_t start, int size_in_bytes, | |
1087 PhaseTransform* phase); | |
1088 | |
1089 static intptr_t get_store_offset(Node* st, PhaseTransform* phase); | |
1090 | |
1091 Node* make_raw_address(intptr_t offset, PhaseTransform* phase); | |
1092 | |
10278 | 1093 bool detect_init_independence(Node* n, int& count); |
0 | 1094 |
1095 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, | |
1096 PhaseGVN* phase); | |
1097 | |
1098 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase); | |
1099 }; | |
1100 | |
1101 //------------------------------MergeMem--------------------------------------- | |
1102 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.) | |
1103 class MergeMemNode: public Node { | |
1104 virtual uint hash() const ; // { return NO_HASH; } | |
1105 virtual uint cmp( const Node &n ) const ; // Always fail, except on self | |
1106 friend class MergeMemStream; | |
1107 MergeMemNode(Node* def); // clients use MergeMemNode::make | |
1108 | |
1109 public: | |
1110 // If the input is a whole memory state, clone it with all its slices intact. | |
1111 // Otherwise, make a new memory state with just that base memory input. | |
1112 // In either case, the result is a newly created MergeMem. | |
1113 static MergeMemNode* make(Compile* C, Node* base_memory); | |
1114 | |
1115 virtual int Opcode() const; | |
1116 virtual Node *Identity( PhaseTransform *phase ); | |
1117 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
1118 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1119 virtual uint match_edge(uint idx) const { return 0; } | |
1120 virtual const RegMask &out_RegMask() const; | |
1121 virtual const Type *bottom_type() const { return Type::MEMORY; } | |
1122 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; } | |
1123 // sparse accessors | |
1124 // Fetch the previously stored "set_memory_at", or else the base memory. | |
1125 // (Caller should clone it if it is a phi-nest.) | |
1126 Node* memory_at(uint alias_idx) const; | |
1127 // set the memory, regardless of its previous value | |
1128 void set_memory_at(uint alias_idx, Node* n); | |
1129 // the "base" is the memory that provides the non-finite support | |
1130 Node* base_memory() const { return in(Compile::AliasIdxBot); } | |
1131 // warning: setting the base can implicitly set any of the other slices too | |
1132 void set_base_memory(Node* def); | |
1133 // sentinel value which denotes a copy of the base memory: | |
1134 Node* empty_memory() const { return in(Compile::AliasIdxTop); } | |
1135 static Node* make_empty_memory(); // where the sentinel comes from | |
1136 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); } | |
1137 // hook for the iterator, to perform any necessary setup | |
1138 void iteration_setup(const MergeMemNode* other = NULL); | |
1139 // push sentinels until I am at least as long as the other (semantic no-op) | |
1140 void grow_to_match(const MergeMemNode* other); | |
1141 bool verify_sparse() const PRODUCT_RETURN0; | |
1142 #ifndef PRODUCT | |
1143 virtual void dump_spec(outputStream *st) const; | |
1144 #endif | |
1145 }; | |
1146 | |
1147 class MergeMemStream : public StackObj { | |
1148 private: | |
1149 MergeMemNode* _mm; | |
1150 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations | |
1151 Node* _mm_base; // loop-invariant base memory of _mm | |
1152 int _idx; | |
1153 int _cnt; | |
1154 Node* _mem; | |
1155 Node* _mem2; | |
1156 int _cnt2; | |
1157 | |
1158 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) { | |
1159 // subsume_node will break sparseness at times, whenever a memory slice | |
1160 // folds down to a copy of the base ("fat") memory. In such a case, | |
1161 // the raw edge will update to base, although it should be top. | |
1162 // This iterator will recognize either top or base_memory as an | |
1163 // "empty" slice. See is_empty, is_empty2, and next below. | |
1164 // | |
1165 // The sparseness property is repaired in MergeMemNode::Ideal. | |
1166 // As long as access to a MergeMem goes through this iterator | |
1167 // or the memory_at accessor, flaws in the sparseness will | |
1168 // never be observed. | |
1169 // | |
1170 // Also, iteration_setup repairs sparseness. | |
1171 assert(mm->verify_sparse(), "please, no dups of base"); | |
1172 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base"); | |
1173 | |
1174 _mm = mm; | |
1175 _mm_base = mm->base_memory(); | |
1176 _mm2 = mm2; | |
1177 _cnt = mm->req(); | |
1178 _idx = Compile::AliasIdxBot-1; // start at the base memory | |
1179 _mem = NULL; | |
1180 _mem2 = NULL; | |
1181 } | |
1182 | |
1183 #ifdef ASSERT | |
1184 Node* check_memory() const { | |
1185 if (at_base_memory()) | |
1186 return _mm->base_memory(); | |
1187 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top()) | |
1188 return _mm->memory_at(_idx); | |
1189 else | |
1190 return _mm_base; | |
1191 } | |
1192 Node* check_memory2() const { | |
1193 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx); | |
1194 } | |
1195 #endif | |
1196 | |
1197 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0; | |
1198 void assert_synch() const { | |
1199 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx), | |
1200 "no side-effects except through the stream"); | |
1201 } | |
1202 | |
1203 public: | |
1204 | |
1205 // expected usages: | |
1206 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... } | |
1207 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... } | |
1208 | |
1209 // iterate over one merge | |
1210 MergeMemStream(MergeMemNode* mm) { | |
1211 mm->iteration_setup(); | |
1212 init(mm); | |
1213 debug_only(_cnt2 = 999); | |
1214 } | |
1215 // iterate in parallel over two merges | |
1216 // only iterates through non-empty elements of mm2 | |
1217 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) { | |
1218 assert(mm2, "second argument must be a MergeMem also"); | |
1219 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state | |
1220 mm->iteration_setup(mm2); | |
1221 init(mm, mm2); | |
1222 _cnt2 = mm2->req(); | |
1223 } | |
1224 #ifdef ASSERT | |
1225 ~MergeMemStream() { | |
1226 assert_synch(); | |
1227 } | |
1228 #endif | |
1229 | |
1230 MergeMemNode* all_memory() const { | |
1231 return _mm; | |
1232 } | |
1233 Node* base_memory() const { | |
1234 assert(_mm_base == _mm->base_memory(), "no update to base memory, please"); | |
1235 return _mm_base; | |
1236 } | |
1237 const MergeMemNode* all_memory2() const { | |
1238 assert(_mm2 != NULL, ""); | |
1239 return _mm2; | |
1240 } | |
1241 bool at_base_memory() const { | |
1242 return _idx == Compile::AliasIdxBot; | |
1243 } | |
1244 int alias_idx() const { | |
1245 assert(_mem, "must call next 1st"); | |
1246 return _idx; | |
1247 } | |
1248 | |
1249 const TypePtr* adr_type() const { | |
1250 return Compile::current()->get_adr_type(alias_idx()); | |
1251 } | |
1252 | |
1253 const TypePtr* adr_type(Compile* C) const { | |
1254 return C->get_adr_type(alias_idx()); | |
1255 } | |
1256 bool is_empty() const { | |
1257 assert(_mem, "must call next 1st"); | |
1258 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel"); | |
1259 return _mem->is_top(); | |
1260 } | |
1261 bool is_empty2() const { | |
1262 assert(_mem2, "must call next 1st"); | |
1263 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel"); | |
1264 return _mem2->is_top(); | |
1265 } | |
1266 Node* memory() const { | |
1267 assert(!is_empty(), "must not be empty"); | |
1268 assert_synch(); | |
1269 return _mem; | |
1270 } | |
1271 // get the current memory, regardless of empty or non-empty status | |
1272 Node* force_memory() const { | |
1273 assert(!is_empty() || !at_base_memory(), ""); | |
1274 // Use _mm_base to defend against updates to _mem->base_memory(). | |
1275 Node *mem = _mem->is_top() ? _mm_base : _mem; | |
1276 assert(mem == check_memory(), ""); | |
1277 return mem; | |
1278 } | |
1279 Node* memory2() const { | |
1280 assert(_mem2 == check_memory2(), ""); | |
1281 return _mem2; | |
1282 } | |
1283 void set_memory(Node* mem) { | |
1284 if (at_base_memory()) { | |
1285 // Note that this does not change the invariant _mm_base. | |
1286 _mm->set_base_memory(mem); | |
1287 } else { | |
1288 _mm->set_memory_at(_idx, mem); | |
1289 } | |
1290 _mem = mem; | |
1291 assert_synch(); | |
1292 } | |
1293 | |
1294 // Recover from a side effect to the MergeMemNode. | |
1295 void set_memory() { | |
1296 _mem = _mm->in(_idx); | |
1297 } | |
1298 | |
1299 bool next() { return next(false); } | |
1300 bool next2() { return next(true); } | |
1301 | |
1302 bool next_non_empty() { return next_non_empty(false); } | |
1303 bool next_non_empty2() { return next_non_empty(true); } | |
1304 // next_non_empty2 can yield states where is_empty() is true | |
1305 | |
1306 private: | |
1307 // find the next item, which might be empty | |
1308 bool next(bool have_mm2) { | |
1309 assert((_mm2 != NULL) == have_mm2, "use other next"); | |
1310 assert_synch(); | |
1311 if (++_idx < _cnt) { | |
1312 // Note: This iterator allows _mm to be non-sparse. | |
1313 // It behaves the same whether _mem is top or base_memory. | |
1314 _mem = _mm->in(_idx); | |
1315 if (have_mm2) | |
1316 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop); | |
1317 return true; | |
1318 } | |
1319 return false; | |
1320 } | |
1321 | |
1322 // find the next non-empty item | |
1323 bool next_non_empty(bool have_mm2) { | |
1324 while (next(have_mm2)) { | |
1325 if (!is_empty()) { | |
1326 // make sure _mem2 is filled in sensibly | |
1327 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory(); | |
1328 return true; | |
1329 } else if (have_mm2 && !is_empty2()) { | |
1330 return true; // is_empty() == true | |
1331 } | |
1332 } | |
1333 return false; | |
1334 } | |
1335 }; | |
1336 | |
1337 //------------------------------Prefetch--------------------------------------- | |
1338 | |
1339 // Non-faulting prefetch load. Prefetch for many reads. | |
1340 class PrefetchReadNode : public Node { | |
1341 public: | |
1342 PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {} | |
1343 virtual int Opcode() const; | |
1344 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1345 virtual uint match_edge(uint idx) const { return idx==2; } | |
1346 virtual const Type *bottom_type() const { return Type::ABIO; } | |
1347 }; | |
1348 | |
1349 // Non-faulting prefetch load. Prefetch for many reads & many writes. | |
1350 class PrefetchWriteNode : public Node { | |
1351 public: | |
1352 PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {} | |
1353 virtual int Opcode() const; | |
1354 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1355 virtual uint match_edge(uint idx) const { return idx==2; } | |
3854 | 1356 virtual const Type *bottom_type() const { return Type::ABIO; } |
1357 }; | |
1358 | |
1359 // Allocation prefetch which may fault, TLAB size have to be adjusted. | |
1360 class PrefetchAllocationNode : public Node { | |
1361 public: | |
1362 PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {} | |
1363 virtual int Opcode() const; | |
1364 virtual uint ideal_reg() const { return NotAMachineReg; } | |
1365 virtual uint match_edge(uint idx) const { return idx==2; } | |
1367
9e321dcfa5b7
6940726: Use BIS instruction for allocation prefetch on Sparc
kvn
parents:
1198
diff
changeset
|
1366 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } |
0 | 1367 }; |
1972 | 1368 |
1369 #endif // SHARE_VM_OPTO_MEMNODE_HPP |