Mercurial > hg > truffle
annotate src/share/vm/opto/callnode.hpp @ 452:00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes
author | ysr |
---|---|
date | Thu, 20 Nov 2008 12:27:41 -0800 |
parents | a1980da045cc |
children | 424f9bfe6b96 |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // Portions of code courtesy of Clifford Click | |
26 | |
27 // Optimization - Graph Style | |
28 | |
// Forward declarations for the call-node class hierarchy and its helpers.
class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;
50 | |
51 //------------------------------StartNode-------------------------------------- | |
52 // The method start node | |
53 class StartNode : public MultiNode { | |
54 virtual uint cmp( const Node &n ) const; | |
55 virtual uint size_of() const; // Size is bigger | |
56 public: | |
57 const TypeTuple *_domain; | |
58 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) { | |
59 init_class_id(Class_Start); | |
60 init_flags(Flag_is_block_start); | |
61 init_req(0,this); | |
62 init_req(1,root); | |
63 } | |
64 virtual int Opcode() const; | |
65 virtual bool pinned() const { return true; }; | |
66 virtual const Type *bottom_type() const; | |
67 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; } | |
68 virtual const Type *Value( PhaseTransform *phase ) const; | |
69 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
70 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const; | |
71 virtual const RegMask &in_RegMask(uint) const; | |
72 virtual Node *match( const ProjNode *proj, const Matcher *m ); | |
73 virtual uint ideal_reg() const { return 0; } | |
74 #ifndef PRODUCT | |
75 virtual void dump_spec(outputStream *st) const; | |
76 #endif | |
77 }; | |
78 | |
79 //------------------------------StartOSRNode----------------------------------- | |
80 // The method start node for on stack replacement code | |
81 class StartOSRNode : public StartNode { | |
82 public: | |
83 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {} | |
84 virtual int Opcode() const; | |
85 static const TypeTuple *osr_domain(); | |
86 }; | |
87 | |
88 | |
89 //------------------------------ParmNode--------------------------------------- | |
90 // Incoming parameters | |
91 class ParmNode : public ProjNode { | |
92 static const char * const names[TypeFunc::Parms+1]; | |
93 public: | |
33 | 94 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) { |
95 init_class_id(Class_Parm); | |
96 } | |
0 | 97 virtual int Opcode() const; |
98 virtual bool is_CFG() const { return (_con == TypeFunc::Control); } | |
99 virtual uint ideal_reg() const; | |
100 #ifndef PRODUCT | |
101 virtual void dump_spec(outputStream *st) const; | |
102 #endif | |
103 }; | |
104 | |
105 | |
106 //------------------------------ReturnNode------------------------------------- | |
107 // Return from subroutine node | |
108 class ReturnNode : public Node { | |
109 public: | |
110 ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr ); | |
111 virtual int Opcode() const; | |
112 virtual bool is_CFG() const { return true; } | |
113 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash | |
114 virtual bool depends_only_on_test() const { return false; } | |
115 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
116 virtual const Type *Value( PhaseTransform *phase ) const; | |
117 virtual uint ideal_reg() const { return NotAMachineReg; } | |
118 virtual uint match_edge(uint idx) const; | |
119 #ifndef PRODUCT | |
120 virtual void dump_req() const; | |
121 #endif | |
122 }; | |
123 | |
124 | |
125 //------------------------------RethrowNode------------------------------------ | |
126 // Rethrow of exception at call site. Ends a procedure before rethrowing; | |
127 // ends the current basic block like a ReturnNode. Restores registers and | |
128 // unwinds stack. Rethrow happens in the caller's method. | |
129 class RethrowNode : public Node { | |
130 public: | |
131 RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception ); | |
132 virtual int Opcode() const; | |
133 virtual bool is_CFG() const { return true; } | |
134 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash | |
135 virtual bool depends_only_on_test() const { return false; } | |
136 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
137 virtual const Type *Value( PhaseTransform *phase ) const; | |
138 virtual uint match_edge(uint idx) const; | |
139 virtual uint ideal_reg() const { return NotAMachineReg; } | |
140 #ifndef PRODUCT | |
141 virtual void dump_req() const; | |
142 #endif | |
143 }; | |
144 | |
145 | |
146 //------------------------------TailCallNode----------------------------------- | |
147 // Pop stack frame and jump indirect | |
148 class TailCallNode : public ReturnNode { | |
149 public: | |
150 TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop ) | |
151 : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) { | |
152 init_req(TypeFunc::Parms, target); | |
153 init_req(TypeFunc::Parms+1, moop); | |
154 } | |
155 | |
156 virtual int Opcode() const; | |
157 virtual uint match_edge(uint idx) const; | |
158 }; | |
159 | |
160 //------------------------------TailJumpNode----------------------------------- | |
161 // Pop stack frame and jump indirect | |
162 class TailJumpNode : public ReturnNode { | |
163 public: | |
164 TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop) | |
165 : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) { | |
166 init_req(TypeFunc::Parms, target); | |
167 init_req(TypeFunc::Parms+1, ex_oop); | |
168 } | |
169 | |
170 virtual int Opcode() const; | |
171 virtual uint match_edge(uint idx) const; | |
172 }; | |
173 | |
174 //-------------------------------JVMState------------------------------------- | |
175 // A linked list of JVMState nodes captures the whole interpreter state, | |
176 // plus GC roots, for all active calls at some call site in this compilation | |
177 // unit. (If there is no inlining, then the list has exactly one link.) | |
178 // This provides a way to map the optimized program back into the interpreter, | |
179 // or to let the GC mark the stack. | |
180 class JVMState : public ResourceObj { | |
181 private: | |
182 JVMState* _caller; // List pointer for forming scope chains | |
183 uint _depth; // One mroe than caller depth, or one. | |
184 uint _locoff; // Offset to locals in input edge mapping | |
185 uint _stkoff; // Offset to stack in input edge mapping | |
186 uint _monoff; // Offset to monitors in input edge mapping | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
187 uint _scloff; // Offset to fields of scalar objs in input edge mapping |
0 | 188 uint _endoff; // Offset to end of input edge mapping |
189 uint _sp; // Jave Expression Stack Pointer for this state | |
190 int _bci; // Byte Code Index of this JVM point | |
191 ciMethod* _method; // Method Pointer | |
192 SafePointNode* _map; // Map node associated with this scope | |
193 public: | |
194 friend class Compile; | |
195 | |
196 // Because JVMState objects live over the entire lifetime of the | |
197 // Compile object, they are allocated into the comp_arena, which | |
198 // does not get resource marked or reset during the compile process | |
199 void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); } | |
200 void operator delete( void * ) { } // fast deallocation | |
201 | |
202 // Create a new JVMState, ready for abstract interpretation. | |
203 JVMState(ciMethod* method, JVMState* caller); | |
204 JVMState(int stack_size); // root state; has a null method | |
205 | |
206 // Access functions for the JVM | |
207 uint locoff() const { return _locoff; } | |
208 uint stkoff() const { return _stkoff; } | |
209 uint argoff() const { return _stkoff + _sp; } | |
210 uint monoff() const { return _monoff; } | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
211 uint scloff() const { return _scloff; } |
0 | 212 uint endoff() const { return _endoff; } |
213 uint oopoff() const { return debug_end(); } | |
214 | |
215 int loc_size() const { return _stkoff - _locoff; } | |
216 int stk_size() const { return _monoff - _stkoff; } | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
217 int mon_size() const { return _scloff - _monoff; } |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
218 int scl_size() const { return _endoff - _scloff; } |
0 | 219 |
220 bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; } | |
221 bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; } | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
222 bool is_mon(uint i) const { return i >= _monoff && i < _scloff; } |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
223 bool is_scl(uint i) const { return i >= _scloff && i < _endoff; } |
0 | 224 |
225 uint sp() const { return _sp; } | |
226 int bci() const { return _bci; } | |
227 bool has_method() const { return _method != NULL; } | |
228 ciMethod* method() const { assert(has_method(), ""); return _method; } | |
229 JVMState* caller() const { return _caller; } | |
230 SafePointNode* map() const { return _map; } | |
231 uint depth() const { return _depth; } | |
232 uint debug_start() const; // returns locoff of root caller | |
233 uint debug_end() const; // returns endoff of self | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
234 uint debug_size() const { |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
235 return loc_size() + sp() + mon_size() + scl_size(); |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
236 } |
0 | 237 uint debug_depth() const; // returns sum of debug_size values at all depths |
238 | |
239 // Returns the JVM state at the desired depth (1 == root). | |
240 JVMState* of_depth(int d) const; | |
241 | |
242 // Tells if two JVM states have the same call chain (depth, methods, & bcis). | |
243 bool same_calls_as(const JVMState* that) const; | |
244 | |
245 // Monitors (monitors are stored as (boxNode, objNode) pairs | |
246 enum { logMonitorEdges = 1 }; | |
247 int nof_monitors() const { return mon_size() >> logMonitorEdges; } | |
248 int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); } | |
249 int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; } | |
250 int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; } | |
251 bool is_monitor_box(uint off) const { | |
252 assert(is_mon(off), "should be called only for monitor edge"); | |
253 return (0 == bitfield(off - monoff(), 0, logMonitorEdges)); | |
254 } | |
255 bool is_monitor_use(uint off) const { return (is_mon(off) | |
256 && is_monitor_box(off)) | |
257 || (caller() && caller()->is_monitor_use(off)); } | |
258 | |
259 // Initialization functions for the JVM | |
260 void set_locoff(uint off) { _locoff = off; } | |
261 void set_stkoff(uint off) { _stkoff = off; } | |
262 void set_monoff(uint off) { _monoff = off; } | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
263 void set_scloff(uint off) { _scloff = off; } |
0 | 264 void set_endoff(uint off) { _endoff = off; } |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
265 void set_offsets(uint off) { |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
266 _locoff = _stkoff = _monoff = _scloff = _endoff = off; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
267 } |
0 | 268 void set_map(SafePointNode *map) { _map = map; } |
269 void set_sp(uint sp) { _sp = sp; } | |
270 void set_bci(int bci) { _bci = bci; } | |
271 | |
272 // Miscellaneous utility functions | |
273 JVMState* clone_deep(Compile* C) const; // recursively clones caller chain | |
274 JVMState* clone_shallow(Compile* C) const; // retains uncloned caller | |
275 | |
276 #ifndef PRODUCT | |
277 void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const; | |
278 void dump_spec(outputStream *st) const; | |
279 void dump_on(outputStream* st) const; | |
280 void dump() const { | |
281 dump_on(tty); | |
282 } | |
283 #endif | |
284 }; | |
285 | |
286 //------------------------------SafePointNode---------------------------------- | |
287 // A SafePointNode is a subclass of a MultiNode for convenience (and | |
288 // potential code sharing) only - conceptually it is independent of | |
289 // the Node semantics. | |
290 class SafePointNode : public MultiNode { | |
291 virtual uint cmp( const Node &n ) const; | |
292 virtual uint size_of() const; // Size is bigger | |
293 | |
294 public: | |
295 SafePointNode(uint edges, JVMState* jvms, | |
296 // A plain safepoint advertises no memory effects (NULL): | |
297 const TypePtr* adr_type = NULL) | |
298 : MultiNode( edges ), | |
299 _jvms(jvms), | |
300 _oop_map(NULL), | |
301 _adr_type(adr_type) | |
302 { | |
303 init_class_id(Class_SafePoint); | |
304 } | |
305 | |
306 OopMap* _oop_map; // Array of OopMap info (8-bit char) for GC | |
307 JVMState* const _jvms; // Pointer to list of JVM State objects | |
308 const TypePtr* _adr_type; // What type of memory does this node produce? | |
309 | |
310 // Many calls take *all* of memory as input, | |
311 // but some produce a limited subset of that memory as output. | |
312 // The adr_type reports the call's behavior as a store, not a load. | |
313 | |
314 virtual JVMState* jvms() const { return _jvms; } | |
315 void set_jvms(JVMState* s) { | |
316 *(JVMState**)&_jvms = s; // override const attribute in the accessor | |
317 } | |
318 OopMap *oop_map() const { return _oop_map; } | |
319 void set_oop_map(OopMap *om) { _oop_map = om; } | |
320 | |
321 // Functionality from old debug nodes which has changed | |
322 Node *local(JVMState* jvms, uint idx) const { | |
323 assert(verify_jvms(jvms), "jvms must match"); | |
324 return in(jvms->locoff() + idx); | |
325 } | |
326 Node *stack(JVMState* jvms, uint idx) const { | |
327 assert(verify_jvms(jvms), "jvms must match"); | |
328 return in(jvms->stkoff() + idx); | |
329 } | |
330 Node *argument(JVMState* jvms, uint idx) const { | |
331 assert(verify_jvms(jvms), "jvms must match"); | |
332 return in(jvms->argoff() + idx); | |
333 } | |
334 Node *monitor_box(JVMState* jvms, uint idx) const { | |
335 assert(verify_jvms(jvms), "jvms must match"); | |
336 return in(jvms->monitor_box_offset(idx)); | |
337 } | |
338 Node *monitor_obj(JVMState* jvms, uint idx) const { | |
339 assert(verify_jvms(jvms), "jvms must match"); | |
340 return in(jvms->monitor_obj_offset(idx)); | |
341 } | |
342 | |
343 void set_local(JVMState* jvms, uint idx, Node *c); | |
344 | |
345 void set_stack(JVMState* jvms, uint idx, Node *c) { | |
346 assert(verify_jvms(jvms), "jvms must match"); | |
347 set_req(jvms->stkoff() + idx, c); | |
348 } | |
349 void set_argument(JVMState* jvms, uint idx, Node *c) { | |
350 assert(verify_jvms(jvms), "jvms must match"); | |
351 set_req(jvms->argoff() + idx, c); | |
352 } | |
353 void ensure_stack(JVMState* jvms, uint stk_size) { | |
354 assert(verify_jvms(jvms), "jvms must match"); | |
355 int grow_by = (int)stk_size - (int)jvms->stk_size(); | |
356 if (grow_by > 0) grow_stack(jvms, grow_by); | |
357 } | |
358 void grow_stack(JVMState* jvms, uint grow_by); | |
359 // Handle monitor stack | |
360 void push_monitor( const FastLockNode *lock ); | |
361 void pop_monitor (); | |
362 Node *peek_monitor_box() const; | |
363 Node *peek_monitor_obj() const; | |
364 | |
365 // Access functions for the JVM | |
366 Node *control () const { return in(TypeFunc::Control ); } | |
367 Node *i_o () const { return in(TypeFunc::I_O ); } | |
368 Node *memory () const { return in(TypeFunc::Memory ); } | |
369 Node *returnadr() const { return in(TypeFunc::ReturnAdr); } | |
370 Node *frameptr () const { return in(TypeFunc::FramePtr ); } | |
371 | |
372 void set_control ( Node *c ) { set_req(TypeFunc::Control,c); } | |
373 void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); } | |
374 void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); } | |
375 | |
376 MergeMemNode* merged_memory() const { | |
377 return in(TypeFunc::Memory)->as_MergeMem(); | |
378 } | |
379 | |
380 // The parser marks useless maps as dead when it's done with them: | |
381 bool is_killed() { return in(TypeFunc::Control) == NULL; } | |
382 | |
383 // Exception states bubbling out of subgraphs such as inlined calls | |
384 // are recorded here. (There might be more than one, hence the "next".) | |
385 // This feature is used only for safepoints which serve as "maps" | |
386 // for JVM states during parsing, intrinsic expansion, etc. | |
387 SafePointNode* next_exception() const; | |
388 void set_next_exception(SafePointNode* n); | |
389 bool has_exceptions() const { return next_exception() != NULL; } | |
390 | |
391 // Standard Node stuff | |
392 virtual int Opcode() const; | |
393 virtual bool pinned() const { return true; } | |
394 virtual const Type *Value( PhaseTransform *phase ) const; | |
395 virtual const Type *bottom_type() const { return Type::CONTROL; } | |
396 virtual const TypePtr *adr_type() const { return _adr_type; } | |
397 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
398 virtual Node *Identity( PhaseTransform *phase ); | |
399 virtual uint ideal_reg() const { return 0; } | |
400 virtual const RegMask &in_RegMask(uint) const; | |
401 virtual const RegMask &out_RegMask() const; | |
402 virtual uint match_edge(uint idx) const; | |
403 | |
404 static bool needs_polling_address_input(); | |
405 | |
406 #ifndef PRODUCT | |
407 virtual void dump_spec(outputStream *st) const; | |
408 #endif | |
409 }; | |
410 | |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
411 //------------------------------SafePointScalarObjectNode---------------------- |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
412 // A SafePointScalarObjectNode represents the state of a scalarized object |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
413 // at a safepoint. |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
414 |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
415 class SafePointScalarObjectNode: public TypeNode { |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
416 uint _first_index; // First input edge index of a SafePoint node where |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
417 // states of the scalarized object fields are collected. |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
418 uint _n_fields; // Number of non-static fields of the scalarized object. |
74
2a9af0b9cb1c
6674600: (Escape Analysis) Optimize memory graph for instance's fields
kvn
parents:
65
diff
changeset
|
419 DEBUG_ONLY(AllocateNode* _alloc;) |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
420 public: |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
421 SafePointScalarObjectNode(const TypeOopPtr* tp, |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
422 #ifdef ASSERT |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
423 AllocateNode* alloc, |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
424 #endif |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
425 uint first_index, uint n_fields); |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
426 virtual int Opcode() const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
427 virtual uint ideal_reg() const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
428 virtual const RegMask &in_RegMask(uint) const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
429 virtual const RegMask &out_RegMask() const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
430 virtual uint match_edge(uint idx) const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
431 |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
432 uint first_index() const { return _first_index; } |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
433 uint n_fields() const { return _n_fields; } |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
434 DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; }) |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
435 |
420
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
436 // SafePointScalarObject should be always pinned to the control edge |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
437 // of the SafePoint node for which it was generated. |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
438 virtual bool pinned() const; // { return true; } |
a1980da045cc
6462850: generate biased locking code in C2 ideal graph
kvn
parents:
366
diff
changeset
|
439 |
63
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
440 virtual uint size_of() const { return sizeof(*this); } |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
441 |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
442 // Assumes that "this" is an argument to a safepoint node "s", and that |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
443 // "new_call" is being created to correspond to "s". But the difference |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
444 // between the start index of the jvmstates of "new_call" and "s" is |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
445 // "jvms_adj". Produce and return a SafePointScalarObjectNode that |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
446 // corresponds appropriately to "this" in "new_call". Assumes that |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
447 // "sosn_map" is a map, specific to the translation of "s" to "new_call", |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
448 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies. |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
449 SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
450 |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
451 #ifndef PRODUCT |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
452 virtual void dump_spec(outputStream *st) const; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
453 #endif |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
454 }; |
eac007780a58
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
kvn
parents:
39
diff
changeset
|
455 |
0 | 456 //------------------------------CallNode--------------------------------------- |
457 // Call nodes now subsume the function of debug nodes at callsites, so they | |
458 // contain the functionality of a full scope chain of debug nodes. | |
459 class CallNode : public SafePointNode { | |
460 public: | |
461 const TypeFunc *_tf; // Function type | |
462 address _entry_point; // Address of method being called | |
463 float _cnt; // Estimate of number of times called | |
464 | |
465 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type) | |
466 : SafePointNode(tf->domain()->cnt(), NULL, adr_type), | |
467 _tf(tf), | |
468 _entry_point(addr), | |
469 _cnt(COUNT_UNKNOWN) | |
470 { | |
471 init_class_id(Class_Call); | |
472 init_flags(Flag_is_Call); | |
473 } | |
474 | |
475 const TypeFunc* tf() const { return _tf; } | |
476 const address entry_point() const { return _entry_point; } | |
477 const float cnt() const { return _cnt; } | |
478 | |
479 void set_tf(const TypeFunc* tf) { _tf = tf; } | |
480 void set_entry_point(address p) { _entry_point = p; } | |
481 void set_cnt(float c) { _cnt = c; } | |
482 | |
483 virtual const Type *bottom_type() const; | |
484 virtual const Type *Value( PhaseTransform *phase ) const; | |
485 virtual Node *Identity( PhaseTransform *phase ) { return this; } | |
486 virtual uint cmp( const Node &n ) const; | |
487 virtual uint size_of() const = 0; | |
488 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const; | |
489 virtual Node *match( const ProjNode *proj, const Matcher *m ); | |
490 virtual uint ideal_reg() const { return NotAMachineReg; } | |
491 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and | |
492 // for some macro nodes whose expansion does not have a safepoint on the fast path. | |
493 virtual bool guaranteed_safepoint() { return true; } | |
494 // For macro nodes, the JVMState gets modified during expansion, so when cloning | |
495 // the node the JVMState must be cloned. | |
496 virtual void clone_jvms() { } // default is not to clone | |
497 | |
65 | 498 // Returns true if the call may modify n |
499 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase); | |
500 // Does this node have a use of n other than in debug information? | |
168
7793bd37a336
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
kvn
parents:
113
diff
changeset
|
501 bool has_non_debug_use(Node *n); |
65 | 502 // Returns the unique CheckCastPP of a call |
503 // or result projection is there are several CheckCastPP | |
504 // or returns NULL if there is no one. | |
505 Node *result_cast(); | |
506 | |
0 | 507 virtual uint match_edge(uint idx) const; |
508 | |
509 #ifndef PRODUCT | |
510 virtual void dump_req() const; | |
511 virtual void dump_spec(outputStream *st) const; | |
512 #endif | |
513 }; | |
514 | |
515 //------------------------------CallJavaNode----------------------------------- | |
516 // Make a static or dynamic subroutine call node using Java calling | |
517 // convention. (The "Java" calling convention is the compiler's calling | |
518 // convention, as opposed to the interpreter's or that of native C.) | |
519 class CallJavaNode : public CallNode { | |
520 protected: | |
521 virtual uint cmp( const Node &n ) const; | |
522 virtual uint size_of() const; // Size is bigger | |
523 | |
524 bool _optimized_virtual; | |
525 ciMethod* _method; // Method being direct called | |
526 public: | |
527 const int _bci; // Byte Code Index of call byte code | |
528 CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci) | |
529 : CallNode(tf, addr, TypePtr::BOTTOM), | |
530 _method(method), _bci(bci), _optimized_virtual(false) | |
531 { | |
532 init_class_id(Class_CallJava); | |
533 } | |
534 | |
535 virtual int Opcode() const; | |
536 ciMethod* method() const { return _method; } | |
537 void set_method(ciMethod *m) { _method = m; } | |
538 void set_optimized_virtual(bool f) { _optimized_virtual = f; } | |
539 bool is_optimized_virtual() const { return _optimized_virtual; } | |
540 | |
541 #ifndef PRODUCT | |
542 virtual void dump_spec(outputStream *st) const; | |
543 #endif | |
544 }; | |
545 | |
546 //------------------------------CallStaticJavaNode----------------------------- | |
547 // Make a direct subroutine call using Java calling convention (for static | |
548 // calls and optimized virtual calls, plus calls to wrappers for run-time | |
549 // routines); generates static stub. | |
550 class CallStaticJavaNode : public CallJavaNode { | |
551 virtual uint cmp( const Node &n ) const; | |
552 virtual uint size_of() const; // Size is bigger | |
553 public: | |
554 CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci) | |
555 : CallJavaNode(tf, addr, method, bci), _name(NULL) { | |
556 init_class_id(Class_CallStaticJava); | |
557 } | |
558 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci, | |
559 const TypePtr* adr_type) | |
560 : CallJavaNode(tf, addr, NULL, bci), _name(name) { | |
561 init_class_id(Class_CallStaticJava); | |
562 // This node calls a runtime stub, which often has narrow memory effects. | |
563 _adr_type = adr_type; | |
564 } | |
565 const char *_name; // Runtime wrapper name | |
566 | |
567 // If this is an uncommon trap, return the request code, else zero. | |
568 int uncommon_trap_request() const; | |
569 static int extract_uncommon_trap_request(const Node* call); | |
570 | |
571 virtual int Opcode() const; | |
572 #ifndef PRODUCT | |
573 virtual void dump_spec(outputStream *st) const; | |
574 #endif | |
575 }; | |
576 | |
577 //------------------------------CallDynamicJavaNode---------------------------- | |
578 // Make a dispatched call using Java calling convention. | |
579 class CallDynamicJavaNode : public CallJavaNode { | |
580 virtual uint cmp( const Node &n ) const; | |
581 virtual uint size_of() const; // Size is bigger | |
582 public: | |
583 CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) { | |
584 init_class_id(Class_CallDynamicJava); | |
585 } | |
586 | |
587 int _vtable_index; | |
588 virtual int Opcode() const; | |
589 #ifndef PRODUCT | |
590 virtual void dump_spec(outputStream *st) const; | |
591 #endif | |
592 }; | |
593 | |
594 //------------------------------CallRuntimeNode-------------------------------- | |
595 // Make a direct subroutine call node into compiled C++ code. | |
596 class CallRuntimeNode : public CallNode { | |
597 virtual uint cmp( const Node &n ) const; | |
598 virtual uint size_of() const; // Size is bigger | |
599 public: | |
600 CallRuntimeNode(const TypeFunc* tf, address addr, const char* name, | |
601 const TypePtr* adr_type) | |
602 : CallNode(tf, addr, adr_type), | |
603 _name(name) | |
604 { | |
605 init_class_id(Class_CallRuntime); | |
606 } | |
607 | |
608 const char *_name; // Printable name, if _method is NULL | |
609 virtual int Opcode() const; | |
610 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const; | |
611 | |
612 #ifndef PRODUCT | |
613 virtual void dump_spec(outputStream *st) const; | |
614 #endif | |
615 }; | |
616 | |
617 //------------------------------CallLeafNode----------------------------------- | |
618 // Make a direct subroutine call node into compiled C++ code, without | |
619 // safepoints | |
620 class CallLeafNode : public CallRuntimeNode { | |
621 public: | |
622 CallLeafNode(const TypeFunc* tf, address addr, const char* name, | |
623 const TypePtr* adr_type) | |
624 : CallRuntimeNode(tf, addr, name, adr_type) | |
625 { | |
626 init_class_id(Class_CallLeaf); | |
627 } | |
628 virtual int Opcode() const; | |
629 virtual bool guaranteed_safepoint() { return false; } | |
630 #ifndef PRODUCT | |
631 virtual void dump_spec(outputStream *st) const; | |
632 #endif | |
633 }; | |
634 | |
635 //------------------------------CallLeafNoFPNode------------------------------- | |
636 // CallLeafNode, not using floating point or using it in the same manner as | |
637 // the generated code | |
638 class CallLeafNoFPNode : public CallLeafNode { | |
639 public: | |
640 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name, | |
641 const TypePtr* adr_type) | |
642 : CallLeafNode(tf, addr, name, adr_type) | |
643 { | |
644 } | |
645 virtual int Opcode() const; | |
646 }; | |
647 | |
648 | |
649 //------------------------------Allocate--------------------------------------- | |
650 // High-level memory allocation | |
651 // | |
652 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will | |
653 // get expanded into a code sequence containing a call. Unlike other CallNodes, | |
654 // they have 2 memory projections and 2 i_o projections (which are distinguished by | |
655 // the _is_io_use flag in the projection.) This is needed when expanding the node in | |
656 // order to differentiate the uses of the projection on the normal control path from | |
657 // those on the exception return path. | |
658 // | |
659 class AllocateNode : public CallNode { | |
660 public: | |
661 enum { | |
662 // Output: | |
663 RawAddress = TypeFunc::Parms, // the newly-allocated raw address | |
664 // Inputs: | |
665 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object | |
666 KlassNode, // type (maybe dynamic) of the obj. | |
667 InitialTest, // slow-path test (may be constant) | |
668 ALength, // array length (or TOP if none) | |
669 ParmLimit | |
670 }; | |
671 | |
672 static const TypeFunc* alloc_type() { | |
673 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms); | |
674 fields[AllocSize] = TypeInt::POS; | |
675 fields[KlassNode] = TypeInstPtr::NOTNULL; | |
676 fields[InitialTest] = TypeInt::BOOL; | |
677 fields[ALength] = TypeInt::INT; // length (can be a bad length) | |
678 | |
679 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); | |
680 | |
681 // create result type (range) | |
682 fields = TypeTuple::fields(1); | |
683 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop | |
684 | |
685 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); | |
686 | |
687 return TypeFunc::make(domain, range); | |
688 } | |
689 | |
39
76256d272075
6667612: (Escape Analysis) disable loop cloning if it has a scalar replaceable allocation
kvn
parents:
33
diff
changeset
|
690 bool _is_scalar_replaceable; // Result of Escape Analysis |
76256d272075
6667612: (Escape Analysis) disable loop cloning if it has a scalar replaceable allocation
kvn
parents:
33
diff
changeset
|
691 |
0 | 692 virtual uint size_of() const; // Size is bigger |
693 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, | |
694 Node *size, Node *klass_node, Node *initial_test); | |
695 // Expansion modifies the JVMState, so we need to clone it | |
696 virtual void clone_jvms() { | |
697 set_jvms(jvms()->clone_deep(Compile::current())); | |
698 } | |
699 virtual int Opcode() const; | |
700 virtual uint ideal_reg() const { return Op_RegP; } | |
701 virtual bool guaranteed_safepoint() { return false; } | |
702 | |
65 | 703 // allocations do not modify their arguments |
704 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;} | |
705 | |
0 | 706 // Pattern-match a possible usage of AllocateNode. |
707 // Return null if no allocation is recognized. | |
708 // The operand is the pointer produced by the (possible) allocation. | |
709 // It must be a projection of the Allocate or its subsequent CastPP. | |
710 // (Note: This function is defined in file graphKit.cpp, near | |
711 // GraphKit::new_instance/new_array, whose output it recognizes.) | |
712 // The 'ptr' may not have an offset unless the 'offset' argument is given. | |
713 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase); | |
714 | |
715 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip | |
716 // an offset, which is reported back to the caller. | |
717 // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.) | |
718 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase, | |
719 intptr_t& offset); | |
720 | |
721 // Dig the klass operand out of a (possible) allocation site. | |
722 static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) { | |
723 AllocateNode* allo = Ideal_allocation(ptr, phase); | |
724 return (allo == NULL) ? NULL : allo->in(KlassNode); | |
725 } | |
726 | |
727 // Conservatively small estimate of offset of first non-header byte. | |
728 int minimum_header_size() { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
729 return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
74
diff
changeset
|
730 instanceOopDesc::base_offset_in_bytes(); |
0 | 731 } |
732 | |
733 // Return the corresponding initialization barrier (or null if none). | |
734 // Walks out edges to find it... | |
735 // (Note: Both InitializeNode::allocation and AllocateNode::initialization | |
736 // are defined in graphKit.cpp, which sets up the bidirectional relation.) | |
737 InitializeNode* initialization(); | |
738 | |
739 // Convenience for initialization->maybe_set_complete(phase) | |
740 bool maybe_set_complete(PhaseGVN* phase); | |
741 }; | |
742 | |
743 //------------------------------AllocateArray--------------------------------- | |
744 // | |
745 // High-level array allocation | |
746 // | |
747 class AllocateArrayNode : public AllocateNode { | |
748 public: | |
749 AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, | |
750 Node* size, Node* klass_node, Node* initial_test, | |
751 Node* count_val | |
752 ) | |
753 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, | |
754 initial_test) | |
755 { | |
756 init_class_id(Class_AllocateArray); | |
757 set_req(AllocateNode::ALength, count_val); | |
758 } | |
759 virtual int Opcode() const; | |
760 virtual uint size_of() const; // Size is bigger | |
761 | |
366
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
762 // Dig the length operand out of a array allocation site. |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
763 Node* Ideal_length() { |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
764 return in(AllocateNode::ALength); |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
765 } |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
766 |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
767 // Dig the length operand out of a array allocation site and narrow the |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
768 // type with a CastII, if necesssary |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
769 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true); |
8261ee795323
6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
rasbold
parents:
196
diff
changeset
|
770 |
0 | 771 // Pattern-match a possible usage of AllocateArrayNode. |
772 // Return null if no allocation is recognized. | |
773 static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) { | |
774 AllocateNode* allo = Ideal_allocation(ptr, phase); | |
775 return (allo == NULL || !allo->is_AllocateArray()) | |
776 ? NULL : allo->as_AllocateArray(); | |
777 } | |
778 }; | |
779 | |
780 //------------------------------AbstractLockNode----------------------------------- | |
781 class AbstractLockNode: public CallNode { | |
782 private: | |
783 bool _eliminate; // indicates this lock can be safely eliminated | |
784 #ifndef PRODUCT | |
785 NamedCounter* _counter; | |
786 #endif | |
787 | |
788 protected: | |
789 // helper functions for lock elimination | |
790 // | |
791 | |
792 bool find_matching_unlock(const Node* ctrl, LockNode* lock, | |
793 GrowableArray<AbstractLockNode*> &lock_ops); | |
794 bool find_lock_and_unlock_through_if(Node* node, LockNode* lock, | |
795 GrowableArray<AbstractLockNode*> &lock_ops); | |
796 bool find_unlocks_for_region(const RegionNode* region, LockNode* lock, | |
797 GrowableArray<AbstractLockNode*> &lock_ops); | |
798 LockNode *find_matching_lock(UnlockNode* unlock); | |
799 | |
800 | |
801 public: | |
802 AbstractLockNode(const TypeFunc *tf) | |
803 : CallNode(tf, NULL, TypeRawPtr::BOTTOM), | |
804 _eliminate(false) | |
805 { | |
806 #ifndef PRODUCT | |
807 _counter = NULL; | |
808 #endif | |
809 } | |
810 virtual int Opcode() const = 0; | |
811 Node * obj_node() const {return in(TypeFunc::Parms + 0); } | |
812 Node * box_node() const {return in(TypeFunc::Parms + 1); } | |
813 Node * fastlock_node() const {return in(TypeFunc::Parms + 2); } | |
814 const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;} | |
815 | |
816 virtual uint size_of() const { return sizeof(*this); } | |
817 | |
818 bool is_eliminated() {return _eliminate; } | |
819 // mark node as eliminated and update the counter if there is one | |
820 void set_eliminated(); | |
821 | |
65 | 822 // locking does not modify its arguments |
823 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;} | |
824 | |
0 | 825 #ifndef PRODUCT |
826 void create_lock_counter(JVMState* s); | |
827 NamedCounter* counter() const { return _counter; } | |
828 #endif | |
829 }; | |
830 | |
831 //------------------------------Lock--------------------------------------- | |
832 // High-level lock operation | |
833 // | |
834 // This is a subclass of CallNode because it is a macro node which gets expanded | |
835 // into a code sequence containing a call. This node takes 3 "parameters": | |
836 // 0 - object to lock | |
837 // 1 - a BoxLockNode | |
838 // 2 - a FastLockNode | |
839 // | |
840 class LockNode : public AbstractLockNode { | |
841 public: | |
842 | |
843 static const TypeFunc *lock_type() { | |
844 // create input type (domain) | |
845 const Type **fields = TypeTuple::fields(3); | |
846 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked | |
847 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock | |
848 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock | |
849 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields); | |
850 | |
851 // create result type (range) | |
852 fields = TypeTuple::fields(0); | |
853 | |
854 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); | |
855 | |
856 return TypeFunc::make(domain,range); | |
857 } | |
858 | |
859 virtual int Opcode() const; | |
860 virtual uint size_of() const; // Size is bigger | |
861 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) { | |
862 init_class_id(Class_Lock); | |
863 init_flags(Flag_is_macro); | |
864 C->add_macro_node(this); | |
865 } | |
866 virtual bool guaranteed_safepoint() { return false; } | |
867 | |
868 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
869 // Expansion modifies the JVMState, so we need to clone it | |
870 virtual void clone_jvms() { | |
871 set_jvms(jvms()->clone_deep(Compile::current())); | |
872 } | |
873 }; | |
874 | |
875 //------------------------------Unlock--------------------------------------- | |
876 // High-level unlock operation | |
877 class UnlockNode : public AbstractLockNode { | |
878 public: | |
879 virtual int Opcode() const; | |
880 virtual uint size_of() const; // Size is bigger | |
881 UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) { | |
882 init_class_id(Class_Unlock); | |
883 init_flags(Flag_is_macro); | |
884 C->add_macro_node(this); | |
885 } | |
886 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); | |
887 // unlock is never a safepoint | |
888 virtual bool guaranteed_safepoint() { return false; } | |
889 }; |