/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_callnode.cpp.incl"

//=============================================================================
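// StartNode boilerplate.  A StartNode is identified by its incoming argument
// tuple (_domain), which also serves as both its type and its Value.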
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C, 1) ConNode(Type::TOP);
      uint ideal_reg = Matcher::base2reg[t->base()];
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return Matcher::base2reg[t->base()];
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
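// ReturnNode carries the five standard inputs (control, I/O, memory, frame
// pointer, return address); any return value follows at edge TypeFunc::Parms,
// which is why dump_req below prints "returns" at that index.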
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *ReturnNode::Value( PhaseTransform *phase ) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("returns");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *RethrowNode::Value( PhaseTransform *phase ) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("exception");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
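// A JVMState describes one frame of the interpreter's state for debug info.
// Its slices live in the owning map (SafePointNode) as contiguous input
// ranges: locals at [_locoff, _stkoff), expression stack at
// [_stkoff, _monoff), and monitors at [_monoff, _endoff).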
JVMState::JVMState(ciMethod* method, JVMState* caller) {
  assert(method != NULL, "must be valid call site");
  _method = method;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) {
  _method = NULL;
  _bci = InvocationEntryBci;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
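// Return the entry of the caller chain at absolute depth d, where depth 1 is
// the root (outermost) caller.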
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
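// Two JVMStates describe the same nest of calls if every frame in the chain
// agrees on both method and bci.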
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that) return true;
  if (this->depth() != that->depth()) return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method) return false;
    if (p->_method == NULL) return true;  // bci is irrelevant
    if (p->_bci != q->_bci) return false;
    p = p->caller();
    q = q->caller();
    if (p == q) return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
#ifndef PRODUCT
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR, "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::KlassPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}
#endif

//------------------------------format-----------------------------------------
#ifndef PRODUCT
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if( _method ) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for( i = 0; i < (uint)loc_size(); i++ )
      format_helper( regalloc, st, mcall->local(this, i), "L[", i );
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper( regalloc, st, mcall->stack(this, i), "STK[", i );
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
        while( !box->is_BoxLock() )  box = box->in(1);
        format_helper( regalloc, st, box, "MON-BOX[", i );
      } else {
        OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
        st->print(" MON-BOX%d=%s+%d",
                   i,
                   OptoReg::regname(OptoReg::c_frame_pointer),
                   regalloc->reg2offset(box_reg));
      }
      format_helper( regalloc, st, obj, "MON-OBJ[", i );
    }
  }
  st->print_cr("");
  if (caller() != NULL)  caller()->format(regalloc, n, st);
}
#endif

#ifndef PRODUCT
void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}
#endif

#ifndef PRODUCT
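// Dump this JVMState and its entire caller chain.  The low-bit test below
// screens out the (SafePointNode*)-1 sentinel that debug builds store in
// _map before a real map is installed.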
void JVMState::dump_on(outputStream* st) const {
  if (_map && !((uintptr_t)_map & 1)) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(2);
  }
  st->print("JVMS depth=%d loc=%d stk=%d mon=%d end=%d mondepth=%d sp=%d bci=%d method=",
             depth(), locoff(), stkoff(), monoff(), endoff(), monitor_depth(), sp(), bci());
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
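// Clone this JVMState together with every caller frame, so each frame of the
// copy can be rewritten independently.  The map pointers are shared with the
// original, not cloned.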
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("(");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
  tty->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type *CallNode::Value(PhaseTransform *phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
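// The trap request code travels as the first parameter of the uncommon_trap
// call, as a compile-time integer constant.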
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(_in_dump_cnt != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

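// Overwrite the idx-th local in the frame described by jvms.  Writing a real
// value over a top slot may invalidate a long/double whose second half that
// top represented, so the half below is killed as well in that case.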
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
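// Exception states are chained off a SafePointNode through one extra
// precedence edge in slot req(), forming a singly linked list of SafePoints.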
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;

  return NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


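// Grow the expression-stack section of this safepoint's debug info by
// grow_by slots, filling the new slots with top and shifting the monitor
// section up accordingly.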
void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    add_req(NULL);
    add_req(NULL);
  }
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int endoff = jvms()->endoff();
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_endoff(new_endoff);
  while (endoff > new_endoff)  del_req(--endoff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

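// AllocateNode is a macro node: the ReturnAdr and FramePtr slots are stuffed
// with top, while the allocation size, klass, and initial slow-path test
// arrive through extra inputs; the ALength slot is left as top here and is
// presumably filled in only for array allocations.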
AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

//=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant meaning that we don't introduce new lock operations on
// some paths so to be able to eliminate it on others ala PRE.  This
// would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//       | \\------+
//       |  \       \
//       |  BoxLock  \
//       |  |   |     \
//       |  |    \     \
//       |  |    FastLock
//       |  |    /
//       |  |   /
//       |  |  |
//
//       Lock
//       |
//       Proj #0
//       |
//       MembarAcquire
//       |
//       Proj #0
//
//       MembarRelease
//       |
//       Proj #0
//       |
//       Unlock
//       |
//       Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions.  (These may not have been optimized away yet.)
//  - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break;  // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which is
// operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      if ((lock->obj_node() == unlock->obj_node()) &&
          (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top())  break;  // dead control path
    if (ctrl->is_Proj())  ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if ((lock->obj_node() == unlock->obj_node()) &&
        (lock->box_node() == unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if ((lock->obj_node() == lock1->obj_node()) &&
            (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

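// A Region merges several control paths; coarsening applies only if a
// matching unlock is found along every incoming path, possibly through the
// if-diamond pattern handled above.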
bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}
#endif

void AbstractLockNode::set_eliminated() {
  _eliminate = true;
#ifndef PRODUCT
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
#endif
}

//=============================================================================
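// Lock coarsening driver, run during IterGVN.  The graph is not mutated
// here; redundant Lock/Unlock pairs are merely marked as eliminated, and
// macro expansion later turns the marked operations into no-ops.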
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node *result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region() ) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock sits on one half of an if whose input control
        // ends in an unlock and whose other half performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock) locks++;
            else                           unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated to update any counters
          lock->set_eliminated();
        }
      } else if (result != NULL && ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node * result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    // We can eliminate them if there are no safepoints in the locked region.
    //
    ConnectionGraph *cgr = Compile::current()->congraph();
    if (cgr != NULL && cgr->escape_state(obj_node(), phase) == PointsToNode::NoEscape) {
      GrowableArray<AbstractLockNode*> lock_ops;
      LockNode *lock = find_matching_lock(this);
      if (lock != NULL) {
        lock_ops.append(this);
        lock_ops.append(lock);
        // find other unlocks which pair with the lock we found and add them
        // to the list
        Node * box = box_node();

        for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) {
          Node *use = box->fast_out(i);
          if (use->is_Unlock() && use != this) {
            UnlockNode *unlock1 = use->as_Unlock();
            if (!unlock1->is_eliminated()) {
              LockNode *lock1 = find_matching_lock(unlock1);
              if (lock == lock1)
                lock_ops.append(unlock1);
              else if (lock1 == NULL) {
                // we can't find a matching lock, we must assume the worst
                lock_ops.trunc_to(0);
                break;
              }
            }
          }
        }
        if (lock_ops.length() > 0) {

#ifndef PRODUCT
          if (PrintEliminateLocks) {
            int locks = 0;
            int unlocks = 0;
            for (int i = 0; i < lock_ops.length(); i++) {
              AbstractLockNode* lock = lock_ops.at(i);
              if (lock->Opcode() == Op_Lock) locks++;
              else                           unlocks++;
              if (Verbose) {
                lock->dump(1);
              }
            }
            tty->print_cr("***Eliminated %d unescaped unlocks and %d unescaped locks", unlocks, locks);
          }
#endif

          // for each of the identified locks, mark them
          // as eliminatable
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);

            // Mark it eliminated to update any counters
            lock->set_eliminated();
          }
        }
      }
    }
  }
  return result;
}