annotate src/share/vm/opto/parse1.cpp @ 452:00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes
author:   ysr
date:     Thu, 20 Nov 2008 12:27:41 -0800
parents:  194b8e3a2fc4
children: 98cb887364d3
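Note: the card-dirtying fix described in the summary lives in the CMS reference-processing code, not in this file; parse1.cpp is simply the file being annotated at this revision. As a minimal sketch of the overflow strategy the summary describes (all names below are hypothetical, not actual HotSpot APIs):

    // Sketch only (hypothetical types, not HotSpot code): on marking-stack
    // overflow during precleaning, dirty the card covering the overflowed
    // object instead of linking it through its mark word onto a global
    // overflow list; a later concurrent pass rescans dirty cards, so no
    // object header is mutated and mutator accesses are undisturbed.
    struct MarkStack { bool try_push(void* p);             }; // hypothetical
    struct CardTable { void dirty_card_for(const void* p); }; // hypothetical

    void preclean_mark_or_defer(void* obj, MarkStack& stack, CardTable& ct) {
      if (!stack.try_push(obj)) {   // overflow: the marking stack is full
        ct.dirty_card_for(obj);     // defer: the card is rescanned later
      }
    }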
/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse1.cpp.incl"

// Static array so we can figure out which bytecodes stop us from compiling
// the most. Some of the non-static variables are needed in bytecodeInfo.cpp
// and eventually should be encapsulated in a proper class (gri 8/18/98).

int nodes_created  = 0;
int methods_parsed = 0;
int methods_seen   = 0;
int blocks_parsed  = 0;
int blocks_seen    = 0;

int explicit_null_checks_inserted = 0;
int explicit_null_checks_elided   = 0;
int all_null_checks_found = 0, implicit_null_checks = 0;
int implicit_null_throws = 0;

int reclaim_idx  = 0;
int reclaim_in   = 0;
int reclaim_node = 0;

#ifndef PRODUCT
bool Parse::BytecodeParseHistogram::_initialized = false;
uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];
#endif

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void Parse::print_statistics() {
  tty->print_cr("--- Compiler Statistics ---");
  tty->print("Methods seen: %d Methods parsed: %d", methods_seen, methods_parsed);
  tty->print(" Nodes created: %d", nodes_created);
  tty->cr();
  if (methods_seen != methods_parsed)
    tty->print_cr("Reasons for parse failures (NOT cumulative):");
  tty->print_cr("Blocks parsed: %d Blocks seen: %d", blocks_parsed, blocks_seen);

  if( explicit_null_checks_inserted )
    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
  if( all_null_checks_found )
    tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
                  (100*implicit_null_checks)/all_null_checks_found);
  if( implicit_null_throws )
    tty->print_cr("%d implicit null exceptions at runtime",
                  implicit_null_throws);

  if( PrintParseStatistics && BytecodeParseHistogram::initialized() ) {
    BytecodeParseHistogram::print();
  }
}
#endif

//------------------------------ON STACK REPLACEMENT---------------------------

// Construct a node which can be used to get incoming state for
// on stack replacement.
Node *Parse::fetch_interpreter_state(int index,
                                     BasicType bt,
                                     Node *local_addrs,
                                     Node *local_addrs_base) {
  Node *mem = memory(Compile::AliasIdxRaw);
  Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );

  // Very similar to LoadNode::make, except we handle un-aligned longs and
  // doubles on Sparc. Intel can handle them just fine directly.
  Node *l;
  switch( bt ) {                // Signature is flattened
  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_ADDRESS:
  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
  case T_LONG:
  case T_DOUBLE: {
    // Since arguments are in reverse order, the argument address 'adr'
    // refers to the back half of the long/double. Recompute adr.
    adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
    if( Matcher::misaligned_doubles_ok ) {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    } else {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    }
    break;
  }
  default: ShouldNotReachHere();
  }
  return _gvn.transform(l);
}

// Helper routine to prevent the interpreter from handing
// unexpected typestate to an OSR method.
// The Node l is a value newly dug out of the interpreter frame.
// The type is the type predicted by ciTypeFlow. Note that it is
// not a general type, but can only come from Type::get_typeflow_type.
// The safepoint is a map which will feed an uncommon trap.
Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                    SafePointNode* &bad_type_exit) {

  const TypeOopPtr* tp = type->isa_oopptr();

  // TypeFlow may assert null-ness if a type appears unloaded.
  if (type == TypePtr::NULL_PTR ||
      (tp != NULL && !tp->klass()->is_loaded())) {
    // Value must be null, not a real oop.
    Node* chk = _gvn.transform( new (C, 3) CmpPNode(l, null()) );
    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
    IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
    Node* bad_type = _gvn.transform( new (C, 1) IfFalseNode(iff) );
    bad_type_exit->control()->add_req(bad_type);
    l = null();
  }

  // Typeflow can also cut off paths from the CFG, based on
  // types which appear unloaded, or call sites which appear unlinked.
  // When paths are cut off, values at later merge points can rise
  // toward more specific classes. Make sure these specific classes
  // are still in effect.
  if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
    // TypeFlow asserted a specific object type. Value must have that type.
    Node* bad_type_ctrl = NULL;
    l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
    bad_type_exit->control()->add_req(bad_type_ctrl);
  }

  BasicType bt_l = _gvn.type(l)->basic_type();
  BasicType bt_t = type->basic_type();
  assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
  return l;
}

// Helper routine which sets up elements of the initial parser map when
// performing a parse for on stack replacement. Add values into map.
// The only parameter contains the address of the interpreter arguments.
void Parse::load_interpreter_state(Node* osr_buf) {
  int index;
  int max_locals = jvms()->loc_size();
  int max_stack  = jvms()->stk_size();


  // Mismatch between method and jvms can occur since map briefly held
  // an OSR entry state (which takes up one RawPtr word).
  assert(max_locals == method()->max_locals(), "sanity");
  assert(max_stack  >= method()->max_stack(),  "sanity");
  assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
  assert((int)jvms()->endoff() == (int)map()->req(), "sanity");

  // Find the start block.
  Block* osr_block = start_block();
  assert(osr_block->start() == osr_bci(), "sanity");

  // Set initial BCI.
  set_parse_bci(osr_block->start());

  // Set initial stack depth.
  set_sp(osr_block->start_sp());

  // Check bailouts. We currently do not perform on stack replacement
  // of loops in catch blocks or loops which branch with a non-empty stack.
  if (sp() != 0) {
    C->record_method_not_compilable("OSR starts with non-empty stack");
    return;
  }
  // Do not OSR inside finally clauses:
  if (osr_block->has_trap_at(osr_block->start())) {
    C->record_method_not_compilable("OSR starts with an immediate trap");
    return;
  }

  // Commute monitors from interpreter frame to compiler frame.
  assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
  int mcnt = osr_block->flow()->monitor_count();
  Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
  for (index = 0; index < mcnt; index++) {
    // Make a BoxLockNode for the monitor.
    Node *box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor()));


    // Displaced headers and locked objects are interleaved in the
    // temp OSR buffer. We only copy the locked objects out here.
    // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
    // Try and copy the displaced header to the BoxNode
    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);


    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);

    // Build a bogus FastLockNode (no code will be generated) and push the
    // monitor into our debug info.
    const FastLockNode *flock = _gvn.transform(new (C, 3) FastLockNode( 0, lock_object, box ))->as_FastLock();
    map()->push_monitor(flock);

    // If the lock is our method synchronization lock, tuck it away in
    // _sync_lock for return and rethrow exit paths.
    if (index == 0 && method()->is_synchronized()) {
      _synch_lock = flock;
    }
  }

  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
  if (!live_locals.is_valid()) {
    // Degenerate or breakpointed method.
    C->record_method_not_compilable("OSR in empty or breakpointed method");
    return;
  }

  // Extract the needed locals from the interpreter frame.
  Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);

  // find all the locals that the interpreter thinks contain live oops
  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
  for (index = 0; index < max_locals; index++) {

    if (!live_locals.at(index)) {
      continue;
    }

    const Type *type = osr_block->local_type_at(index);

    if (type->isa_oopptr() != NULL) {

      // 6403625: Verify that the interpreter oopMap thinks that the oop is live
      // else we might load a stale oop if the MethodLiveness disagrees with the
      // result of the interpreter. If the interpreter says it is dead we agree
      // by making the value go to top.
      //

      if (!live_oops.at(index)) {
        if (C->log() != NULL) {
          C->log()->elem("OSR_mismatch local_index='%d'",index);
        }
        set_local(index, null());
        // and ignore it for the loads
        continue;
      }
    }

    // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
    if (type == Type::TOP || type == Type::HALF) {
      continue;
    }
    // If the type falls to bottom, then this must be a local that
    // is mixing ints and oops or some such. Forcing it to top
    // makes it go dead.
    if (type == Type::BOTTOM) {
      continue;
    }
    // Construct code to access the appropriate local.
    Node *value = fetch_interpreter_state(index, type->basic_type(), locals_addr, osr_buf);
    set_local(index, value);
  }

  // Extract the needed stack entries from the interpreter frame.
  for (index = 0; index < sp(); index++) {
    const Type *type = osr_block->stack_type_at(index);
    if (type != Type::TOP) {
      // Currently the compiler bails out when attempting to on stack replace
      // at a bci with a non-empty stack. We should not reach here.
      ShouldNotReachHere();
    }
  }

  // End the OSR migration
  make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
                    CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                    "OSR_migration_end", TypeRawPtr::BOTTOM,
                    osr_buf);

  // Now that the interpreter state is loaded, make sure it will match
  // at execution time what the compiler is expecting now:
  SafePointNode* bad_type_exit = clone_map();
  bad_type_exit->set_control(new (C, 1) RegionNode(1));

  for (index = 0; index < max_locals; index++) {
    if (stopped()) break;
    Node* l = local(index);
    if (l->is_top()) continue;  // nothing here
    const Type *type = osr_block->local_type_at(index);
    if (type->isa_oopptr() != NULL) {
      if (!live_oops.at(index)) {
        // skip type check for dead oops
        continue;
      }
    }
    set_local(index, check_interpreter_type(l, type, bad_type_exit));
  }

  for (index = 0; index < sp(); index++) {
    if (stopped()) break;
    Node* l = stack(index);
    if (l->is_top()) continue;  // nothing here
    const Type *type = osr_block->stack_type_at(index);
    set_stack(index, check_interpreter_type(l, type, bad_type_exit));
  }

  if (bad_type_exit->control()->req() > 1) {
    // Build an uncommon trap here, if any inputs can be unexpected.
    bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
    record_for_igvn(bad_type_exit->control());
    SafePointNode* types_are_good = map();
    set_map(bad_type_exit);
    // The unexpected type happens because a new edge is active
    // in the CFG, which typeflow had previously ignored.
    // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
    // This x will be typed as Integer if notReached is not yet linked.
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret);
    set_map(types_are_good);
  }
}

//------------------------------Parse------------------------------------------
// Main parser constructor.
Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
  : _exits(caller)
{
  // Init some variables
  _caller = caller;
  _method = parse_method;
  _expected_uses = expected_uses;
  _depth = 1 + (caller->has_method() ? caller->depth() : 0);
  _wrote_final = false;
  _entry_bci = InvocationEntryBci;
  _tf = NULL;
  _block = NULL;
  debug_only(_block_count = -1);
  debug_only(_blocks = (Block*)-1);
#ifndef PRODUCT
  if (PrintCompilation || PrintOpto) {
    // Make sure I have an inline tree, so I can print messages about it.
    JVMState* ilt_caller = is_osr_parse() ? caller->caller() : caller;
    InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true);
  }
  _max_switch_depth = 0;
  _est_switch_depth = 0;
#endif

  _tf = TypeFunc::make(method());
  _iter.reset_to_method(method());
  _flow = method()->get_flow_analysis();
  if (_flow->failing()) {
    C->record_method_not_compilable_all_tiers(_flow->failure_reason());
  }

#ifndef PRODUCT
  if (_flow->has_irreducible_entry()) {
    C->set_parsed_irreducible_loop(true);
  }
#endif

  if (_expected_uses <= 0) {
    _prof_factor = 1;
  } else {
    float prof_total = parse_method->interpreter_invocation_count();
    if (prof_total <= _expected_uses) {
      _prof_factor = 1;
    } else {
      _prof_factor = _expected_uses / prof_total;
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("parse method='%d' uses='%g'",
                    log->identify(parse_method), expected_uses);
    if (depth() == 1 && C->is_osr_compilation()) {
      log->print(" osr_bci='%d'", C->entry_bci());
    }
    log->stamp();
    log->end_head();
  }

  // Accumulate deoptimization counts.
  // (The range_check and store_check counts are checked elsewhere.)
  ciMethodData* md = method()->method_data();
  for (uint reason = 0; reason < md->trap_reason_limit(); reason++) {
    uint md_count = md->trap_count(reason);
    if (md_count != 0) {
      if (md_count == md->trap_count_limit())
        md_count += md->overflow_trap_count();
      uint total_count = C->trap_count(reason);
      uint old_count = total_count;
      total_count += md_count;
      // Saturate the add if it overflows.
      if (total_count < old_count || total_count < md_count)
        total_count = (uint)-1;
      C->set_trap_count(reason, total_count);
      if (log != NULL)
        log->elem("observe trap='%s' count='%d' total='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md_count, total_count);
    }
  }
  // Accumulate total sum of decompilations, also.
  C->set_decompile_count(C->decompile_count() + md->decompile_count());

  _count_invocations = C->do_count_invocations();
  _method_data_update = C->do_method_data_update();

  if (log != NULL && method()->has_exception_handlers()) {
    log->elem("observe that='has_exception_handlers'");
  }

  assert(method()->can_be_compiled(), "Can not parse this method, cutout earlier");
  assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");

  // Always register dependence if JVMTI is enabled, because
  // either breakpoint setting or hotswapping of methods may
  // cause deoptimization.
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    C->dependencies()->assert_evol_method(method());
  }

  methods_seen++;

  // Do some special top-level things.
  if (depth() == 1 && C->is_osr_compilation()) {
    _entry_bci = C->entry_bci();
    _flow = method()->get_osr_flow_analysis(osr_bci());
    if (_flow->failing()) {
      C->record_method_not_compilable(_flow->failure_reason());
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
        if (Verbose) {
          method()->print_oop();
          method()->print_codes();
          _flow->print();
        }
      }
#endif
    }
    _tf = C->tf();     // the OSR entry type is different
  }

#ifdef ASSERT
  if (depth() == 1) {
    assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
    if (C->tf() != tf()) {
      MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
      assert(C->env()->system_dictionary_modification_counter_changed(),
             "Must invalidate if TypeFuncs differ");
    }
  } else {
    assert(!this->is_osr_parse(), "no recursive OSR");
  }
#endif

  methods_parsed++;
#ifndef PRODUCT
  // add method size here to guarantee that inlined methods are added too
  if (TimeCompiler)
    _total_bytes_compiled += method()->code_size();

  show_parse_info();
#endif

  if (failing()) {
    if (log) log->done("parse");
    return;
  }

  gvn().set_type(root(), root()->bottom_type());
  gvn().transform(top());

  // Import the results of the ciTypeFlow.
  init_blocks();

  // Merge point for all normal exits
  build_exits();

  // Setup the initial JVM state map.
  SafePointNode* entry_map = create_entry_map();

  // Check for bailouts during map initialization
  if (failing() || entry_map == NULL) {
    if (log) log->done("parse");
    return;
  }

  Node_Notes* caller_nn = C->default_node_notes();
  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (DebugInlinedCalls || depth() == 1) {
    C->set_default_node_notes(make_node_notes(caller_nn));
  }

  if (is_osr_parse()) {
    Node* osr_buf = entry_map->in(TypeFunc::Parms+0);
    entry_map->set_req(TypeFunc::Parms+0, top());
    set_map(entry_map);
    load_interpreter_state(osr_buf);
  } else {
    set_map(entry_map);
    do_method_entry();
  }

  // Check for bailouts during method entry.
  if (failing()) {
    if (log) log->done("parse");
    C->set_default_node_notes(caller_nn);
    return;
  }

  entry_map = map();  // capture any changes performed by method setup code
  assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");

  // We begin parsing as if we have just encountered a jump to the
  // method entry.
  Block* entry_block = start_block();
  assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
  set_map_clone(entry_map);
  merge_common(entry_block, entry_block->next_path_num());

#ifndef PRODUCT
  BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
  set_parse_histogram( parse_histogram_obj );
#endif

  // Parse all the basic blocks.
  do_all_blocks();

  C->set_default_node_notes(caller_nn);

  // Check for bailouts during conversion to graph
  if (failing()) {
    if (log) log->done("parse");
    return;
  }

  // Fix up all exiting control flow.
  set_map(entry_map);
  do_exits();

  if (log) log->done("parse nodes='%d' memory='%d'",
                     C->unique(), C->node_arena()->used());
}

//---------------------------do_all_blocks-------------------------------------
void Parse::do_all_blocks() {
  bool has_irreducible = flow()->has_irreducible_entry();

  // Walk over all blocks in Reverse Post-Order.
  while (true) {
    bool progress = false;
    for (int rpo = 0; rpo < block_count(); rpo++) {
      Block* block = rpo_at(rpo);

      if (block->is_parsed()) continue;

      if (!block->is_merged()) {
        // Dead block, no state reaches this block
        continue;
      }

      // Prepare to parse this block.
      load_state_from(block);

      if (stopped()) {
        // Block is dead.
        continue;
      }

      blocks_parsed++;

      progress = true;
      if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) {
        // Not all preds have been parsed. We must build phis everywhere.
        // (Note that dead locals do not get phis built, ever.)
        ensure_phis_everywhere();

        // Leave behind an undisturbed copy of the map, for future merges.
        set_map(clone_map());
      }

      if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
        // In the absence of irreducible loops, the Region and Phis
        // associated with a merge that doesn't involve a backedge can
        // be simplified now since the RPO parsing order guarantees
        // that any path which was supposed to reach here has already
        // been parsed or must be dead.
        Node* c = control();
        Node* result = _gvn.transform_no_reclaim(control());
        if (c != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
        }
        if (result != top()) {
          record_for_igvn(result);
        }
      }

      // Parse the block.
      do_one_block();

      // Check for bailouts.
      if (failing()) return;
    }

    // with irreducible loops multiple passes might be necessary to parse everything
    if (!has_irreducible || !progress) {
      break;
    }
  }

  blocks_seen += block_count();

#ifndef PRODUCT
  // Make sure there are no half-processed blocks remaining.
  // Every remaining unprocessed block is dead and may be ignored now.
  for (int rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    if (!block->is_parsed()) {
      if (TraceOptoParse) {
        tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
      }
      assert(!block->is_merged(), "no half-processed blocks");
    }
  }
#endif
}

653 //-------------------------------build_exits---------------------------------- | |
654 // Build normal and exceptional exit merge points. | |
655 void Parse::build_exits() { | |
656 // make a clone of caller to prevent sharing of side-effects | |
657 _exits.set_map(_exits.clone_map()); | |
658 _exits.clean_stack(_exits.sp()); | |
659 _exits.sync_jvms(); | |
660 | |
661 RegionNode* region = new (C, 1) RegionNode(1); | |
662 record_for_igvn(region); | |
663 gvn().set_type_bottom(region); | |
664 _exits.set_control(region); | |
665 | |
666 // Note: iophi and memphi are not transformed until do_exits. | |
667 Node* iophi = new (C, region->req()) PhiNode(region, Type::ABIO); | |
668 Node* memphi = new (C, region->req()) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); | |
669 _exits.set_i_o(iophi); | |
670 _exits.set_all_memory(memphi); | |
671 | |
672 // Add a return value to the exit state. (Do not push it yet.) | |
673 if (tf()->range()->cnt() > TypeFunc::Parms) { | |
674 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms); | |
675 // Don't "bind" an unloaded return klass to the ret_phi. If the klass | |
676 // becomes loaded during the subsequent parsing, the loaded and unloaded | |
677 // types will not join when we transform and push in do_exits(). | |
678 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr(); | |
679 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) { | |
680 ret_type = TypeOopPtr::BOTTOM; | |
681 } | |
682 int ret_size = type2size[ret_type->basic_type()]; | |
683 Node* ret_phi = new (C, region->req()) PhiNode(region, ret_type); | |
684 _exits.ensure_stack(ret_size); | |
685 assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range"); | |
686 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method"); | |
687 _exits.set_argument(0, ret_phi); // here is where the parser finds it | |
688 // Note: ret_phi is not yet pushed, until do_exits. | |
689 } | |
690 } | |
691 | |
692 | |
693 //----------------------------build_start_state------------------------------- | |
694 // Construct a state which contains only the incoming arguments from an | |
695 // unknown caller. The method & bci will be NULL & InvocationEntryBci. | |
696 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) { | |
697 int arg_size = tf->domain()->cnt(); | |
698 int max_size = MAX2(arg_size, (int)tf->range()->cnt()); | |
699 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms); | |
700 SafePointNode* map = new (this, max_size) SafePointNode(max_size, NULL); | |
701 record_for_igvn(map); | |
702 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size"); | |
703 Node_Notes* old_nn = default_node_notes(); | |
704 if (old_nn != NULL && has_method()) { | |
705 Node_Notes* entry_nn = old_nn->clone(this); | |
706 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms()); | |
707 entry_jvms->set_offsets(0); | |
708 entry_jvms->set_bci(entry_bci()); | |
709 entry_nn->set_jvms(entry_jvms); | |
710 set_default_node_notes(entry_nn); | |
711 } | |
712 uint i; | |
713 for (i = 0; i < (uint)arg_size; i++) { | |
714 Node* parm = initial_gvn()->transform(new (this, 1) ParmNode(start, i)); | |
715 map->init_req(i, parm); | |
716 // Record all these guys for later GVN. | |
717 record_for_igvn(parm); | |
718 } | |
719 for (; i < map->req(); i++) { | |
720 map->init_req(i, top()); | |
721 } | |
722 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here"); | |
723 set_default_node_notes(old_nn); | |
724 map->set_jvms(jvms); | |
725 jvms->set_map(map); | |
726 return jvms; | |
727 } | |
728 | |
729 //-----------------------------make_node_notes--------------------------------- | |
730 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) { | |
731 if (caller_nn == NULL) return NULL; | |
732 Node_Notes* nn = caller_nn->clone(C); | |
733 JVMState* caller_jvms = nn->jvms(); | |
734 JVMState* jvms = new (C) JVMState(method(), caller_jvms); | |
735 jvms->set_offsets(0); | |
736 jvms->set_bci(_entry_bci); | |
737 nn->set_jvms(jvms); | |
738 return nn; | |
739 } | |
740 | |
741 | |
742 //--------------------------return_values-------------------------------------- | |
743 void Compile::return_values(JVMState* jvms) { | |
744 GraphKit kit(jvms); | |
745 Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms, | |
746 kit.control(), | |
747 kit.i_o(), | |
748 kit.reset_memory(), | |
749 kit.frameptr(), | |
750 kit.returnadr()); | |
751 // Add zero or 1 return values | |
752 int ret_size = tf()->range()->cnt() - TypeFunc::Parms; | |
753 if (ret_size > 0) { | |
754 kit.inc_sp(-ret_size); // pop the return value(s) | |
755 kit.sync_jvms(); | |
756 ret->add_req(kit.argument(0)); | |
757 // Note: The second dummy edge is not needed by a ReturnNode. | |
758 } | |
759 // bind it to root | |
760 root()->add_req(ret); | |
761 record_for_igvn(ret); | |
762 initial_gvn()->transform_no_reclaim(ret); | |
763 } | |
764 | |
765 //------------------------rethrow_exceptions----------------------------------- | |
766 // Bind all exception states in the list into a single RethrowNode. | |
767 void Compile::rethrow_exceptions(JVMState* jvms) { | |
768 GraphKit kit(jvms); | |
769 if (!kit.has_exceptions()) return; // nothing to generate | |
770 // Load my combined exception state into the kit, with all phis transformed: | |
771 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states(); | |
772 Node* ex_oop = kit.use_exception_state(ex_map); | |
773 RethrowNode* exit = new (this, TypeFunc::Parms + 1) RethrowNode(kit.control(), | |
774 kit.i_o(), kit.reset_memory(), | |
775 kit.frameptr(), kit.returnadr(), | |
776 // like a return but with exception input | |
777 ex_oop); | |
778 // bind to root | |
779 root()->add_req(exit); | |
780 record_for_igvn(exit); | |
781 initial_gvn()->transform_no_reclaim(exit); | |
782 } | |
783 | |
784 bool Parse::can_rerun_bytecode() { | |
785 switch (bc()) { | |
786 case Bytecodes::_ldc: | |
787 case Bytecodes::_ldc_w: | |
788 case Bytecodes::_ldc2_w: | |
789 case Bytecodes::_getfield: | |
790 case Bytecodes::_putfield: | |
791 case Bytecodes::_getstatic: | |
792 case Bytecodes::_putstatic: | |
793 case Bytecodes::_arraylength: | |
794 case Bytecodes::_baload: | |
795 case Bytecodes::_caload: | |
796 case Bytecodes::_iaload: | |
797 case Bytecodes::_saload: | |
798 case Bytecodes::_faload: | |
799 case Bytecodes::_aaload: | |
800 case Bytecodes::_laload: | |
801 case Bytecodes::_daload: | |
802 case Bytecodes::_bastore: | |
803 case Bytecodes::_castore: | |
804 case Bytecodes::_iastore: | |
805 case Bytecodes::_sastore: | |
806 case Bytecodes::_fastore: | |
807 case Bytecodes::_aastore: | |
808 case Bytecodes::_lastore: | |
809 case Bytecodes::_dastore: | |
810 case Bytecodes::_irem: | |
811 case Bytecodes::_idiv: | |
812 case Bytecodes::_lrem: | |
813 case Bytecodes::_ldiv: | |
814 case Bytecodes::_frem: | |
815 case Bytecodes::_fdiv: | |
816 case Bytecodes::_drem: | |
817 case Bytecodes::_ddiv: | |
818 case Bytecodes::_checkcast: | |
819 case Bytecodes::_instanceof: | |
820 case Bytecodes::_athrow: | |
821 case Bytecodes::_anewarray: | |
822 case Bytecodes::_newarray: | |
823 case Bytecodes::_multianewarray: | |
824 case Bytecodes::_new: | |
825 case Bytecodes::_monitorenter: // can re-run initial null check, only | |
826 case Bytecodes::_return: | |
827 return true; | |
828 break; | |
829 | |
830 case Bytecodes::_invokestatic: | |
831 case Bytecodes::_invokespecial: | |
832 case Bytecodes::_invokevirtual: | |
833 case Bytecodes::_invokeinterface: | |
834 return false; | |
835 break; | |
836 | |
837 default: | |
838 assert(false, "unexpected bytecode produced an exception"); | |
839 return true; | |
840 } | |
841 } | |
842 | |
843 //---------------------------do_exceptions------------------------------------- | |
844 // Process exceptions arising from the current bytecode. | |
845 // Send caught exceptions to the proper handler within this method. | |
846 // Unhandled exceptions feed into _exit. | |
847 void Parse::do_exceptions() { | |
848 if (!has_exceptions()) return; | |
849 | |
850 if (failing()) { | |
851 // Pop them all off and throw them away. | |
852 while (pop_exception_state() != NULL) ; | |
853 return; | |
854 } | |
855 | |
856 // Make sure we can classify this bytecode if we need to. | |
857 debug_only(can_rerun_bytecode()); | |
858 | |
859 PreserveJVMState pjvms(this, false); | |
860 | |
861 SafePointNode* ex_map; | |
862 while ((ex_map = pop_exception_state()) != NULL) { | |
863 if (!method()->has_exception_handlers()) { | |
864 // Common case: Transfer control outward. | |
865 // Doing it this early allows the exceptions to common up | |
866 // even between adjacent method calls. | |
867 throw_to_exit(ex_map); | |
868 } else { | |
869 // Have to look at the exception first. | |
870 assert(stopped(), "catch_inline_exceptions trashes the map"); | |
871 catch_inline_exceptions(ex_map); | |
872 stop_and_kill_map(); // we used up this exception state; kill it | |
873 } | |
874 } | |
875 | |
876 // We now return to our regularly scheduled program: | |
877 } | |
878 | |
879 //---------------------------throw_to_exit------------------------------------- | |
880 // Merge the given map into an exception exit from this method. | |
881 // The exception exit will handle any unlocking of receiver. | |
882 // The ex_oop must be saved within the ex_map, unlike merge_exception. | |
883 void Parse::throw_to_exit(SafePointNode* ex_map) { | |
884 // Pop the JVMS to (a copy of) the caller. | |
885 GraphKit caller; | |
886 caller.set_map_clone(_caller->map()); | |
887 caller.set_bci(_caller->bci()); | |
888 caller.set_sp(_caller->sp()); | |
889 // Copy out the standard machine state: | |
890 for (uint i = 0; i < TypeFunc::Parms; i++) { | |
891 caller.map()->set_req(i, ex_map->in(i)); | |
892 } | |
893 // ...and the exception: | |
894 Node* ex_oop = saved_ex_oop(ex_map); | |
895 SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop); | |
896 // Finally, collect the new exception state in my exits: | |
897 _exits.add_exception_state(caller_ex_map); | |
898 } | |
899 | |
900 //------------------------------do_exits--------------------------------------- | |
901 void Parse::do_exits() { | |
902 set_parse_bci(InvocationEntryBci); | |
903 | |
904 // Now peephole on the return bits | |
905 Node* region = _exits.control(); | |
906 _exits.set_control(gvn().transform(region)); | |
907 | |
908 Node* iophi = _exits.i_o(); | |
909 _exits.set_i_o(gvn().transform(iophi)); | |
910 | |
911 if (wrote_final()) { | |
912 // This method (which must be a constructor by the rules of Java) | |
913 // wrote a final. The effects of all initializations must be | |
914 // committed to memory before any code after the constructor | |
915 // publishes the reference to the newly constructor object. | |
916 // Rather than wait for the publication, we simply block the | |
917 // writes here. Rather than put a barrier on only those writes | |
918 // which are required to complete, we force all writes to complete. | |
919 // | |
920 // "All bets are off" unless the first publication occurs after a | |
921 // normal return from the constructor. We do not attempt to detect | |
922 // such unusual early publications. But no barrier is needed on | |
923 // exceptional returns, since they cannot publish normally. | |
924 // | |
925 _exits.insert_mem_bar(Op_MemBarRelease); | |
926 #ifndef PRODUCT | |
927 if (PrintOpto && (Verbose || WizardMode)) { | |
928 method()->print_name(); | |
929 tty->print_cr(" writes finals and needs a memory barrier"); | |
930 } | |
931 #endif | |
932 } | |
933 | |
934 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) { | |
935 // transform each slice of the original memphi: | |
936 mms.set_memory(_gvn.transform(mms.memory())); | |
937 } | |
938 | |
939 if (tf()->range()->cnt() > TypeFunc::Parms) { | |
940 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms); | |
941 Node* ret_phi = _gvn.transform( _exits.argument(0) ); | |
942 assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty(), "return value must be well defined"); | |
943 _exits.push_node(ret_type->basic_type(), ret_phi); | |
944 } | |
945 | |
946 // Note: Logic for creating and optimizing the ReturnNode is in Compile. | |
947 | |
948 // Unlock along the exceptional paths. | |
949 // This is done late so that we can common up equivalent exceptions | |
950 // (e.g., null checks) arising from multiple points within this method. | |
951 // See GraphKit::add_exception_state, which performs the commoning. | |
952 bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode; | |
953 | |
954 // record exit from a method if compiled while Dtrace is turned on. | |
955 if (do_synch || DTraceMethodProbes) { | |
956 // First move the exception list out of _exits: | |
957 GraphKit kit(_exits.transfer_exceptions_into_jvms()); | |
958 SafePointNode* normal_map = kit.map(); // keep this guy safe | |
959 // Now re-collect the exceptions into _exits: | |
960 SafePointNode* ex_map; | |
961 while ((ex_map = kit.pop_exception_state()) != NULL) { | |
962 Node* ex_oop = kit.use_exception_state(ex_map); | |
963 // Force the exiting JVM state to have this method at InvocationEntryBci. | |
964 // The exiting JVM state is otherwise a copy of the calling JVMS. | |
965 JVMState* caller = kit.jvms(); | |
966 JVMState* ex_jvms = caller->clone_shallow(C); | |
967 ex_jvms->set_map(kit.clone_map()); | |
968 ex_jvms->map()->set_jvms(ex_jvms); | |
969 ex_jvms->set_bci( InvocationEntryBci); | |
970 kit.set_jvms(ex_jvms); | |
971 if (do_synch) { | |
972 // Add on the synchronized-method box/object combo | |
973 kit.map()->push_monitor(_synch_lock); | |
974 // Unlock! | |
975 kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node()); | |
976 } | |
977 if (DTraceMethodProbes) { | |
978 kit.make_dtrace_method_exit(method()); | |
979 } | |
980 // Done with exception-path processing. | |
981 ex_map = kit.make_exception_state(ex_oop); | |
982 assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity"); | |
983 // Pop the last vestige of this method: | |
984 ex_map->set_jvms(caller->clone_shallow(C)); | |
985 ex_map->jvms()->set_map(ex_map); | |
986 _exits.push_exception_state(ex_map); | |
987 } | |
988 assert(_exits.map() == normal_map, "keep the same return state"); | |
989 } | |
990 | |
991 { | |
992 // Capture very early exceptions (receiver null checks) from caller JVMS | |
993 GraphKit caller(_caller); | |
994 SafePointNode* ex_map; | |
995 while ((ex_map = caller.pop_exception_state()) != NULL) { | |
996 _exits.add_exception_state(ex_map); | |
997 } | |
998 } | |
999 } | |
1000 | |
1001 //-----------------------------create_entry_map------------------------------- | |
1002 // Initialize our parser map to contain the types at method entry. | |
1003 // For OSR, the map contains a single RawPtr parameter. | |
1004 // Initial monitor locking for sync. methods is performed by do_method_entry. | |
1005 SafePointNode* Parse::create_entry_map() { | |
1006 // Check for really stupid bail-out cases. | |
1007 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack(); | |
1008 if (len >= 32760) { | |
1009 C->record_method_not_compilable_all_tiers("too many local variables"); | |
1010 return NULL; | |
1011 } | |
1012 | |
1013 // If this is an inlined method, we may have to do a receiver null check. | |
1014 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) { | |
1015 GraphKit kit(_caller); | |
1016 kit.null_check_receiver(method()); | |
1017 _caller = kit.transfer_exceptions_into_jvms(); | |
1018 if (kit.stopped()) { | |
1019 _exits.add_exception_states_from(_caller); | |
1020 _exits.set_jvms(_caller); | |
1021 return NULL; | |
1022 } | |
1023 } | |
1024 | |
1025 assert(method() != NULL, "parser must have a method"); | |
1026 | |
1027 // Create an initial safepoint to hold JVM state during parsing | |
1028 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL); | |
1029 set_map(new (C, len) SafePointNode(len, jvms)); | |
1030 jvms->set_map(map()); | |
1031 record_for_igvn(map()); | |
1032 assert(jvms->endoff() == len, "correct jvms sizing"); | |

  SafePointNode* inmap = _caller->map();
  assert(inmap != NULL, "must have inmap");

  uint i;

  // Pass thru the predefined input parameters.
  for (i = 0; i < TypeFunc::Parms; i++) {
    map()->init_req(i, inmap->in(i));
  }

  if (depth() == 1) {
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  // Now add the locals which are initially bound to arguments:
  uint arg_size = tf()->domain()->cnt();
  ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
  for (i = TypeFunc::Parms; i < arg_size; i++) {
    map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
  }

  // Clear out the rest of the map (locals and stack)
  for (i = arg_size; i < len; i++) {
    map()->init_req(i, top());
  }

  SafePointNode* entry_map = stop();
  return entry_map;
}

//-----------------------------do_method_entry--------------------------------
// Emit any code needed in the pseudo-block before BCI zero.
// The main thing to do is lock the receiver of a synchronized method.
void Parse::do_method_entry() {
  set_parse_bci(InvocationEntryBci); // Pseudo-BCP
  set_sp(0);                         // Java Stack Pointer

  NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )

  if (DTraceMethodProbes) {
    make_dtrace_method_entry(method());
  }

  // If the method is synchronized, we need to construct a lock node, attach
  // it to the Start node, and pin it there.
  if (method()->is_synchronized()) {
    // Insert a FastLockNode right after the Start which takes as arguments
    // the current thread pointer, the "this" pointer & the address of the
    // stack slot pair used for the lock. The "this" pointer is a projection
    // off the start node, but the locking spot has to be constructed by
    // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
    // becomes the second argument to the FastLockNode call. The
    // FastLockNode becomes the new control parent to pin it to the start.

    // Setup Object Pointer
    Node *lock_obj = NULL;
    if (method()->is_static()) {
      ciInstance* mirror = _method->holder()->java_mirror();
      const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
      lock_obj = makecon(t_lock);
    } else {                 // Else pass the "this" pointer,
      lock_obj = local(0);   // which is Parm0 from StartNode
    }
    // Clear out dead values from the debug info.
    kill_dead_locals();
    // Build the FastLockNode
    _synch_lock = shared_lock(lock_obj);
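    // The net effect (a sketch, per the comment above): a BoxLockNode for the
    // stack slot and a FastLockNode(thread, lock_obj, box), with the FastLock
    // becoming the new control parent so that nothing in the method body can
    // float above the lock acquisition.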
  }

  if (depth() == 1) {
    increment_and_test_invocation_counter(Tier2CompileThreshold);
  }
}

//------------------------------init_blocks------------------------------------
// Initialize our parser map to contain the types/monitors at method entry.
void Parse::init_blocks() {
  // Create the blocks.
  _block_count = flow()->block_count();
  _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
  Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);
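  // The Block array is allocated raw (no constructors run), so the zero-fill
  // above is what gives every flag and counter a sane initial state before
  // init_node() fills in the rest.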

  int rpo;

  // Initialize the structs.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_node(this, rpo);
  }

  // Collect predecessor and successor information.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_graph(this);
  }
}

//-------------------------------init_node-------------------------------------
void Parse::Block::init_node(Parse* outer, int rpo) {
  _flow = outer->flow()->rpo_at(rpo);
  _pred_count = 0;
  _preds_parsed = 0;
  _count = 0;
  assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
  assert(!(is_merged() || is_parsed() || is_handler()), "sanity");
  assert(_live_locals.size() == 0, "sanity");

  // entry point has additional predecessor
  if (flow()->is_start())  _pred_count++;
  assert(flow()->is_start() == (this == outer->start_block()), "");
}

//-------------------------------init_graph------------------------------------
void Parse::Block::init_graph(Parse* outer) {
  // Create the successor list for this parser block.
  GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
  GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
  int ns = tfs->length();
  int ne = tfe->length();
  _num_successors = ns;
  _all_successors = ns+ne;
  _successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne);
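  // Combined array layout: _successors[0 .. ns) are the normal successors and
  // _successors[ns .. ns+ne) are exception handlers, as recorded by the
  // _pred_count/_is_handler updates in the loop below.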
  int p = 0;
  for (int i = 0; i < ns+ne; i++) {
    ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
    Block* block2 = outer->rpo_at(tf2->rpo());
    _successors[i] = block2;

    // Accumulate pred info for the other block, too.
    if (i < ns) {
      block2->_pred_count++;
    } else {
      block2->_is_handler = true;
    }

#ifdef ASSERT
    // A block's successors must be distinguishable by BCI.
    // That is, no bytecode is allowed to branch to two different
    // clones of the same code location.
    for (int j = 0; j < i; j++) {
      Block* block1 = _successors[j];
      if (block1 == block2)  continue; // duplicates are OK
      assert(block1->start() != block2->start(), "successors have unique bcis");
    }
#endif
  }

  // Note: We never call next_path_num along exception paths, so they
  // never get processed as "ready". Also, the input phis of exception
  // handlers get specially processed, so that
}

//---------------------------successor_for_bci---------------------------------
Parse::Block* Parse::Block::successor_for_bci(int bci) {
  for (int i = 0; i < all_successors(); i++) {
    Block* block2 = successor_at(i);
    if (block2->start() == bci)  return block2;
  }
  // We can actually reach here if ciTypeFlow traps out a block
  // due to an unloaded class, and concurrently with compilation the
  // class is then loaded, so that a later phase of the parser is
  // able to see more of the bytecode CFG. Or, the flow pass and
  // the parser can have a minor difference of opinion about executability
  // of bytecodes. For example, "obj.field = null" is executable even
  // if the field's type is an unloaded class; the flow pass used to
  // make a trap for such code.
  return NULL;
}


//-----------------------------stack_type_at-----------------------------------
const Type* Parse::Block::stack_type_at(int i) const {
  return get_type(flow()->stack_type_at(i));
}


//-----------------------------local_type_at-----------------------------------
const Type* Parse::Block::local_type_at(int i) const {
  // Make dead locals fall to bottom.
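  // (A BOTTOM answer here makes ensure_phi() force the slot to top, so dead
  // locals never grow phis; see ensure_phi() further below.)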
  if (_live_locals.size() == 0) {
    MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
    // This bitmap can be zero length if we saw a breakpoint.
    // In such cases, pretend they are all live.
    ((Block*)this)->_live_locals = live_locals;
  }
  if (_live_locals.size() > 0 && !_live_locals.at(i))
    return Type::BOTTOM;

  return get_type(flow()->local_type_at(i));
}


#ifndef PRODUCT

//----------------------------name_for_bc--------------------------------------
// helper method for BytecodeParseHistogram
static const char* name_for_bc(int i) {
  return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
}

//----------------------------BytecodeParseHistogram------------------------------------
Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
  _parser = p;
  _compiler = c;
  if( ! _initialized ) { _initialized = true; reset(); }
}

//----------------------------current_count------------------------------------
int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
  switch( bph_type ) {
  case BPH_transforms: { return _parser->gvn().made_progress(); }
  case BPH_values:     { return _parser->gvn().made_new_values(); }
  default: { ShouldNotReachHere(); return 0; }
  }
}

//----------------------------initialized--------------------------------------
bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }

//----------------------------reset--------------------------------------------
void Parse::BytecodeParseHistogram::reset() {
  int i = Bytecodes::number_of_codes;
  while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
}

//----------------------------set_initial_state--------------------------------
// Record info when starting to parse one bytecode
void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    _initial_bytecode   = bc;
    _initial_node_count = _compiler->unique();
    _initial_transforms = current_count(BPH_transforms);
    _initial_values     = current_count(BPH_values);
  }
}

//----------------------------record_change--------------------------------
// Record results of parsing one bytecode
void Parse::BytecodeParseHistogram::record_change() {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    ++_bytecodes_parsed[_initial_bytecode];
    _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
    _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
    _new_values        [_initial_bytecode] += (current_count(BPH_values) - _initial_values);
  }
}


//----------------------------print--------------------------------------------
void Parse::BytecodeParseHistogram::print(float cutoff) {
  ResourceMark rm;
  // print profile
  int total = 0;
  int i = 0;
  for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
  int abs_sum = 0;
  tty->cr(); //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr("Histogram of %d parsed bytecodes:", total);
  if( total == 0 ) { return; }
  tty->cr();
  tty->print_cr("absolute: count of compiled bytecodes of this type");
  tty->print_cr("relative: percentage contribution to compiled nodes");
  tty->print_cr("nodes : Average number of nodes constructed per bytecode");
  tty->print_cr("rnodes : Significance towards total nodes constructed (nodes*relative)");
  tty->print_cr("transforms: Average amount of transform progress per bytecode compiled");
  tty->print_cr("values : Average number of node values improved per bytecode");
  tty->print_cr("name : Bytecode name");
  tty->cr();
  tty->print_cr(" absolute relative nodes rnodes transforms values name");
  tty->print_cr("----------------------------------------------------------------------");
  while (--i > 0) {
    int abs = _bytecodes_parsed[i];
    float rel = abs * 100.0F / total;
    float nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
    float rnodes = _bytecodes_parsed[i] == 0 ? 0 : rel * nodes;
    float xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
    float values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values [i])/_bytecodes_parsed[i];
    if (cutoff <= rel) {
      tty->print_cr("%10d %7.2f%% %6.1f %6.2f %6.1f %6.1f %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
      abs_sum += abs;
    }
  }
  tty->print_cr("----------------------------------------------------------------------");
  float rel_sum = abs_sum * 100.0F / total;
  tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}
#endif

//----------------------------load_state_from----------------------------------
// Load block/map/sp.  But do not touch iter/bci.
void Parse::load_state_from(Block* block) {
  set_block(block);
  // load the block's JVM state:
  set_map(block->start_map());
  set_sp( block->start_sp());
}


//-----------------------------record_state------------------------------------
void Parse::Block::record_state(Parse* p) {
  assert(!is_merged(), "can only record state once, on 1st inflow");
  assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
  set_start_map(p->stop());
}


//------------------------------do_one_block-----------------------------------
void Parse::do_one_block() {
  if (TraceOptoParse) {
    Block *b = block();
    int ns = b->num_successors();
    int nt = b->all_successors();

    tty->print("Parsing block #%d at bci [%d,%d), successors: ",
               block()->rpo(), block()->start(), block()->limit());
    for (int i = 0; i < nt; i++) {
      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
    }
    if (b->is_loop_head()) tty->print(" lphd");
    tty->print_cr("");
  }

  assert(block()->is_merged(), "must be merged before being parsed");
  block()->mark_parsed();
  ++_blocks_parsed;

  // Set iterator to start of block.
  iter().reset_to_bci(block()->start());

  CompileLog* log = C->log();

  // Parse bytecodes
  while (!stopped() && !failing()) {
    iter().next();

    // Learn the current bci from the iterator:
    set_parse_bci(iter().cur_bci());

    if (bci() == block()->limit()) {
      // Do not walk into the next block until directed by do_all_blocks.
      merge(bci());
      break;
    }
    assert(bci() < block()->limit(), "bci still in block");

    if (log != NULL) {
      // Output an optional context marker, to help place actions
      // that occur during parsing of this BC. If there is no log
      // output until the next context string, this context string
      // will be silently ignored.
      log->context()->reset();
      log->context()->print_cr("<bc code='%d' bci='%d'/>", (int)bc(), bci());
    }

    if (block()->has_trap_at(bci())) {
      // We must respect the flow pass's traps, because it will refuse
      // to produce successors for trapping blocks.
      int trap_index = block()->flow()->trap_index();
      assert(trap_index != 0, "trap index must be valid");
      uncommon_trap(trap_index);
      break;
    }

    NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );

#ifdef ASSERT
    int pre_bc_sp = sp();
    int inputs, depth;
    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
    assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC");
#endif //ASSERT

    do_one_bytecode();

    assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth, "correct depth prediction");

    do_exceptions();

    NOT_PRODUCT( parse_histogram()->record_change(); );

    if (log != NULL) log->context()->reset(); // done w/ this one

    // Fall into next bytecode. Each bytecode normally has 1 sequential
    // successor which is typically made ready by visiting this bytecode.
    // If the successor has several predecessors, then it is a merge
    // point, starts a new basic block, and is handled like other basic blocks.
  }
}


//------------------------------set_parse_bci----------------------------------
void Parse::set_parse_bci(int bci) {
  set_bci(bci);
  Node_Notes* nn = C->default_node_notes();
  if (nn == NULL)  return;

  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (!DebugInlinedCalls && depth() > 1) {
    return;
  }

  // Update the JVMS annotation, if present.
  JVMState* jvms = nn->jvms();
  if (jvms != NULL && jvms->bci() != bci) {
    // Update the JVMS.
    jvms = jvms->clone_shallow(C);
    jvms->set_bci(bci);
    nn->set_jvms(jvms);
  }
}

//------------------------------merge------------------------------------------
// Merge the current mapping into the basic block starting at bci
void Parse::merge(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "our arrival must be expected");
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}
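
// (merge() consumes a path number that was counted in advance; merge_new_path()
// and merge_exception() below instead widen the target's region on the fly via
// add_new_path(), for inflow edges that were not pre-counted.)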

//-------------------------merge_new_path--------------------------------------
// Merge the current mapping into the basic block, using a new path
void Parse::merge_new_path(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "new path into frozen graph");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//-------------------------merge_exception-------------------------------------
// Merge the current mapping into the basic block starting at bci
// The ex_oop must be pushed on the stack, unlike throw_to_exit.
void Parse::merge_exception(int target_bci) {
  assert(sp() == 1, "must have only the throw exception on the stack");
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(target->is_handler(), "exceptions are handled by special blocks");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//--------------------handle_missing_successor---------------------------------
void Parse::handle_missing_successor(int target_bci) {
#ifndef PRODUCT
  Block* b = block();
  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
#endif
  ShouldNotReachHere();
}

//--------------------------merge_common---------------------------------------
void Parse::merge_common(Parse::Block* target, int pnum) {
  if (TraceOptoParse) {
    tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
  }

  // Zap extra stack slots to top
  assert(sp() == target->start_sp(), "");
  clean_stack(sp());

  if (!target->is_merged()) {   // No prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with empty state"); }

    // If this path is dead, do not bother capturing it as a merge.
    // It is "as if" we had 1 fewer predecessors from the beginning.
    if (stopped()) {
      if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
      return;
    }

    // Record that a new block has been merged.
    ++_blocks_merged;

    // Make a region if we know there are multiple or unpredictable inputs.
    // (Also, if this is a plain fall-through, we might see another region,
    // which must not be allowed into this block's map.)
    if (pnum > PhiNode::Input         // Known multiple inputs.
        || target->is_handler()       // These have unpredictable inputs.
        || target->is_loop_head()     // Known multiple inputs
        || control()->is_Region()) {  // We must hide this guy.
      // Add a Region to start the new basic block. Phis will be added
      // later lazily.
      int edges = target->pred_count();
      if (edges < pnum)  edges = pnum;  // might be a new path!
      Node *r = new (C, edges+1) RegionNode(edges+1);
      gvn().set_type(r, Type::CONTROL);
      record_for_igvn(r);
      // zap all inputs to NULL for debugging (done in Node(uint) constructor)
      // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
      r->init_req(pnum, control());
      set_control(r);
    }

    // Convert the existing Parser mapping into a mapping at this bci.
    store_state_to(target);
    assert(target->is_merged(), "do not come here twice");

  } else {                      // Prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with previous state"); }

    // We must not manufacture more phis if the target is already parsed.
    bool nophi = target->is_parsed();

    SafePointNode* newin = map();// Hang on to incoming mapping
    Block* save_block = block(); // Hang on to incoming block;
    load_state_from(target);     // Get prior mapping

    assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
    assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
    assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
    assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");

    // Iterate over my current mapping and the old mapping.
    // Where different, insert Phi functions.
    // Use any existing Phi functions.
    assert(control()->is_Region(), "must be merging to a region");
    RegionNode* r = control()->as_Region();

    // Compute where to merge into
    // Merge incoming control path
    r->init_req(pnum, newin->control());

    if (pnum == 1) {            // Last merge for this Region?
      if (!block()->flow()->is_irreducible_entry()) {
        Node* result = _gvn.transform_no_reclaim(r);
        if (r != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
        }
      }
      record_for_igvn(r);
    }

    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
      if (m->is_Phi() && m->as_Phi()->region() == r)
        phi = m->as_Phi();
      else
        phi = NULL;
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never changes
        case TypeFunc::FramePtr:// Drop m, use the original value
        case TypeFunc::ReturnAdr:
          break;
        case TypeFunc::Memory:  // Merge inputs to the MergeMem node
          assert(phi == NULL, "the merge contains phis, not vice versa");
          merge_memory_edges(n->as_MergeMem(), pnum, nophi);
          continue;
        default:                // All normal stuff
          if (phi == NULL) {
            if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
              phi = ensure_phi(j, nophi);
            }
          }
          break;
        }
      }
      // At this point, n might be top if:
      //  - there is no phi (because TypeFlow detected a conflict), or
      //  - the corresponding control edge is top (a dead incoming path)
      // It is a bug if we create a phi which sees a garbage value on a live path.

      if (phi != NULL) {
        assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
        assert(phi->region() == r, "");
        phi->set_req(pnum, n);  // Then add 'n' to the merge
        if (pnum == PhiNode::Input) {
          // Last merge for this Phi.
          // So far, Phis have had a reasonable type from ciTypeFlow.
          // Now _gvn will join that with the meet of current inputs.
          // BOTTOM is never permissible here, 'cause pessimistically
          // Phis of pointers cannot lose the basic pointer type.
          debug_only(const Type* bt1 = phi->bottom_type());
          assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
          map()->set_req(j, _gvn.transform_no_reclaim(phi));
          debug_only(const Type* bt2 = phi->bottom_type());
          assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
          record_for_igvn(phi);
        }
      }
    } // End of for all values to be merged

    if (pnum == PhiNode::Input &&
        !r->in(0)) {            // The occasional useless Region
      assert(control() == r, "");
      set_control(r->nonnull_req());
    }

    // newin has been subsumed into the lazy merge, and is now dead.
    set_block(save_block);

    stop();                     // done with this guy, for now
  }

  if (TraceOptoParse) {
    tty->print_cr(" on path %d", pnum);
  }

  // Done with this parser state.
  assert(stopped(), "");
}


//--------------------------merge_memory_edges---------------------------------
void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
  // (nophi means we must not create phis, because we already parsed here)
  assert(n != NULL, "");
  // Merge the inputs to the MergeMems
  MergeMemNode* m = merged_memory();

  assert(control()->is_Region(), "must be merging to a region");
  RegionNode* r = control()->as_Region();

  PhiNode* base = NULL;
  MergeMemNode* remerge = NULL;
  for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
    Node *p = mms.force_memory();
    Node *q = mms.memory2();
    if (mms.is_empty() && nophi) {
      // Trouble: No new splits allowed after a loop body is parsed.
      // Instead, wire the new split into a MergeMem on the backedge.
      // The optimizer will sort it out, slicing the phi.
      if (remerge == NULL) {
        assert(base != NULL, "");
        assert(base->in(0) != NULL, "should not be xformed away");
        remerge = MergeMemNode::make(C, base->in(pnum));
        gvn().set_type(remerge, Type::MEMORY);
        base->set_req(pnum, remerge);
      }
      remerge->set_memory_at(mms.alias_idx(), q);
      continue;
    }
    assert(!q->is_MergeMem(), "");
    PhiNode* phi;
    if (p != q) {
      phi = ensure_memory_phi(mms.alias_idx(), nophi);
    } else {
      if (p->is_Phi() && p->as_Phi()->region() == r)
        phi = p->as_Phi();
      else
        phi = NULL;
    }
    // Insert q into local phi
    if (phi != NULL) {
      assert(phi->region() == r, "");
      p = phi;
      phi->set_req(pnum, q);
      if (mms.at_base_memory()) {
        base = phi;  // delay transforming it
      } else if (pnum == 1) {
        record_for_igvn(phi);
        p = _gvn.transform_no_reclaim(phi);
      }
      mms.set_memory(p);// store back through the iterator
    }
  }
  // Transform base last, in case we must fiddle with remerging.
  if (base != NULL && pnum == 1) {
    record_for_igvn(base);
    m->set_base_memory( _gvn.transform_no_reclaim(base) );
  }
}


//------------------------ensure_phis_everywhere-------------------------------
void Parse::ensure_phis_everywhere() {
  ensure_phi(TypeFunc::I_O);

  // Ensure a phi on all currently known memories.
  for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
    ensure_memory_phi(mms.alias_idx());
    debug_only(mms.set_memory());  // keep the iterator happy
  }

  // Note: This is our only chance to create phis for memory slices.
  // If we miss a slice that crops up later, it will have to be
  // merged into the base-memory phi that we are building here.
  // Later, the optimizer will comb out the knot, and build separate
  // phi-loops for each memory slice that matters.

  // Monitors must nest nicely and not get confused amongst themselves.
  // Phi-ify everything up to the monitors, though.
  uint monoff = map()->jvms()->monoff();
  uint nof_monitors = map()->jvms()->nof_monitors();

  assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
  bool check_elide_phi = block()->is_SEL_head();
  for (uint i = TypeFunc::Parms; i < monoff; i++) {
    if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
      ensure_phi(i);
    }
  }

  // Even monitors need Phis, though they are well-structured.
  // This is true for OSR methods, and also for the rare cases where
  // a monitor object is the subject of a replace_in_map operation.
  // See bugs 4426707 and 5043395.
  for (uint m = 0; m < nof_monitors; m++) {
    ensure_phi(map()->jvms()->monitor_obj_offset(m));
  }
}


//-----------------------------add_new_path------------------------------------
// Add a previously unaccounted predecessor to this block.
int Parse::Block::add_new_path() {
  // If there is no map, return the lowest unused path number.
  if (!is_merged())  return pred_count()+1;  // there will be a map shortly

  SafePointNode* map = start_map();
  if (!map->control()->is_Region())
    return pred_count()+1;  // there may be a region some day
  RegionNode* r = map->control()->as_Region();

  // Add new path to the region.
  uint pnum = r->req();
  r->add_req(NULL);

  for (uint i = 1; i < map->req(); i++) {
    Node* n = map->in(i);
    if (i == TypeFunc::Memory) {
      // Ensure a phi on all currently known memories.
      for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
        Node* phi = mms.memory();
        if (phi->is_Phi() && phi->as_Phi()->region() == r) {
          assert(phi->req() == pnum, "must be same size as region");
          phi->add_req(NULL);
        }
      }
    } else {
      if (n->is_Phi() && n->as_Phi()->region() == r) {
        assert(n->req() == pnum, "must be same size as region");
        n->add_req(NULL);
      }
    }
  }

  return pnum;
}

//------------------------------ensure_phi-------------------------------------
// Turn the idx'th entry of the current map into a Phi
PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
  SafePointNode* map = this->map();
  Node* region = map->control();
  assert(region->is_Region(), "");

  Node* o = map->in(idx);
  assert(o != NULL, "");

  if (o == top())  return NULL; // TOP always merges into TOP

  if (o->is_Phi() && o->as_Phi()->region() == region) {
    return o->as_Phi();
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const JVMState* jvms = map->jvms();
  const Type* t;
  if (jvms->is_loc(idx)) {
    t = block()->local_type_at(idx - jvms->locoff());
  } else if (jvms->is_stk(idx)) {
    t = block()->stack_type_at(idx - jvms->stkoff());
  } else if (jvms->is_mon(idx)) {
    assert(!jvms->is_monitor_box(idx), "no phis for boxes");
    t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
  } else if ((uint)idx < TypeFunc::Parms) {
    t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
  } else {
    assert(false, "no type information for this phi");
  }

  // If the type falls to bottom, then this must be a local that
  // is mixing ints and oops or some such. Forcing it to top
  // makes it go dead.
  if (t == Type::BOTTOM) {
    map->set_req(idx, top());
    return NULL;
  }

  // Do not create phis for top either.
  // A top on a non-null control flow must be unused even after the phi.
  if (t == Type::TOP || t == Type::HALF) {
    map->set_req(idx, top());
    return NULL;
  }

  PhiNode* phi = PhiNode::make(region, o, t);
  gvn().set_type(phi, t);
  if (C->do_escape_analysis()) record_for_igvn(phi);
  map->set_req(idx, phi);
  return phi;
}
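
// (As seen above, merge_common() phi-ifies individual map slots on demand and
// ensure_phis_everywhere() pre-creates phis for blocks that may gain inputs
// later; nocreate is asserted when the target block was already parsed.)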

//--------------------------ensure_memory_phi----------------------------------
// Turn the idx'th slice of the current memory into a Phi
PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
  MergeMemNode* mem = merged_memory();
  Node* region = control();
  assert(region->is_Region(), "");

  Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
  assert(o != NULL && o != top(), "");

  PhiNode* phi;
  if (o->is_Phi() && o->as_Phi()->region() == region) {
    phi = o->as_Phi();
    if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
      // clone the shared base memory phi to make a new memory split
      assert(!nocreate, "Cannot build a phi for a block already parsed.");
      const Type* t = phi->bottom_type();
      const TypePtr* adr_type = C->get_adr_type(idx);
      phi = phi->slice_memory(adr_type);
      gvn().set_type(phi, t);
    }
    return phi;
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const Type* t = o->bottom_type();
  const TypePtr* adr_type = C->get_adr_type(idx);
  phi = PhiNode::make(region, o, t, adr_type);
  gvn().set_type(phi, t);
  if (idx == Compile::AliasIdxBot)
    mem->set_base_memory(phi);
  else
    mem->set_memory_at(idx, phi);
  return phi;
}

//------------------------------call_register_finalizer-----------------------
// Check the klass of the receiver and call register_finalizer if the
// class needs finalization.
void Parse::call_register_finalizer() {
  Node* receiver = local(0);
  assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL,
         "must have non-null instance type");

  const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
  if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
    // The type isn't known exactly so see if CHA tells us anything.
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      // No finalizable subclasses so skip the dynamic check.
      C->dependencies()->assert_has_no_finalizable_subclasses(ik);
      return;
    }
  }

  // Insert a dynamic test for whether the instance needs
  // finalization. In general this will fold up since the concrete
  // class is often visible so the access flags are constant.
  Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );

  Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);

  Node* mask  = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
  Node* check = _gvn.transform(new (C, 3) CmpINode(mask, intcon(0)));
  Node* test  = _gvn.transform(new (C, 2) BoolNode(check, BoolTest::ne));
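  // The three nodes above compute, in effect:
  //   (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER) != 0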

  IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);

  RegionNode* result_rgn = new (C, 3) RegionNode(3);
  record_for_igvn(result_rgn);

  Node *skip_register = _gvn.transform(new (C, 1) IfFalseNode(iff));
  result_rgn->init_req(1, skip_register);

  Node *needs_register = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(needs_register);
  if (stopped()) {
    // There is no slow path.
    result_rgn->init_req(2, top());
  } else {
    Node *call = make_runtime_call(RC_NO_LEAF,
                                   OptoRuntime::register_finalizer_Type(),
                                   OptoRuntime::register_finalizer_Java(),
                                   NULL, TypePtr::BOTTOM,
                                   receiver);
    make_slow_call_ex(call, env()->Throwable_klass(), true);

    Node* fast_io  = call->in(TypeFunc::I_O);
    Node* fast_mem = call->in(TypeFunc::Memory);
    // These two phis are pre-filled with copies of the fast IO and Memory
    Node* io_phi  = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
    Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);

    result_rgn->init_req(2, control());
    io_phi    ->init_req(2, i_o());
    mem_phi   ->init_req(2, reset_memory());

    set_all_memory( _gvn.transform(mem_phi) );
    set_i_o(        _gvn.transform(io_phi) );
  }

  set_control( _gvn.transform(result_rgn) );
}

//------------------------------return_current---------------------------------
// Append current _map to _exit_return
void Parse::return_current(Node* value) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }
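  // (Under -XX:+RegisterFinalizersAtInit, finalizable objects are registered
  // as they return from java.lang.Object.<init> rather than at allocation,
  // which is why the hook sits here on the return path.)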

  // Do not set_parse_bci, so that return goo is credited to the return insn.
  set_bci(InvocationEntryBci);
  if (method()->is_synchronized() && GenerateSynchronizationCode) {
    shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
  }
  if (DTraceMethodProbes) {
    make_dtrace_method_exit(method());
  }
  SafePointNode* exit_return = _exits.map();
  exit_return->in( TypeFunc::Control )->add_req( control() );
  exit_return->in( TypeFunc::I_O     )->add_req( i_o () );
  Node *mem = exit_return->in( TypeFunc::Memory );
  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch just this one input
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      gvn().set_type_bottom(phi);
      phi->del_req(phi->req()-1);  // prepare to re-patch
      mms.set_memory(phi);
    }
    mms.memory()->add_req(mms.memory2());
  }

  // frame pointer is always same, already captured
  if (value != NULL) {
    // If returning oops to an interface-return, there is a silent free
    // cast from oop to interface allowed by the Verifier. Make it explicit
    // here.
    Node* phi = _exits.argument(0);
    const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
    if( tr && tr->klass()->is_loaded() &&
        tr->klass()->is_interface() ) {
      const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
      if (tp && tp->klass()->is_loaded() &&
          !tp->klass()->is_interface()) {
        // sharpen the type eagerly; this eases certain assert checking
        if (tp->higher_equal(TypeInstPtr::NOTNULL))
          tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
        value = _gvn.transform(new (C, 2) CheckCastPPNode(0,value,tr));
      }
    }
    phi->add_req(value);
  }

  stop_and_kill_map();  // This CFG path dies here
}


//------------------------------add_safepoint----------------------------------
void Parse::add_safepoint() {
  // See if we can avoid this safepoint. No need for a SafePoint immediately
  // after a Call (except Leaf Call) or another SafePoint.
  Node *proj = control();
  bool add_poll_param = SafePointNode::needs_polling_address_input();
  uint parms = add_poll_param ? TypeFunc::Parms+1 : TypeFunc::Parms;
  if( proj->is_Proj() ) {
    Node *n0 = proj->in(0);
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() ) {
      if( n0->as_Call()->guaranteed_safepoint() )
        return;
    } else if( n0->is_SafePoint() && n0->req() >= parms ) {
      return;
    }
  }

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Clone the JVM State
  SafePointNode *sfpnt = new (C, parms) SafePointNode(parms, NULL);

  // Capture memory state BEFORE a SafePoint. Since we can block at a
  // SafePoint we need our GC state to be safe; i.e. we need all our current
  // write barriers (card marks) to not float down after the SafePoint so we
  // must read raw memory. Likewise we need all oop stores to match the card
  // marks. If deopt can happen, we need ALL stores (we need the correct JVM
  // state on a deopt).

  // We do not need to WRITE the memory state after a SafePoint. The control
  // edge will keep card-marks and oop-stores from floating up from below a
  // SafePoint and our true dependency added here will keep them from floating
  // down below a SafePoint.

  // Clone the current memory state
  Node* mem = MergeMemNode::make(C, map()->memory());

  mem = _gvn.transform(mem);

  // Pass control through the safepoint
  sfpnt->init_req(TypeFunc::Control  , control());
  // Fix edges normally used by a call
  sfpnt->init_req(TypeFunc::I_O      , top() );
  sfpnt->init_req(TypeFunc::Memory   , mem   );
  sfpnt->init_req(TypeFunc::ReturnAdr, top() );
  sfpnt->init_req(TypeFunc::FramePtr , top() );

  // Create a node for the polling address
  if( add_poll_param ) {
    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
    sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
  }
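  // (On platforms where needs_polling_address_input() is true, the polling
  // page address travels as an extra input on the SafePointNode; hence the
  // "parms" sizing above.)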

  // Fix up the JVM State edges
  add_safepoint_edges(sfpnt);
  Node *transformed_sfpnt = _gvn.transform(sfpnt);
  set_control(transformed_sfpnt);

  // Provide an edge from root to safepoint. This makes the safepoint
  // appear useful until the parse has completed.
  if( OptoRemoveUseless && transformed_sfpnt->is_SafePoint() ) {
    assert(C->root() != NULL, "Expect parse is still valid");
    C->root()->add_prec(transformed_sfpnt);
  }
}

#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
  InlineTree* ilt = NULL;
  if (C->ilt() != NULL) {
    JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
    ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
  }
  if (PrintCompilation && Verbose) {
    if (depth() == 1) {
      if( ilt->count_inlines() ) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
        tty->cr();
      }
    } else {
      if (method()->is_synchronized())        tty->print("s");
      if (method()->has_exception_handlers()) tty->print("!");
      // Check this is not the final compiled version
      if (C->trap_can_recompile()) {
        tty->print("-");
      } else {
        tty->print(" ");
      }
      method()->print_short_name();
      if (is_osr_parse()) {
        tty->print(" @ %d", osr_bci());
      }
      tty->print(" (%d bytes)",method()->code_size());
      if (ilt->count_inlines()) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
      }
      tty->cr();
    }
  }
  if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
    // Print that we succeeded; suppress this message on the first osr parse.

    if (method()->is_synchronized())        tty->print("s");
    if (method()->has_exception_handlers()) tty->print("!");
    // Check this is not the final compiled version
    if (C->trap_can_recompile() && depth() == 1) {
      tty->print("-");
    } else {
      tty->print(" ");
    }
    if( depth() != 1 ) { tty->print(" "); }  // missing compile count
    for (int i = 1; i < depth(); ++i) { tty->print(" "); }
    method()->print_short_name();
    if (is_osr_parse()) {
      tty->print(" @ %d", osr_bci());
    }
    if (ilt->caller_bci() != -1) {
      tty->print(" @ %d", ilt->caller_bci());
    }
    tty->print(" (%d bytes)",method()->code_size());
    if (ilt->count_inlines()) {
      tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                 ilt->count_inline_bcs());
    }
    tty->cr();
  }
}


//------------------------------dump-------------------------------------------
// Dump information associated with the bytecodes of current _method
void Parse::dump() {
  if( method() != NULL ) {
    // Iterate over bytecodes
    ciBytecodeStream iter(method());
    for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {
      dump_bci( iter.cur_bci() );
      tty->cr();
    }
  }
}

// Dump information associated with a byte code index, 'bci'
void Parse::dump_bci(int bci) {
  // Output info on merge-points, cloning, and within _jsr..._ret
  // NYI
  tty->print(" bci:%d", bci);
}

#endif