annotate src/share/vm/opto/callGenerator.cpp @ 452:00b023ae2d78

6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When we encounter marking stack overflow during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems on account of mutating the mark word of the header because of conflicts with mutator accesses and updates of that field. Instead we should use the usual mechanism for overflow handling in concurrent phases, namely dirtying of the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery enabled, we needed to adjust some code to be correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes

author:   ysr
date:     Thu, 20 Nov 2008 12:27:41 -0800
parents:  9ee9cf798b59
children: 7c57aead6d3e
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_callGenerator.cpp.incl"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, int is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
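  // The parser's computed signature may legitimately differ from the
  // cached TypeFunc only if class loading changed the system dictionary
  // while this compile was in flight; anything else is a bug.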
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
public:
  DirectCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
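  // Both stubs resolve the callee lazily on the first invocation; the
  // opt_virtual stub serves non-static calls that are statically bound.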
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  return new VirtualCallGenerator(m, vtable_index);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const { return _is_inline; }
  virtual bool      is_virtual() const { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
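    // Walk back up the control chain (CatchProj -> Catch -> Proj) to
    // recover the CallJava node the cold generator just emitted; any
    // other graph shape collapses m to top() and the site is not queued.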
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Compile* C = Compile::current();
  // Replace the callnode with something better.
  CallJavaNode* call = this->call()->as_CallJava();
  ciMethod* method   = call->method();
  int nargs          = method->arg_size();
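  // Rebuild a minimal JVMState and map around the call's incoming values
  // (locals and stack both start at TypeFunc::Parms) so the hot generator
  // can re-expand the call site in place.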
  JVMState* jvms     = call->jvms()->clone_shallow(C);
  uint size = TypeFunc::Parms + MAX2(2, nargs);
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < (uint)(TypeFunc::Parms + nargs); i1++) {
    map->init_req(i1, call->in(i1));
  }
  jvms->set_map(map);
  jvms->set_offsets(map->req());
  jvms->set_locoff(TypeFunc::Parms);
  jvms->set_stkoff(TypeFunc::Parms);
  GraphKit kit(jvms);

  JVMState* new_jvms = _hot_cg->generate(kit.jvms());
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  kit.set_jvms(new_jvms);
  Node* res = C->top();
  int   res_size = method->return_type()->size();
  if (res_size != 0) {
    kit.inc_sp(-res_size);
    res = kit.argument(0);
  }
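  // Gather every exception state raised by the inlined body into a single
  // GraphKit; its control and i_o feed the rewired exception paths below.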
  GraphKit ekit(kit.combine_and_pop_all_exception_states()->jvms());

  // Replace the call:
  for (DUIterator i = call->outs(); call->has_out(i); i++) {
    Node* n = call->out(i);
    Node* nn = NULL;  // replacement
    if (n->is_Proj()) {
      ProjNode* nproj = n->as_Proj();
      assert(nproj->_con < (uint)(TypeFunc::Parms + (res_size ? 1 : 0)), "sane proj");
      if (nproj->_con == TypeFunc::Parms) {
        nn = res;
      } else {
        nn = kit.map()->in(nproj->_con);
      }
      if (nproj->_con == TypeFunc::I_O) {
        for (DUIterator j = nproj->outs(); nproj->has_out(j); j++) {
          Node* e = nproj->out(j);
          if (e->Opcode() == Op_CreateEx) {
            e->replace_by(ekit.argument(0));
          } else if (e->Opcode() == Op_Catch) {
            for (DUIterator k = e->outs(); e->has_out(k); k++) {
              CatchProjNode* p = e->out(k)->as_CatchProj();
              if (p->is_handler_proj()) {
                p->replace_by(ekit.control());
              } else {
                p->replace_by(kit.control());
              }
            }
          }
        }
      }
    }
    NOT_PRODUCT(if (!nn)  n->dump(2));
    assert(nn != NULL, "don't know what to do with this user");
    n->replace_by(nn);
  }
}

void WarmCallInfo::make_cold() {
  // No action: Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const { return true; }
  virtual bool      is_inline() const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

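  // type_check_receiver leaves the hit path on the current control and
  // returns the miss path in slow_ctl; generate the miss call under a
  // saved JVMState so the hit path can continue from this point.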
  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
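  // Merge any jvms slot (locals, stack, monitors) that differs between the
  // fast and slow maps with a phi over the new region.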
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces a call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold: Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot: Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat. Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

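// The warm-call work list is singly linked and kept sorted by decreasing
// heat, so the hottest deferred call sites are reconsidered first.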
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo* WarmCallInfo::_always_hot  = NULL;
WarmCallInfo* WarmCallInfo::_always_cold = NULL;

WarmCallInfo* WarmCallInfo::always_hot() {
  if (_always_hot == NULL) {
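    // A zero-initialized static array doubles as storage for the sentinel;
    // no constructor runs, and the extreme field values set below guarantee
    // that is_hot() always answers true for it.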
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MAX_VALUE();
    ci->_work   = ci->_size  = MIN_VALUE();
    _always_hot = ci;
  }
  assert(_always_hot->is_hot(), "must always be hot");
  return _always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  if (_always_cold == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MIN_VALUE();
    ci->_work   = ci->_size  = MAX_VALUE();
    _always_cold = ci;
  }
  assert(_always_cold->is_cold(), "must always be cold");
  return _always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT