annotate src/share/vm/opto/parseHelper.cpp @ 563:1b9fc6e3171b

6442502: assert(bits,"Use TypePtr for NULL") on linux-x86
Reviewed-by: kvn

author:   never
date:     Wed, 04 Feb 2009 23:17:38 -0800
parents:  d1605aabd0a1
children: be93aad57795
/*
 * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parseHelper.cpp.incl"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
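// The hook is emitted as a leaf runtime call whose two arguments are the
// current thread and a constant pointer to the methodOop being entered/exited.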
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type = OptoRuntime::dtrace_method_entry_exit_Type();
  address call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char *call_name = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C, 1) ThreadLocalNode() );

  // Get method
  const TypeInstPtr* method_type = TypeInstPtr::make(TypePtr::Constant, method->klass(), true, method, 0);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
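  // (RC_NARROW_MEM below restricts the call's memory effect to that raw slice
  // instead of all of memory.)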
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeInstPtr *tp = _gvn.type(obj)->isa_instptr();
  if (!will_link || (tp && !tp->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && !tp->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    do_null_assert(obj, T_OBJECT);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    do_null_assert(peek(), T_OBJECT);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  push( gen_instanceof( pop(), makecon(TypeKlassPtr::make(klass)) ) );
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements
  Node *obj = stack(_sp-1);
  Node *idx = stack(_sp-2);
  Node *ary = stack(_sp-3);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // array_klass's type is generally INexact array-of-oop.  Heroically
  // cast the array klass to EXACT array and uncommon-trap if the cast
  // fails.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)) {
    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)
  }

  // Is the array klass exactly its defined type?
  if (always_see_exact_class && !tak->klass_is_exact()) {
    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con  = makecon(extak);
    Node* cmp  = _gvn.transform(new (C, 3) CmpPNode( array_klass, con ));
    Node* bol  = _gvn.transform(new (C, 2) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl = control();
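    // BuildCutout's "unless" form puts control inside the braces on the path
    // where bol is false (array klass != expected constant); that path feeds
    // the uncommon trap, and the matching path resumes after the block.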
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
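  // (element_klass_offset_in_bytes() is an offset within the Klass body;
  // sizeof(oopDesc) accounts for the klassOop's object header.)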
  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast( obj, a_e_klass );
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
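  // Any of the conditions below bails out to the interpreter, which will
  // perform the class initialization or throw the appropriate error.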
  if (!klass->is_initialized() ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                              map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
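  // (The comparison is unsigned -- CmpU -- so we fall through while
  // cnt < limit and take the Reason_age uncommon trap once the counter
  // reaches the limit.)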
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk = _gvn.transform( new (C, 3) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst = _gvn.transform( new (C, 2) BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the methodOop node.
  const TypePtr* adr_type = TypeOopPtr::make_from_constant(method());
  Node *methodOop_node = makecon(adr_type);

  // Load the interpreter_invocation_counter from the methodOop.
  int offset = methodOopDesc::interpreter_invocation_counter_offset_in_bytes();
  Node* adr_node = basic_plus_adr(methodOop_node, methodOop_node, offset);
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory( NULL, adr_node, incr, T_INT, adr_type );
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within methodDataOop of the data array
  ByteSize data_offset = methodDataOopDesc::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeOopPtr::make_from_constant(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C, 3) MulXNode( idx, str ) );
    ptr = _gvn.transform( new (C, 4) AddPNode( mdo, ptr, scale ) );
  }

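  // The result is mdo + data_offset + cell_offset + counter_offset,
  // plus idx * stride when an indexed counter is being addressed.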
  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);
  Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type );
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

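  // Flags live in a single byte of the DataLayout header: load the byte,
  // OR in the requested bit, and store the byte back.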
  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type);
  Node* incr = _gvn.transform(new (C, 3) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  profile_generic_call();

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokespecial:
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for not taken branch");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));
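  // method_data points at the start of this call site's ReceiverTypeData;
  // the leaf call below hands it, along with the receiver, to the runtime
  // helper that records the observed receiver klass.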

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }
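  // Falling off the end of the loop means target_bci was not found;
  // table_full then tells us whether an empty (no_bci) row remains that a
  // slow call could claim for it.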

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // the target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}