Mercurial > hg > truffle
annotate src/share/vm/c1/c1_GraphBuilder.cpp @ 1721:413ad0331a0c
6977924: Changes for 6975078 produce build error with certain gcc versions
Summary: The changes introduced for 6975078 assign badHeapOopVal to the _allocation field in the ResourceObj class. In 32 bit linux builds with certain versions of gcc this assignment will be flagged as an error while compiling allocation.cpp. In 32 bit builds the constant value badHeapOopVal (which is cast to an intptr_t) is negative. The _allocation field is typed as an unsigned intptr_t and gcc catches this as an error.
Reviewed-by: jcoomes, ysr, phh
author | johnc |
---|---|
date | Wed, 18 Aug 2010 10:59:06 -0700 |
parents | 136b78722a08 |
children | d5d065957597 |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1540
diff
changeset
|
2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1540
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1540
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1540
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_c1_GraphBuilder.cpp.incl" | |
27 | |
28 class BlockListBuilder VALUE_OBJ_CLASS_SPEC { | |
29 private: | |
30 Compilation* _compilation; | |
31 IRScope* _scope; | |
32 | |
33 BlockList _blocks; // internal list of all blocks | |
34 BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder | |
35 | |
36 // fields used by mark_loops | |
37 BitMap _active; // for iteration of control flow graph | |
38 BitMap _visited; // for iteration of control flow graph | |
39 intArray _loop_map; // caches the information if a block is contained in a loop | |
40 int _next_loop_index; // next free loop number | |
41 int _next_block_number; // for reverse postorder numbering of blocks | |
42 | |
43 // accessors | |
44 Compilation* compilation() const { return _compilation; } | |
45 IRScope* scope() const { return _scope; } | |
46 ciMethod* method() const { return scope()->method(); } | |
47 XHandlers* xhandlers() const { return scope()->xhandlers(); } | |
48 | |
49 // unified bailout support | |
50 void bailout(const char* msg) const { compilation()->bailout(msg); } | |
51 bool bailed_out() const { return compilation()->bailed_out(); } | |
52 | |
53 // helper functions | |
54 BlockBegin* make_block_at(int bci, BlockBegin* predecessor); | |
55 void handle_exceptions(BlockBegin* current, int cur_bci); | |
56 void handle_jsr(BlockBegin* current, int sr_bci, int next_bci); | |
57 void store_one(BlockBegin* current, int local); | |
58 void store_two(BlockBegin* current, int local); | |
59 void set_entries(int osr_bci); | |
60 void set_leaders(); | |
61 | |
62 void make_loop_header(BlockBegin* block); | |
63 void mark_loops(); | |
64 int mark_loops(BlockBegin* b, bool in_subroutine); | |
65 | |
66 // debugging | |
67 #ifndef PRODUCT | |
68 void print(); | |
69 #endif | |
70 | |
71 public: | |
72 // creation | |
73 BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci); | |
74 | |
75 // accessors for GraphBuilder | |
76 BlockList* bci2block() const { return _bci2block; } | |
77 }; | |
78 | |
79 | |
80 // Implementation of BlockListBuilder | |
81 | |
82 BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci) | |
83 : _compilation(compilation) | |
84 , _scope(scope) | |
85 , _blocks(16) | |
86 , _bci2block(new BlockList(scope->method()->code_size(), NULL)) | |
87 , _next_block_number(0) | |
88 , _active() // size not known yet | |
89 , _visited() // size not known yet | |
90 , _next_loop_index(0) | |
91 , _loop_map() // size not known yet | |
92 { | |
93 set_entries(osr_bci); | |
94 set_leaders(); | |
95 CHECK_BAILOUT(); | |
96 | |
97 mark_loops(); | |
98 NOT_PRODUCT(if (PrintInitialBlockList) print()); | |
99 | |
100 #ifndef PRODUCT | |
101 if (PrintCFGToFile) { | |
102 stringStream title; | |
103 title.print("BlockListBuilder "); | |
104 scope->method()->print_name(&title); | |
105 CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false); | |
106 } | |
107 #endif | |
108 } | |
109 | |
110 | |
111 void BlockListBuilder::set_entries(int osr_bci) { | |
112 // generate start blocks | |
113 BlockBegin* std_entry = make_block_at(0, NULL); | |
114 if (scope()->caller() == NULL) { | |
115 std_entry->set(BlockBegin::std_entry_flag); | |
116 } | |
117 if (osr_bci != -1) { | |
118 BlockBegin* osr_entry = make_block_at(osr_bci, NULL); | |
119 osr_entry->set(BlockBegin::osr_entry_flag); | |
120 } | |
121 | |
122 // generate exception entry blocks | |
123 XHandlers* list = xhandlers(); | |
124 const int n = list->length(); | |
125 for (int i = 0; i < n; i++) { | |
126 XHandler* h = list->handler_at(i); | |
127 BlockBegin* entry = make_block_at(h->handler_bci(), NULL); | |
128 entry->set(BlockBegin::exception_entry_flag); | |
129 h->set_entry_block(entry); | |
130 } | |
131 } | |
132 | |
133 | |
134 BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) { | |
135 assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer"); | |
136 | |
137 BlockBegin* block = _bci2block->at(cur_bci); | |
138 if (block == NULL) { | |
139 block = new BlockBegin(cur_bci); | |
140 block->init_stores_to_locals(method()->max_locals()); | |
141 _bci2block->at_put(cur_bci, block); | |
142 _blocks.append(block); | |
143 | |
144 assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist"); | |
145 } | |
146 | |
147 if (predecessor != NULL) { | |
148 if (block->is_set(BlockBegin::exception_entry_flag)) { | |
149 BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block); | |
150 } | |
151 | |
152 predecessor->add_successor(block); | |
153 block->increment_total_preds(); | |
154 } | |
155 | |
156 return block; | |
157 } | |
158 | |
159 | |
160 inline void BlockListBuilder::store_one(BlockBegin* current, int local) { | |
161 current->stores_to_locals().set_bit(local); | |
162 } | |
163 inline void BlockListBuilder::store_two(BlockBegin* current, int local) { | |
164 store_one(current, local); | |
165 store_one(current, local + 1); | |
166 } | |
167 | |
168 | |
169 void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) { | |
170 // Draws edges from a block to its exception handlers | |
171 XHandlers* list = xhandlers(); | |
172 const int n = list->length(); | |
173 | |
174 for (int i = 0; i < n; i++) { | |
175 XHandler* h = list->handler_at(i); | |
176 | |
177 if (h->covers(cur_bci)) { | |
178 BlockBegin* entry = h->entry_block(); | |
179 assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set"); | |
180 assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set"); | |
181 | |
182 // add each exception handler only once | |
183 if (!current->is_successor(entry)) { | |
184 current->add_successor(entry); | |
185 entry->increment_total_preds(); | |
186 } | |
187 | |
188 // stop when reaching catchall | |
189 if (h->catch_type() == 0) break; | |
190 } | |
191 } | |
192 } | |
193 | |
194 void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) { | |
195 // start a new block after jsr-bytecode and link this block into cfg | |
196 make_block_at(next_bci, current); | |
197 | |
198 // start a new block at the subroutine entry at mark it with special flag | |
199 BlockBegin* sr_block = make_block_at(sr_bci, current); | |
200 if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) { | |
201 sr_block->set(BlockBegin::subroutine_entry_flag); | |
202 } | |
203 } | |
204 | |
205 | |
206 void BlockListBuilder::set_leaders() { | |
207 bool has_xhandlers = xhandlers()->has_handlers(); | |
208 BlockBegin* current = NULL; | |
209 | |
210 // The information which bci starts a new block simplifies the analysis | |
211 // Without it, backward branches could jump to a bci where no block was created | |
212 // during bytecode iteration. This would require the creation of a new block at the | |
213 // branch target and a modification of the successor lists. | |
214 BitMap bci_block_start = method()->bci_block_start(); | |
215 | |
216 ciBytecodeStream s(method()); | |
217 while (s.next() != ciBytecodeStream::EOBC()) { | |
218 int cur_bci = s.cur_bci(); | |
219 | |
220 if (bci_block_start.at(cur_bci)) { | |
221 current = make_block_at(cur_bci, current); | |
222 } | |
223 assert(current != NULL, "must have current block"); | |
224 | |
225 if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) { | |
226 handle_exceptions(current, cur_bci); | |
227 } | |
228 | |
229 switch (s.cur_bc()) { | |
230 // track stores to local variables for selective creation of phi functions | |
231 case Bytecodes::_iinc: store_one(current, s.get_index()); break; | |
232 case Bytecodes::_istore: store_one(current, s.get_index()); break; | |
233 case Bytecodes::_lstore: store_two(current, s.get_index()); break; | |
234 case Bytecodes::_fstore: store_one(current, s.get_index()); break; | |
235 case Bytecodes::_dstore: store_two(current, s.get_index()); break; | |
236 case Bytecodes::_astore: store_one(current, s.get_index()); break; | |
237 case Bytecodes::_istore_0: store_one(current, 0); break; | |
238 case Bytecodes::_istore_1: store_one(current, 1); break; | |
239 case Bytecodes::_istore_2: store_one(current, 2); break; | |
240 case Bytecodes::_istore_3: store_one(current, 3); break; | |
241 case Bytecodes::_lstore_0: store_two(current, 0); break; | |
242 case Bytecodes::_lstore_1: store_two(current, 1); break; | |
243 case Bytecodes::_lstore_2: store_two(current, 2); break; | |
244 case Bytecodes::_lstore_3: store_two(current, 3); break; | |
245 case Bytecodes::_fstore_0: store_one(current, 0); break; | |
246 case Bytecodes::_fstore_1: store_one(current, 1); break; | |
247 case Bytecodes::_fstore_2: store_one(current, 2); break; | |
248 case Bytecodes::_fstore_3: store_one(current, 3); break; | |
249 case Bytecodes::_dstore_0: store_two(current, 0); break; | |
250 case Bytecodes::_dstore_1: store_two(current, 1); break; | |
251 case Bytecodes::_dstore_2: store_two(current, 2); break; | |
252 case Bytecodes::_dstore_3: store_two(current, 3); break; | |
253 case Bytecodes::_astore_0: store_one(current, 0); break; | |
254 case Bytecodes::_astore_1: store_one(current, 1); break; | |
255 case Bytecodes::_astore_2: store_one(current, 2); break; | |
256 case Bytecodes::_astore_3: store_one(current, 3); break; | |
257 | |
258 // track bytecodes that affect the control flow | |
259 case Bytecodes::_athrow: // fall through | |
260 case Bytecodes::_ret: // fall through | |
261 case Bytecodes::_ireturn: // fall through | |
262 case Bytecodes::_lreturn: // fall through | |
263 case Bytecodes::_freturn: // fall through | |
264 case Bytecodes::_dreturn: // fall through | |
265 case Bytecodes::_areturn: // fall through | |
266 case Bytecodes::_return: | |
267 current = NULL; | |
268 break; | |
269 | |
270 case Bytecodes::_ifeq: // fall through | |
271 case Bytecodes::_ifne: // fall through | |
272 case Bytecodes::_iflt: // fall through | |
273 case Bytecodes::_ifge: // fall through | |
274 case Bytecodes::_ifgt: // fall through | |
275 case Bytecodes::_ifle: // fall through | |
276 case Bytecodes::_if_icmpeq: // fall through | |
277 case Bytecodes::_if_icmpne: // fall through | |
278 case Bytecodes::_if_icmplt: // fall through | |
279 case Bytecodes::_if_icmpge: // fall through | |
280 case Bytecodes::_if_icmpgt: // fall through | |
281 case Bytecodes::_if_icmple: // fall through | |
282 case Bytecodes::_if_acmpeq: // fall through | |
283 case Bytecodes::_if_acmpne: // fall through | |
284 case Bytecodes::_ifnull: // fall through | |
285 case Bytecodes::_ifnonnull: | |
286 make_block_at(s.next_bci(), current); | |
287 make_block_at(s.get_dest(), current); | |
288 current = NULL; | |
289 break; | |
290 | |
291 case Bytecodes::_goto: | |
292 make_block_at(s.get_dest(), current); | |
293 current = NULL; | |
294 break; | |
295 | |
296 case Bytecodes::_goto_w: | |
297 make_block_at(s.get_far_dest(), current); | |
298 current = NULL; | |
299 break; | |
300 | |
301 case Bytecodes::_jsr: | |
302 handle_jsr(current, s.get_dest(), s.next_bci()); | |
303 current = NULL; | |
304 break; | |
305 | |
306 case Bytecodes::_jsr_w: | |
307 handle_jsr(current, s.get_far_dest(), s.next_bci()); | |
308 current = NULL; | |
309 break; | |
310 | |
311 case Bytecodes::_tableswitch: { | |
312 // set block for each case | |
313 Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp()); | |
314 int l = switch_->length(); | |
315 for (int i = 0; i < l; i++) { | |
316 make_block_at(cur_bci + switch_->dest_offset_at(i), current); | |
317 } | |
318 make_block_at(cur_bci + switch_->default_offset(), current); | |
319 current = NULL; | |
320 break; | |
321 } | |
322 | |
323 case Bytecodes::_lookupswitch: { | |
324 // set block for each case | |
325 Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp()); | |
326 int l = switch_->number_of_pairs(); | |
327 for (int i = 0; i < l; i++) { | |
328 make_block_at(cur_bci + switch_->pair_at(i)->offset(), current); | |
329 } | |
330 make_block_at(cur_bci + switch_->default_offset(), current); | |
331 current = NULL; | |
332 break; | |
333 } | |
334 } | |
335 } | |
336 } | |
337 | |
338 | |
339 void BlockListBuilder::mark_loops() { | |
340 ResourceMark rm; | |
341 | |
342 _active = BitMap(BlockBegin::number_of_blocks()); _active.clear(); | |
343 _visited = BitMap(BlockBegin::number_of_blocks()); _visited.clear(); | |
344 _loop_map = intArray(BlockBegin::number_of_blocks(), 0); | |
345 _next_loop_index = 0; | |
346 _next_block_number = _blocks.length(); | |
347 | |
348 // recursively iterate the control flow graph | |
349 mark_loops(_bci2block->at(0), false); | |
350 assert(_next_block_number >= 0, "invalid block numbers"); | |
351 } | |
352 | |
353 void BlockListBuilder::make_loop_header(BlockBegin* block) { | |
354 if (block->is_set(BlockBegin::exception_entry_flag)) { | |
355 // exception edges may look like loops but don't mark them as such | |
356 // since it screws up block ordering. | |
357 return; | |
358 } | |
359 if (!block->is_set(BlockBegin::parser_loop_header_flag)) { | |
360 block->set(BlockBegin::parser_loop_header_flag); | |
361 | |
362 assert(_loop_map.at(block->block_id()) == 0, "must not be set yet"); | |
363 assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer"); | |
364 _loop_map.at_put(block->block_id(), 1 << _next_loop_index); | |
365 if (_next_loop_index < 31) _next_loop_index++; | |
366 } else { | |
367 // block already marked as loop header | |
1060 | 368 assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set"); |
0 | 369 } |
370 } | |
371 | |
372 int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) { | |
373 int block_id = block->block_id(); | |
374 | |
375 if (_visited.at(block_id)) { | |
376 if (_active.at(block_id)) { | |
377 // reached block via backward branch | |
378 make_loop_header(block); | |
379 } | |
380 // return cached loop information for this block | |
381 return _loop_map.at(block_id); | |
382 } | |
383 | |
384 if (block->is_set(BlockBegin::subroutine_entry_flag)) { | |
385 in_subroutine = true; | |
386 } | |
387 | |
388 // set active and visited bits before successors are processed | |
389 _visited.set_bit(block_id); | |
390 _active.set_bit(block_id); | |
391 | |
392 intptr_t loop_state = 0; | |
393 for (int i = block->number_of_sux() - 1; i >= 0; i--) { | |
394 // recursively process all successors | |
395 loop_state |= mark_loops(block->sux_at(i), in_subroutine); | |
396 } | |
397 | |
398 // clear active-bit after all successors are processed | |
399 _active.clear_bit(block_id); | |
400 | |
401 // reverse-post-order numbering of all blocks | |
402 block->set_depth_first_number(_next_block_number); | |
403 _next_block_number--; | |
404 | |
405 if (loop_state != 0 || in_subroutine ) { | |
406 // block is contained at least in one loop, so phi functions are necessary | |
407 // phi functions are also necessary for all locals stored in a subroutine | |
408 scope()->requires_phi_function().set_union(block->stores_to_locals()); | |
409 } | |
410 | |
411 if (block->is_set(BlockBegin::parser_loop_header_flag)) { | |
412 int header_loop_state = _loop_map.at(block_id); | |
413 assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set"); | |
414 | |
415 // If the highest bit is set (i.e. when integer value is negative), the method | |
416 // has 32 or more loops. This bit is never cleared because it is used for multiple loops | |
417 if (header_loop_state >= 0) { | |
418 clear_bits(loop_state, header_loop_state); | |
419 } | |
420 } | |
421 | |
422 // cache and return loop information for this block | |
423 _loop_map.at_put(block_id, loop_state); | |
424 return loop_state; | |
425 } | |
426 | |
427 | |
428 #ifndef PRODUCT | |
429 | |
430 int compare_depth_first(BlockBegin** a, BlockBegin** b) { | |
431 return (*a)->depth_first_number() - (*b)->depth_first_number(); | |
432 } | |
433 | |
434 void BlockListBuilder::print() { | |
435 tty->print("----- initial block list of BlockListBuilder for method "); | |
436 method()->print_short_name(); | |
437 tty->cr(); | |
438 | |
439 // better readability if blocks are sorted in processing order | |
440 _blocks.sort(compare_depth_first); | |
441 | |
442 for (int i = 0; i < _blocks.length(); i++) { | |
443 BlockBegin* cur = _blocks.at(i); | |
444 tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds()); | |
445 | |
446 tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " "); | |
447 tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " "); | |
448 tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " "); | |
449 tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " "); | |
450 tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " "); | |
451 | |
452 if (cur->number_of_sux() > 0) { | |
453 tty->print(" sux: "); | |
454 for (int j = 0; j < cur->number_of_sux(); j++) { | |
455 BlockBegin* sux = cur->sux_at(j); | |
456 tty->print("B%d ", sux->block_id()); | |
457 } | |
458 } | |
459 tty->cr(); | |
460 } | |
461 } | |
462 | |
463 #endif | |
464 | |
465 | |
466 // A simple growable array of Values indexed by ciFields | |
467 class FieldBuffer: public CompilationResourceObj { | |
468 private: | |
469 GrowableArray<Value> _values; | |
470 | |
471 public: | |
472 FieldBuffer() {} | |
473 | |
474 void kill() { | |
475 _values.trunc_to(0); | |
476 } | |
477 | |
478 Value at(ciField* field) { | |
479 assert(field->holder()->is_loaded(), "must be a loaded field"); | |
480 int offset = field->offset(); | |
481 if (offset < _values.length()) { | |
482 return _values.at(offset); | |
483 } else { | |
484 return NULL; | |
485 } | |
486 } | |
487 | |
488 void at_put(ciField* field, Value value) { | |
489 assert(field->holder()->is_loaded(), "must be a loaded field"); | |
490 int offset = field->offset(); | |
491 _values.at_put_grow(offset, value, NULL); | |
492 } | |
493 | |
494 }; | |
495 | |
496 | |
497 // MemoryBuffer is fairly simple model of the current state of memory. | |
498 // It partitions memory into several pieces. The first piece is | |
499 // generic memory where little is known about the owner of the memory. | |
500 // This is conceptually represented by the tuple <O, F, V> which says | |
501 // that the field F of object O has value V. This is flattened so | |
502 // that F is represented by the offset of the field and the parallel | |
503 // arrays _objects and _values are used for O and V. Loads of O.F can | |
504 // simply use V. Newly allocated objects are kept in a separate list | |
505 // along with a parallel array for each object which represents the | |
506 // current value of its fields. Stores of the default value to fields | |
507 // which have never been stored to before are eliminated since they | |
508 // are redundant. Once newly allocated objects are stored into | |
509 // another object or they are passed out of the current compile they | |
510 // are treated like generic memory. | |
511 | |
512 class MemoryBuffer: public CompilationResourceObj { | |
513 private: | |
514 FieldBuffer _values; | |
515 GrowableArray<Value> _objects; | |
516 GrowableArray<Value> _newobjects; | |
517 GrowableArray<FieldBuffer*> _fields; | |
518 | |
519 public: | |
520 MemoryBuffer() {} | |
521 | |
522 StoreField* store(StoreField* st) { | |
523 if (!EliminateFieldAccess) { | |
524 return st; | |
525 } | |
526 | |
527 Value object = st->obj(); | |
528 Value value = st->value(); | |
529 ciField* field = st->field(); | |
530 if (field->holder()->is_loaded()) { | |
531 int offset = field->offset(); | |
532 int index = _newobjects.find(object); | |
533 if (index != -1) { | |
534 // newly allocated object with no other stores performed on this field | |
535 FieldBuffer* buf = _fields.at(index); | |
536 if (buf->at(field) == NULL && is_default_value(value)) { | |
537 #ifndef PRODUCT | |
538 if (PrintIRDuringConstruction && Verbose) { | |
539 tty->print_cr("Eliminated store for object %d:", index); | |
540 st->print_line(); | |
541 } | |
542 #endif | |
543 return NULL; | |
544 } else { | |
545 buf->at_put(field, value); | |
546 } | |
547 } else { | |
548 _objects.at_put_grow(offset, object, NULL); | |
549 _values.at_put(field, value); | |
550 } | |
551 | |
552 store_value(value); | |
553 } else { | |
554 // if we held onto field names we could alias based on names but | |
555 // we don't know what's being stored to so kill it all. | |
556 kill(); | |
557 } | |
558 return st; | |
559 } | |
560 | |
561 | |
562 // return true if this value correspond to the default value of a field. | |
563 bool is_default_value(Value value) { | |
564 Constant* con = value->as_Constant(); | |
565 if (con) { | |
566 switch (con->type()->tag()) { | |
567 case intTag: return con->type()->as_IntConstant()->value() == 0; | |
568 case longTag: return con->type()->as_LongConstant()->value() == 0; | |
569 case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0; | |
570 case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0); | |
571 case objectTag: return con->type() == objectNull; | |
572 default: ShouldNotReachHere(); | |
573 } | |
574 } | |
575 return false; | |
576 } | |
577 | |
578 | |
579 // return either the actual value of a load or the load itself | |
580 Value load(LoadField* load) { | |
581 if (!EliminateFieldAccess) { | |
582 return load; | |
583 } | |
584 | |
585 if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) { | |
586 // can't skip load since value might get rounded as a side effect | |
587 return load; | |
588 } | |
589 | |
590 ciField* field = load->field(); | |
591 Value object = load->obj(); | |
592 if (field->holder()->is_loaded() && !field->is_volatile()) { | |
593 int offset = field->offset(); | |
594 Value result = NULL; | |
595 int index = _newobjects.find(object); | |
596 if (index != -1) { | |
597 result = _fields.at(index)->at(field); | |
598 } else if (_objects.at_grow(offset, NULL) == object) { | |
599 result = _values.at(field); | |
600 } | |
601 if (result != NULL) { | |
602 #ifndef PRODUCT | |
603 if (PrintIRDuringConstruction && Verbose) { | |
604 tty->print_cr("Eliminated load: "); | |
605 load->print_line(); | |
606 } | |
607 #endif | |
608 assert(result->type()->tag() == load->type()->tag(), "wrong types"); | |
609 return result; | |
610 } | |
611 } | |
612 return load; | |
613 } | |
614 | |
615 // Record this newly allocated object | |
616 void new_instance(NewInstance* object) { | |
617 int index = _newobjects.length(); | |
618 _newobjects.append(object); | |
619 if (_fields.at_grow(index, NULL) == NULL) { | |
620 _fields.at_put(index, new FieldBuffer()); | |
621 } else { | |
622 _fields.at(index)->kill(); | |
623 } | |
624 } | |
625 | |
626 void store_value(Value value) { | |
627 int index = _newobjects.find(value); | |
628 if (index != -1) { | |
629 // stored a newly allocated object into another object. | |
630 // Assume we've lost track of it as separate slice of memory. | |
631 // We could do better by keeping track of whether individual | |
632 // fields could alias each other. | |
633 _newobjects.remove_at(index); | |
634 // pull out the field info and store it at the end up the list | |
635 // of field info list to be reused later. | |
636 _fields.append(_fields.at(index)); | |
637 _fields.remove_at(index); | |
638 } | |
639 } | |
640 | |
641 void kill() { | |
642 _newobjects.trunc_to(0); | |
643 _objects.trunc_to(0); | |
644 _values.kill(); | |
645 } | |
646 }; | |
647 | |
648 | |
649 // Implementation of GraphBuilder's ScopeData | |
650 | |
651 GraphBuilder::ScopeData::ScopeData(ScopeData* parent) | |
652 : _parent(parent) | |
653 , _bci2block(NULL) | |
654 , _scope(NULL) | |
655 , _has_handler(false) | |
656 , _stream(NULL) | |
657 , _work_list(NULL) | |
658 , _parsing_jsr(false) | |
659 , _jsr_xhandlers(NULL) | |
660 , _caller_stack_size(-1) | |
661 , _continuation(NULL) | |
662 , _continuation_state(NULL) | |
663 , _num_returns(0) | |
664 , _cleanup_block(NULL) | |
665 , _cleanup_return_prev(NULL) | |
666 , _cleanup_state(NULL) | |
667 { | |
668 if (parent != NULL) { | |
669 _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f); | |
670 } else { | |
671 _max_inline_size = MaxInlineSize; | |
672 } | |
673 if (_max_inline_size < MaxTrivialSize) { | |
674 _max_inline_size = MaxTrivialSize; | |
675 } | |
676 } | |
677 | |
678 | |
679 void GraphBuilder::kill_all() { | |
680 if (UseLocalValueNumbering) { | |
681 vmap()->kill_all(); | |
682 } | |
683 _memory->kill(); | |
684 } | |
685 | |
686 | |
687 BlockBegin* GraphBuilder::ScopeData::block_at(int bci) { | |
688 if (parsing_jsr()) { | |
689 // It is necessary to clone all blocks associated with a | |
690 // subroutine, including those for exception handlers in the scope | |
691 // of the method containing the jsr (because those exception | |
692 // handlers may contain ret instructions in some cases). | |
693 BlockBegin* block = bci2block()->at(bci); | |
694 if (block != NULL && block == parent()->bci2block()->at(bci)) { | |
695 BlockBegin* new_block = new BlockBegin(block->bci()); | |
696 #ifndef PRODUCT | |
697 if (PrintInitialBlockList) { | |
698 tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr", | |
699 block->block_id(), block->bci(), new_block->block_id()); | |
700 } | |
701 #endif | |
702 // copy data from cloned blocked | |
703 new_block->set_depth_first_number(block->depth_first_number()); | |
704 if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag); | |
705 // Preserve certain flags for assertion checking | |
706 if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag); | |
707 if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag); | |
708 | |
709 // copy was_visited_flag to allow early detection of bailouts | |
710 // if a block that is used in a jsr has already been visited before, | |
711 // it is shared between the normal control flow and a subroutine | |
712 // BlockBegin::try_merge returns false when the flag is set, this leads | |
713 // to a compilation bailout | |
714 if (block->is_set(BlockBegin::was_visited_flag)) new_block->set(BlockBegin::was_visited_flag); | |
715 | |
716 bci2block()->at_put(bci, new_block); | |
717 block = new_block; | |
718 } | |
719 return block; | |
720 } else { | |
721 return bci2block()->at(bci); | |
722 } | |
723 } | |
724 | |
725 | |
726 XHandlers* GraphBuilder::ScopeData::xhandlers() const { | |
727 if (_jsr_xhandlers == NULL) { | |
728 assert(!parsing_jsr(), ""); | |
729 return scope()->xhandlers(); | |
730 } | |
731 assert(parsing_jsr(), ""); | |
732 return _jsr_xhandlers; | |
733 } | |
734 | |
735 | |
736 void GraphBuilder::ScopeData::set_scope(IRScope* scope) { | |
737 _scope = scope; | |
738 bool parent_has_handler = false; | |
739 if (parent() != NULL) { | |
740 parent_has_handler = parent()->has_handler(); | |
741 } | |
742 _has_handler = parent_has_handler || scope->xhandlers()->has_handlers(); | |
743 } | |
744 | |
745 | |
746 void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block, | |
747 Instruction* return_prev, | |
748 ValueStack* return_state) { | |
749 _cleanup_block = block; | |
750 _cleanup_return_prev = return_prev; | |
751 _cleanup_state = return_state; | |
752 } | |
753 | |
754 | |
755 void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) { | |
756 if (_work_list == NULL) { | |
757 _work_list = new BlockList(); | |
758 } | |
759 | |
760 if (!block->is_set(BlockBegin::is_on_work_list_flag)) { | |
761 // Do not start parsing the continuation block while in a | |
762 // sub-scope | |
763 if (parsing_jsr()) { | |
764 if (block == jsr_continuation()) { | |
765 return; | |
766 } | |
767 } else { | |
768 if (block == continuation()) { | |
769 return; | |
770 } | |
771 } | |
772 block->set(BlockBegin::is_on_work_list_flag); | |
773 _work_list->push(block); | |
774 | |
775 sort_top_into_worklist(_work_list, block); | |
776 } | |
777 } | |
778 | |
779 | |
780 void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) { | |
781 assert(worklist->top() == top, ""); | |
782 // sort block descending into work list | |
783 const int dfn = top->depth_first_number(); | |
784 assert(dfn != -1, "unknown depth first number"); | |
785 int i = worklist->length()-2; | |
786 while (i >= 0) { | |
787 BlockBegin* b = worklist->at(i); | |
788 if (b->depth_first_number() < dfn) { | |
789 worklist->at_put(i+1, b); | |
790 } else { | |
791 break; | |
792 } | |
793 i --; | |
794 } | |
795 if (i >= -1) worklist->at_put(i + 1, top); | |
796 } | |
797 | |
798 int GraphBuilder::ScopeData::caller_stack_size() const { | |
799 ValueStack* state = scope()->caller_state(); | |
800 if (state == NULL) { | |
801 return 0; | |
802 } | |
803 return state->stack_size(); | |
804 } | |
805 | |
806 | |
807 BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() { | |
808 if (is_work_list_empty()) { | |
809 return NULL; | |
810 } | |
811 return _work_list->pop(); | |
812 } | |
813 | |
814 | |
815 bool GraphBuilder::ScopeData::is_work_list_empty() const { | |
816 return (_work_list == NULL || _work_list->length() == 0); | |
817 } | |
818 | |
819 | |
820 void GraphBuilder::ScopeData::setup_jsr_xhandlers() { | |
821 assert(parsing_jsr(), ""); | |
822 // clone all the exception handlers from the scope | |
823 XHandlers* handlers = new XHandlers(scope()->xhandlers()); | |
824 const int n = handlers->length(); | |
825 for (int i = 0; i < n; i++) { | |
826 // The XHandlers need to be adjusted to dispatch to the cloned | |
827 // handler block instead of the default one but the synthetic | |
828 // unlocker needs to be handled specially. The synthetic unlocker | |
829 // should be left alone since there can be only one and all code | |
830 // should dispatch to the same one. | |
831 XHandler* h = handlers->handler_at(i); | |
1378
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1295
diff
changeset
|
832 assert(h->handler_bci() != SynchronizationEntryBCI, "must be real"); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1295
diff
changeset
|
833 h->set_entry_block(block_at(h->handler_bci())); |
0 | 834 } |
835 _jsr_xhandlers = handlers; | |
836 } | |
837 | |
838 | |
839 int GraphBuilder::ScopeData::num_returns() { | |
840 if (parsing_jsr()) { | |
841 return parent()->num_returns(); | |
842 } | |
843 return _num_returns; | |
844 } | |
845 | |
846 | |
847 void GraphBuilder::ScopeData::incr_num_returns() { | |
848 if (parsing_jsr()) { | |
849 parent()->incr_num_returns(); | |
850 } else { | |
851 ++_num_returns; | |
852 } | |
853 } | |
854 | |
855 | |
856 // Implementation of GraphBuilder | |
857 | |
858 #define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; } | |
859 | |
860 | |
861 void GraphBuilder::load_constant() { | |
862 ciConstant con = stream()->get_constant(); | |
863 if (con.basic_type() == T_ILLEGAL) { | |
864 BAILOUT("could not resolve a constant"); | |
865 } else { | |
866 ValueType* t = illegalType; | |
867 ValueStack* patch_state = NULL; | |
868 switch (con.basic_type()) { | |
869 case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break; | |
870 case T_BYTE : t = new IntConstant (con.as_byte ()); break; | |
871 case T_CHAR : t = new IntConstant (con.as_char ()); break; | |
872 case T_SHORT : t = new IntConstant (con.as_short ()); break; | |
873 case T_INT : t = new IntConstant (con.as_int ()); break; | |
874 case T_LONG : t = new LongConstant (con.as_long ()); break; | |
875 case T_FLOAT : t = new FloatConstant (con.as_float ()); break; | |
876 case T_DOUBLE : t = new DoubleConstant (con.as_double ()); break; | |
877 case T_ARRAY : t = new ArrayConstant (con.as_object ()->as_array ()); break; | |
878 case T_OBJECT : | |
879 { | |
880 ciObject* obj = con.as_object(); | |
1602 | 881 if (!obj->is_loaded() |
882 || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) { | |
883 patch_state = state()->copy(); | |
884 t = new ObjectConstant(obj); | |
0 | 885 } else { |
1602 | 886 assert(!obj->is_klass(), "must be java_mirror of klass"); |
0 | 887 t = new InstanceConstant(obj->as_instance()); |
888 } | |
889 break; | |
890 } | |
891 default : ShouldNotReachHere(); | |
892 } | |
893 Value x; | |
894 if (patch_state != NULL) { | |
895 x = new Constant(t, patch_state); | |
896 } else { | |
897 x = new Constant(t); | |
898 } | |
899 push(t, append(x)); | |
900 } | |
901 } | |
902 | |
903 | |
904 void GraphBuilder::load_local(ValueType* type, int index) { | |
905 Value x = state()->load_local(index); | |
906 push(type, x); | |
907 } | |
908 | |
909 | |
910 void GraphBuilder::store_local(ValueType* type, int index) { | |
911 Value x = pop(type); | |
912 store_local(state(), x, type, index); | |
913 } | |
914 | |
915 | |
916 void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) { | |
917 if (parsing_jsr()) { | |
918 // We need to do additional tracking of the location of the return | |
919 // address for jsrs since we don't handle arbitrary jsr/ret | |
920 // constructs. Here we are figuring out in which circumstances we | |
921 // need to bail out. | |
922 if (x->type()->is_address()) { | |
923 scope_data()->set_jsr_return_address_local(index); | |
924 | |
925 // Also check parent jsrs (if any) at this time to see whether | |
926 // they are using this local. We don't handle skipping over a | |
927 // ret. | |
928 for (ScopeData* cur_scope_data = scope_data()->parent(); | |
929 cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); | |
930 cur_scope_data = cur_scope_data->parent()) { | |
931 if (cur_scope_data->jsr_return_address_local() == index) { | |
932 BAILOUT("subroutine overwrites return address from previous subroutine"); | |
933 } | |
934 } | |
935 } else if (index == scope_data()->jsr_return_address_local()) { | |
936 scope_data()->set_jsr_return_address_local(-1); | |
937 } | |
938 } | |
939 | |
940 state->store_local(index, round_fp(x)); | |
941 } | |
942 | |
943 | |
944 void GraphBuilder::load_indexed(BasicType type) { | |
945 Value index = ipop(); | |
946 Value array = apop(); | |
947 Value length = NULL; | |
948 if (CSEArrayLength || | |
949 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || | |
950 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) { | |
951 length = append(new ArrayLength(array, lock_stack())); | |
952 } | |
953 push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, lock_stack()))); | |
954 } | |
955 | |
956 | |
957 void GraphBuilder::store_indexed(BasicType type) { | |
958 Value value = pop(as_ValueType(type)); | |
959 Value index = ipop(); | |
960 Value array = apop(); | |
961 Value length = NULL; | |
962 if (CSEArrayLength || | |
963 (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) || | |
964 (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) { | |
965 length = append(new ArrayLength(array, lock_stack())); | |
966 } | |
967 StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack()); | |
968 append(result); | |
459 | 969 _memory->store_value(value); |
0 | 970 } |
971 | |
972 | |
973 void GraphBuilder::stack_op(Bytecodes::Code code) { | |
974 switch (code) { | |
975 case Bytecodes::_pop: | |
976 { state()->raw_pop(); | |
977 } | |
978 break; | |
979 case Bytecodes::_pop2: | |
980 { state()->raw_pop(); | |
981 state()->raw_pop(); | |
982 } | |
983 break; | |
984 case Bytecodes::_dup: | |
985 { Value w = state()->raw_pop(); | |
986 state()->raw_push(w); | |
987 state()->raw_push(w); | |
988 } | |
989 break; | |
990 case Bytecodes::_dup_x1: | |
991 { Value w1 = state()->raw_pop(); | |
992 Value w2 = state()->raw_pop(); | |
993 state()->raw_push(w1); | |
994 state()->raw_push(w2); | |
995 state()->raw_push(w1); | |
996 } | |
997 break; | |
998 case Bytecodes::_dup_x2: | |
999 { Value w1 = state()->raw_pop(); | |
1000 Value w2 = state()->raw_pop(); | |
1001 Value w3 = state()->raw_pop(); | |
1002 state()->raw_push(w1); | |
1003 state()->raw_push(w3); | |
1004 state()->raw_push(w2); | |
1005 state()->raw_push(w1); | |
1006 } | |
1007 break; | |
1008 case Bytecodes::_dup2: | |
1009 { Value w1 = state()->raw_pop(); | |
1010 Value w2 = state()->raw_pop(); | |
1011 state()->raw_push(w2); | |
1012 state()->raw_push(w1); | |
1013 state()->raw_push(w2); | |
1014 state()->raw_push(w1); | |
1015 } | |
1016 break; | |
1017 case Bytecodes::_dup2_x1: | |
1018 { Value w1 = state()->raw_pop(); | |
1019 Value w2 = state()->raw_pop(); | |
1020 Value w3 = state()->raw_pop(); | |
1021 state()->raw_push(w2); | |
1022 state()->raw_push(w1); | |
1023 state()->raw_push(w3); | |
1024 state()->raw_push(w2); | |
1025 state()->raw_push(w1); | |
1026 } | |
1027 break; | |
1028 case Bytecodes::_dup2_x2: | |
1029 { Value w1 = state()->raw_pop(); | |
1030 Value w2 = state()->raw_pop(); | |
1031 Value w3 = state()->raw_pop(); | |
1032 Value w4 = state()->raw_pop(); | |
1033 state()->raw_push(w2); | |
1034 state()->raw_push(w1); | |
1035 state()->raw_push(w4); | |
1036 state()->raw_push(w3); | |
1037 state()->raw_push(w2); | |
1038 state()->raw_push(w1); | |
1039 } | |
1040 break; | |
1041 case Bytecodes::_swap: | |
1042 { Value w1 = state()->raw_pop(); | |
1043 Value w2 = state()->raw_pop(); | |
1044 state()->raw_push(w1); | |
1045 state()->raw_push(w2); | |
1046 } | |
1047 break; | |
1048 default: | |
1049 ShouldNotReachHere(); | |
1050 break; | |
1051 } | |
1052 } | |
1053 | |
1054 | |
1055 void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* stack) { | |
1056 Value y = pop(type); | |
1057 Value x = pop(type); | |
1058 // NOTE: strictfp can be queried from current method since we don't | |
1059 // inline methods with differing strictfp bits | |
1060 Value res = new ArithmeticOp(code, x, y, method()->is_strict(), stack); | |
1061 // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level | |
1062 res = append(res); | |
1063 if (method()->is_strict()) { | |
1064 res = round_fp(res); | |
1065 } | |
1066 push(type, res); | |
1067 } | |
1068 | |
1069 | |
1070 void GraphBuilder::negate_op(ValueType* type) { | |
1071 push(type, append(new NegateOp(pop(type)))); | |
1072 } | |
1073 | |
1074 | |
1075 void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) { | |
1076 Value s = ipop(); | |
1077 Value x = pop(type); | |
1078 // try to simplify | |
1079 // Note: This code should go into the canonicalizer as soon as it can | |
1080 // can handle canonicalized forms that contain more than one node. | |
1081 if (CanonicalizeNodes && code == Bytecodes::_iushr) { | |
1082 // pattern: x >>> s | |
1083 IntConstant* s1 = s->type()->as_IntConstant(); | |
1084 if (s1 != NULL) { | |
1085 // pattern: x >>> s1, with s1 constant | |
1086 ShiftOp* l = x->as_ShiftOp(); | |
1087 if (l != NULL && l->op() == Bytecodes::_ishl) { | |
1088 // pattern: (a << b) >>> s1 | |
1089 IntConstant* s0 = l->y()->type()->as_IntConstant(); | |
1090 if (s0 != NULL) { | |
1091 // pattern: (a << s0) >>> s1 | |
1092 const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts | |
1093 const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts | |
1094 if (s0c == s1c) { | |
1095 if (s0c == 0) { | |
1096 // pattern: (a << 0) >>> 0 => simplify to: a | |
1097 ipush(l->x()); | |
1098 } else { | |
1099 // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant | |
1100 assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases"); | |
1101 const int m = (1 << (BitsPerInt - s0c)) - 1; | |
1102 Value s = append(new Constant(new IntConstant(m))); | |
1103 ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s))); | |
1104 } | |
1105 return; | |
1106 } | |
1107 } | |
1108 } | |
1109 } | |
1110 } | |
1111 // could not simplify | |
1112 push(type, append(new ShiftOp(code, x, s))); | |
1113 } | |
1114 | |
1115 | |
1116 void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) { | |
1117 Value y = pop(type); | |
1118 Value x = pop(type); | |
1119 push(type, append(new LogicOp(code, x, y))); | |
1120 } | |
1121 | |
1122 | |
1123 void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) { | |
1124 ValueStack* state_before = state()->copy(); | |
1125 Value y = pop(type); | |
1126 Value x = pop(type); | |
1127 ipush(append(new CompareOp(code, x, y, state_before))); | |
1128 } | |
1129 | |
1130 | |
1131 void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) { | |
1132 push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to)))); | |
1133 } | |
1134 | |
1135 | |
1136 void GraphBuilder::increment() { | |
1137 int index = stream()->get_index(); | |
1138 int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]); | |
1139 load_local(intType, index); | |
1140 ipush(append(new Constant(new IntConstant(delta)))); | |
1141 arithmetic_op(intType, Bytecodes::_iadd); | |
1142 store_local(intType, index); | |
1143 } | |
1144 | |
1145 | |
1146 void GraphBuilder::_goto(int from_bci, int to_bci) { | |
1147 profile_bci(from_bci); | |
1148 append(new Goto(block_at(to_bci), to_bci <= from_bci)); | |
1149 } | |
1150 | |
1151 | |
1152 void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) { | |
1153 BlockBegin* tsux = block_at(stream()->get_dest()); | |
1154 BlockBegin* fsux = block_at(stream()->next_bci()); | |
1155 bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); | |
1156 If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If(); | |
1157 if (profile_branches() && (if_node != NULL)) { | |
1158 if_node->set_profiled_method(method()); | |
1159 if_node->set_profiled_bci(bci()); | |
1160 if_node->set_should_profile(true); | |
1161 } | |
1162 } | |
1163 | |
1164 | |
1165 void GraphBuilder::if_zero(ValueType* type, If::Condition cond) { | |
1166 Value y = append(new Constant(intZero)); | |
1167 ValueStack* state_before = state()->copy(); | |
1168 Value x = ipop(); | |
1169 if_node(x, cond, y, state_before); | |
1170 } | |
1171 | |
1172 | |
1173 void GraphBuilder::if_null(ValueType* type, If::Condition cond) { | |
1174 Value y = append(new Constant(objectNull)); | |
1175 ValueStack* state_before = state()->copy(); | |
1176 Value x = apop(); | |
1177 if_node(x, cond, y, state_before); | |
1178 } | |
1179 | |
1180 | |
1181 void GraphBuilder::if_same(ValueType* type, If::Condition cond) { | |
1182 ValueStack* state_before = state()->copy(); | |
1183 Value y = pop(type); | |
1184 Value x = pop(type); | |
1185 if_node(x, cond, y, state_before); | |
1186 } | |
1187 | |
1188 | |
1189 void GraphBuilder::jsr(int dest) { | |
1190 // We only handle well-formed jsrs (those which are "block-structured"). | |
1191 // If the bytecodes are strange (jumping out of a jsr block) then we | |
1192 // might end up trying to re-parse a block containing a jsr which | |
1193 // has already been activated. Watch for this case and bail out. | |
1194 for (ScopeData* cur_scope_data = scope_data(); | |
1195 cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope(); | |
1196 cur_scope_data = cur_scope_data->parent()) { | |
1197 if (cur_scope_data->jsr_entry_bci() == dest) { | |
1198 BAILOUT("too-complicated jsr/ret structure"); | |
1199 } | |
1200 } | |
1201 | |
1202 push(addressType, append(new Constant(new AddressConstant(next_bci())))); | |
1203 if (!try_inline_jsr(dest)) { | |
1204 return; // bailed out while parsing and inlining subroutine | |
1205 } | |
1206 } | |
1207 | |
1208 | |
1209 void GraphBuilder::ret(int local_index) { | |
1210 if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine"); | |
1211 | |
1212 if (local_index != scope_data()->jsr_return_address_local()) { | |
1213 BAILOUT("can not handle complicated jsr/ret constructs"); | |
1214 } | |
1215 | |
1216 // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation | |
1217 append(new Goto(scope_data()->jsr_continuation(), false)); | |
1218 } | |
1219 | |
1220 | |
1221 void GraphBuilder::table_switch() { | |
1222 Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci()); | |
1223 const int l = switch_->length(); | |
1224 if (CanonicalizeNodes && l == 1) { | |
1225 // total of 2 successors => use If instead of switch | |
1226 // Note: This code should go into the canonicalizer as soon as it can | |
1227 // can handle canonicalized forms that contain more than one node. | |
1228 Value key = append(new Constant(new IntConstant(switch_->low_key()))); | |
1229 BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0)); | |
1230 BlockBegin* fsux = block_at(bci() + switch_->default_offset()); | |
1231 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); | |
1232 ValueStack* state_before = is_bb ? state() : NULL; | |
1233 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); | |
1234 } else { | |
1235 // collect successors | |
1236 BlockList* sux = new BlockList(l + 1, NULL); | |
1237 int i; | |
1238 bool has_bb = false; | |
1239 for (i = 0; i < l; i++) { | |
1240 sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i))); | |
1241 if (switch_->dest_offset_at(i) < 0) has_bb = true; | |
1242 } | |
1243 // add default successor | |
1244 sux->at_put(i, block_at(bci() + switch_->default_offset())); | |
1245 ValueStack* state_before = has_bb ? state() : NULL; | |
1246 append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb)); | |
1247 } | |
1248 } | |
1249 | |
1250 | |
1251 void GraphBuilder::lookup_switch() { | |
1252 Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci()); | |
1253 const int l = switch_->number_of_pairs(); | |
1254 if (CanonicalizeNodes && l == 1) { | |
1255 // total of 2 successors => use If instead of switch | |
1256 // Note: This code should go into the canonicalizer as soon as it can | |
1257 // can handle canonicalized forms that contain more than one node. | |
1258 // simplify to If | |
1259 LookupswitchPair* pair = switch_->pair_at(0); | |
1260 Value key = append(new Constant(new IntConstant(pair->match()))); | |
1261 BlockBegin* tsux = block_at(bci() + pair->offset()); | |
1262 BlockBegin* fsux = block_at(bci() + switch_->default_offset()); | |
1263 bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); | |
1264 ValueStack* state_before = is_bb ? state() : NULL; | |
1265 append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); | |
1266 } else { | |
1267 // collect successors & keys | |
1268 BlockList* sux = new BlockList(l + 1, NULL); | |
1269 intArray* keys = new intArray(l, 0); | |
1270 int i; | |
1271 bool has_bb = false; | |
1272 for (i = 0; i < l; i++) { | |
1273 LookupswitchPair* pair = switch_->pair_at(i); | |
1274 if (pair->offset() < 0) has_bb = true; | |
1275 sux->at_put(i, block_at(bci() + pair->offset())); | |
1276 keys->at_put(i, pair->match()); | |
1277 } | |
1278 // add default successor | |
1279 sux->at_put(i, block_at(bci() + switch_->default_offset())); | |
1280 ValueStack* state_before = has_bb ? state() : NULL; | |
1281 append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); | |
1282 } | |
1283 } | |
1284 | |
1285 void GraphBuilder::call_register_finalizer() { | |
1286 // If the receiver requires finalization then emit code to perform | |
1287 // the registration on return. | |
1288 | |
1289 // Gather some type information about the receiver | |
1290 Value receiver = state()->load_local(0); | |
1291 assert(receiver != NULL, "must have a receiver"); | |
1292 ciType* declared_type = receiver->declared_type(); | |
1293 ciType* exact_type = receiver->exact_type(); | |
1294 if (exact_type == NULL && | |
1295 receiver->as_Local() && | |
1296 receiver->as_Local()->java_index() == 0) { | |
1297 ciInstanceKlass* ik = compilation()->method()->holder(); | |
1298 if (ik->is_final()) { | |
1299 exact_type = ik; | |
1300 } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) { | |
1301 // test class is leaf class | |
1302 compilation()->dependency_recorder()->assert_leaf_type(ik); | |
1303 exact_type = ik; | |
1304 } else { | |
1305 declared_type = ik; | |
1306 } | |
1307 } | |
1308 | |
1309 // see if we know statically that registration isn't required | |
1310 bool needs_check = true; | |
1311 if (exact_type != NULL) { | |
1312 needs_check = exact_type->as_instance_klass()->has_finalizer(); | |
1313 } else if (declared_type != NULL) { | |
1314 ciInstanceKlass* ik = declared_type->as_instance_klass(); | |
1315 if (!Dependencies::has_finalizable_subclass(ik)) { | |
1316 compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik); | |
1317 needs_check = false; | |
1318 } | |
1319 } | |
1320 | |
1321 if (needs_check) { | |
1322 // Perform the registration of finalizable objects. | |
1323 load_local(objectType, 0); | |
1324 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init, | |
1325 state()->pop_arguments(1), | |
1326 true, lock_stack(), true)); | |
1327 } | |
1328 } | |
1329 | |
1330 | |
1331 void GraphBuilder::method_return(Value x) { | |
1332 if (RegisterFinalizersAtInit && | |
1333 method()->intrinsic_id() == vmIntrinsics::_Object_init) { | |
1334 call_register_finalizer(); | |
1335 } | |
1336 | |
1337 // Check to see whether we are inlining. If so, Return | |
1338 // instructions become Gotos to the continuation point. | |
1339 if (continuation() != NULL) { | |
1340 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet"); | |
1341 | |
1342 // If the inlined method is synchronized, the monitor must be | |
1343 // released before we jump to the continuation block. | |
1344 if (method()->is_synchronized()) { | |
1345 int i = state()->caller_state()->locks_size(); | |
1346 assert(state()->locks_size() == i + 1, "receiver must be locked here"); | |
1347 monitorexit(state()->lock_at(i), SynchronizationEntryBCI); | |
1348 } | |
1349 | |
1350 state()->truncate_stack(caller_stack_size()); | |
1351 if (x != NULL) { | |
1352 state()->push(x->type(), x); | |
1353 } | |
1354 Goto* goto_callee = new Goto(continuation(), false); | |
1355 | |
1356 // See whether this is the first return; if so, store off some | |
1357 // of the state for later examination | |
1358 if (num_returns() == 0) { | |
1359 set_inline_cleanup_info(_block, _last, state()); | |
1360 } | |
1361 | |
1362 // State at end of inlined method is the state of the caller | |
1363 // without the method parameters on stack, including the | |
1364 // return value, if any, of the inlined method on operand stack. | |
1365 set_state(scope_data()->continuation_state()->copy()); | |
1366 if (x) { | |
1367 state()->push(x->type(), x); | |
1368 } | |
1369 | |
1370 // The current bci() is in the wrong scope, so use the bci() of | |
1371 // the continuation point. | |
1372 append_with_bci(goto_callee, scope_data()->continuation()->bci()); | |
1373 incr_num_returns(); | |
1374 | |
1375 return; | |
1376 } | |
1377 | |
1378 state()->truncate_stack(0); | |
1379 if (method()->is_synchronized()) { | |
1380 // perform the unlocking before exiting the method | |
1381 Value receiver; | |
1382 if (!method()->is_static()) { | |
1383 receiver = _initial_state->local_at(0); | |
1384 } else { | |
1385 receiver = append(new Constant(new ClassConstant(method()->holder()))); | |
1386 } | |
1387 append_split(new MonitorExit(receiver, state()->unlock())); | |
1388 } | |
1389 | |
1390 append(new Return(x)); | |
1391 } | |
1392 | |
1393 | |
1394 void GraphBuilder::access_field(Bytecodes::Code code) { | |
1395 bool will_link; | |
1396 ciField* field = stream()->get_field(will_link); | |
1397 ciInstanceKlass* holder = field->holder(); | |
1398 BasicType field_type = field->type()->basic_type(); | |
1399 ValueType* type = as_ValueType(field_type); | |
1400 // call will_link again to determine if the field is valid. | |
1401 const bool is_loaded = holder->is_loaded() && | |
1402 field->will_link(method()->holder(), code); | |
1403 const bool is_initialized = is_loaded && holder->is_initialized(); | |
1404 | |
1405 ValueStack* state_copy = NULL; | |
1406 if (!is_initialized || PatchALot) { | |
1407 // save state before instruction for debug info when | |
1408 // deoptimization happens during patching | |
1409 state_copy = state()->copy(); | |
1410 } | |
1411 | |
1412 Value obj = NULL; | |
1413 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { | |
1414 // commoning of class constants should only occur if the class is | |
1415 // fully initialized and resolved in this constant pool. The will_link test | |
1416 // above essentially checks if this class is resolved in this constant pool | |
1417 // so, the is_initialized flag should be suffiect. | |
1418 if (state_copy != NULL) { | |
1419 // build a patching constant | |
1420 obj = new Constant(new ClassConstant(holder), state_copy); | |
1421 } else { | |
1422 obj = new Constant(new ClassConstant(holder)); | |
1423 } | |
1424 } | |
1425 | |
1426 | |
1427 const int offset = is_loaded ? field->offset() : -1; | |
1428 switch (code) { | |
1429 case Bytecodes::_getstatic: { | |
1430 // check for compile-time constants, i.e., initialized static final fields | |
1431 Instruction* constant = NULL; | |
1432 if (field->is_constant() && !PatchALot) { | |
1433 ciConstant field_val = field->constant_value(); | |
1434 BasicType field_type = field_val.basic_type(); | |
1435 switch (field_type) { | |
1436 case T_ARRAY: | |
1437 case T_OBJECT: | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
844
diff
changeset
|
1438 if (field_val.as_object()->should_be_constant()) { |
0 | 1439 constant = new Constant(as_ValueType(field_val)); |
1440 } | |
1441 break; | |
1442 | |
1443 default: | |
1444 constant = new Constant(as_ValueType(field_val)); | |
1445 } | |
1446 } | |
1447 if (constant != NULL) { | |
1448 push(type, append(constant)); | |
1449 state_copy = NULL; // Not a potential deoptimization point (see set_state_before logic below) | |
1450 } else { | |
1451 push(type, append(new LoadField(append(obj), offset, field, true, | |
1452 lock_stack(), state_copy, is_loaded, is_initialized))); | |
1453 } | |
1454 break; | |
1455 } | |
1456 case Bytecodes::_putstatic: | |
1457 { Value val = pop(type); | |
1458 append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized)); | |
1459 } | |
1460 break; | |
1461 case Bytecodes::_getfield : | |
1462 { | |
1463 LoadField* load = new LoadField(apop(), offset, field, false, lock_stack(), state_copy, is_loaded, true); | |
1464 Value replacement = is_loaded ? _memory->load(load) : load; | |
1465 if (replacement != load) { | |
1466 assert(replacement->bci() != -99 || replacement->as_Phi() || replacement->as_Local(), | |
1467 "should already by linked"); | |
1468 push(type, replacement); | |
1469 } else { | |
1470 push(type, append(load)); | |
1471 } | |
1472 break; | |
1473 } | |
1474 | |
1475 case Bytecodes::_putfield : | |
1476 { Value val = pop(type); | |
1477 StoreField* store = new StoreField(apop(), offset, field, val, false, lock_stack(), state_copy, is_loaded, true); | |
1478 if (is_loaded) store = _memory->store(store); | |
1479 if (store != NULL) { | |
1480 append(store); | |
1481 } | |
1482 } | |
1483 break; | |
1484 default : | |
1485 ShouldNotReachHere(); | |
1486 break; | |
1487 } | |
1488 } | |
1489 | |
1490 | |
1491 Dependencies* GraphBuilder::dependency_recorder() const { | |
1492 assert(DeoptC1, "need debug information"); | |
1493 return compilation()->dependency_recorder(); | |
1494 } | |
1495 | |
1496 | |
1497 void GraphBuilder::invoke(Bytecodes::Code code) { | |
1498 bool will_link; | |
1499 ciMethod* target = stream()->get_method(will_link); | |
1500 // we have to make sure the argument size (incl. the receiver) | |
1501 // is correct for compilation (the call would fail later during | |
1502 // linkage anyway) - was bug (gri 7/28/99) | |
1503 if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error"); | |
1504 ciInstanceKlass* klass = target->holder(); | |
1505 | |
1506 // check if CHA possible: if so, change the code to invoke_special | |
1507 ciInstanceKlass* calling_klass = method()->holder(); | |
1508 ciKlass* holder = stream()->get_declared_method_holder(); | |
1509 ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); | |
1510 ciInstanceKlass* actual_recv = callee_holder; | |
1511 | |
1512 // some methods are obviously bindable without any type checks so | |
1513 // convert them directly to an invokespecial. | |
1514 if (target->is_loaded() && !target->is_abstract() && | |
1515 target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { | |
1516 code = Bytecodes::_invokespecial; | |
1517 } | |
1518 | |
1519 // NEEDS_CLEANUP | |
1520 // I've added the target-is_loaded() test below but I don't really understand | |
1521 // how klass->is_loaded() can be true and yet target->is_loaded() is false. | |
1522 // this happened while running the JCK invokevirtual tests under doit. TKR | |
1523 ciMethod* cha_monomorphic_target = NULL; | |
1524 ciMethod* exact_target = NULL; | |
1295 | 1525 if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() && |
1526 !target->is_method_handle_invoke()) { | |
0 | 1527 Value receiver = NULL; |
1528 ciInstanceKlass* receiver_klass = NULL; | |
1529 bool type_is_exact = false; | |
1530 // try to find a precise receiver type | |
1531 if (will_link && !target->is_static()) { | |
1532 int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); | |
1533 receiver = state()->stack_at(index); | |
1534 ciType* type = receiver->exact_type(); | |
1535 if (type != NULL && type->is_loaded() && | |
1536 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { | |
1537 receiver_klass = (ciInstanceKlass*) type; | |
1538 type_is_exact = true; | |
1539 } | |
1540 if (type == NULL) { | |
1541 type = receiver->declared_type(); | |
1542 if (type != NULL && type->is_loaded() && | |
1543 type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { | |
1544 receiver_klass = (ciInstanceKlass*) type; | |
1545 if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) { | |
1546 // Insert a dependency on this type since | |
1547 // find_monomorphic_target may assume it's already done. | |
1548 dependency_recorder()->assert_leaf_type(receiver_klass); | |
1549 type_is_exact = true; | |
1550 } | |
1551 } | |
1552 } | |
1553 } | |
1554 if (receiver_klass != NULL && type_is_exact && | |
1555 receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) { | |
1556 // If we have the exact receiver type we can bind directly to | |
1557 // the method to call. | |
1558 exact_target = target->resolve_invoke(calling_klass, receiver_klass); | |
1559 if (exact_target != NULL) { | |
1560 target = exact_target; | |
1561 code = Bytecodes::_invokespecial; | |
1562 } | |
1563 } | |
1564 if (receiver_klass != NULL && | |
1565 receiver_klass->is_subtype_of(actual_recv) && | |
1566 actual_recv->is_initialized()) { | |
1567 actual_recv = receiver_klass; | |
1568 } | |
1569 | |
1570 if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || | |
1571 (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) { | |
1572 // Use CHA on the receiver to select a more precise method. | |
1573 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv); | |
1574 } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) { | |
1575 // if there is only one implementor of this interface then we | |
1576 // may be able bind this invoke directly to the implementing | |
1577 // klass but we need both a dependence on the single interface | |
1578 // and on the method we bind to. Additionally since all we know | |
1579 // about the receiver type is the it's supposed to implement the | |
1580 // interface we have to insert a check that it's the class we | |
1581 // expect. Interface types are not checked by the verifier so | |
1582 // they are roughly equivalent to Object. | |
1583 ciInstanceKlass* singleton = NULL; | |
1584 if (target->holder()->nof_implementors() == 1) { | |
1585 singleton = target->holder()->implementor(0); | |
1586 } | |
1587 if (singleton) { | |
1588 cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton); | |
1589 if (cha_monomorphic_target != NULL) { | |
1590 // If CHA is able to bind this invoke then update the class | |
1591 // to match that class, otherwise klass will refer to the | |
1592 // interface. | |
1593 klass = cha_monomorphic_target->holder(); | |
1594 actual_recv = target->holder(); | |
1595 | |
1596 // insert a check it's really the expected class. | |
1597 CheckCast* c = new CheckCast(klass, receiver, NULL); | |
1598 c->set_incompatible_class_change_check(); | |
1599 c->set_direct_compare(klass->is_final()); | |
1600 append_split(c); | |
1601 } | |
1602 } | |
1603 } | |
1604 } | |
1605 | |
1606 if (cha_monomorphic_target != NULL) { | |
1607 if (cha_monomorphic_target->is_abstract()) { | |
1608 // Do not optimize for abstract methods | |
1609 cha_monomorphic_target = NULL; | |
1610 } | |
1611 } | |
1612 | |
1613 if (cha_monomorphic_target != NULL) { | |
1614 if (!(target->is_final_method())) { | |
1615 // If we inlined because CHA revealed only a single target method, | |
1616 // then we are dependent on that target method not getting overridden | |
1617 // by dynamic class loading. Be sure to test the "static" receiver | |
1618 // dest_method here, as opposed to the actual receiver, which may | |
1619 // falsely lead us to believe that the receiver is final or private. | |
1620 dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target); | |
1621 } | |
1622 code = Bytecodes::_invokespecial; | |
1623 } | |
1624 // check if we could do inlining | |
1625 if (!PatchALot && Inline && klass->is_loaded() && | |
1626 (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized()) | |
1627 && target->will_link(klass, callee_holder, code)) { | |
1628 // callee is known => check if we have static binding | |
1629 assert(target->is_loaded(), "callee must be known"); | |
1630 if (code == Bytecodes::_invokestatic | |
1631 || code == Bytecodes::_invokespecial | |
1632 || code == Bytecodes::_invokevirtual && target->is_final_method() | |
1633 ) { | |
1634 // static binding => check if callee is ok | |
1635 ciMethod* inline_target = (cha_monomorphic_target != NULL) | |
1636 ? cha_monomorphic_target | |
1637 : target; | |
1638 bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL)); | |
1639 CHECK_BAILOUT(); | |
1640 | |
1641 #ifndef PRODUCT | |
1642 // printing | |
1643 if (PrintInlining && !res) { | |
1644 // if it was successfully inlined, then it was already printed. | |
1645 print_inline_result(inline_target, res); | |
1646 } | |
1647 #endif | |
1648 clear_inline_bailout(); | |
1649 if (res) { | |
1650 // Register dependence if JVMTI has either breakpoint | |
1651 // setting or hotswapping of methods capabilities since they may | |
1652 // cause deoptimization. | |
780
c96bf21b756f
6788527: Server vm intermittently fails with assertion "live value must not be garbage" with fastdebug bits
kvn
parents:
726
diff
changeset
|
1653 if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) { |
0 | 1654 dependency_recorder()->assert_evol_method(inline_target); |
1655 } | |
1656 return; | |
1657 } | |
1658 } | |
1659 } | |
1660 // If we attempted an inline which did not succeed because of a | |
1661 // bailout during construction of the callee graph, the entire | |
1662 // compilation has to be aborted. This is fairly rare and currently | |
1663 // seems to only occur for jasm-generated classes which contain | |
1664 // jsr/ret pairs which are not associated with finally clauses and | |
1665 // do not have exception handlers in the containing method, and are | |
1666 // therefore not caught early enough to abort the inlining without | |
1667 // corrupting the graph. (We currently bail out with a non-empty | |
1668 // stack at a ret in these situations.) | |
1669 CHECK_BAILOUT(); | |
1670 | |
1671 // inlining not successful => standard invoke | |
1295 | 1672 bool is_loaded = target->is_loaded(); |
1673 bool has_receiver = | |
1674 code == Bytecodes::_invokespecial || | |
1675 code == Bytecodes::_invokevirtual || | |
1676 code == Bytecodes::_invokeinterface; | |
1677 bool is_invokedynamic = code == Bytecodes::_invokedynamic; | |
0 | 1678 ValueType* result_type = as_ValueType(target->return_type()); |
1295 | 1679 |
1680 // We require the debug info to be the "state before" because | |
1681 // invokedynamics may deoptimize. | |
1682 ValueStack* state_before = is_invokedynamic ? state()->copy() : NULL; | |
1683 | |
0 | 1684 Values* args = state()->pop_arguments(target->arg_size_no_receiver()); |
1295 | 1685 Value recv = has_receiver ? apop() : NULL; |
0 | 1686 int vtable_index = methodOopDesc::invalid_vtable_index; |
1687 | |
1688 #ifdef SPARC | |
1689 // Currently only supported on Sparc. | |
1690 // The UseInlineCaches only controls dispatch to invokevirtuals for | |
1691 // loaded classes which we weren't able to statically bind. | |
1692 if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual | |
1693 && !target->can_be_statically_bound()) { | |
1694 // Find a vtable index if one is available | |
1695 vtable_index = target->resolve_vtable_index(calling_klass, callee_holder); | |
1696 } | |
1697 #endif | |
1698 | |
1699 if (recv != NULL && | |
1700 (code == Bytecodes::_invokespecial || | |
1701 !is_loaded || target->is_final() || | |
1702 profile_calls())) { | |
1703 // invokespecial always needs a NULL check. invokevirtual where | |
1704 // the target is final or where it's not known that whether the | |
1705 // target is final requires a NULL check. Otherwise normal | |
1706 // invokevirtual will perform the null check during the lookup | |
1707 // logic or the unverified entry point. Profiling of calls | |
1708 // requires that the null check is performed in all cases. | |
1709 null_check(recv); | |
1710 } | |
1711 | |
1712 if (profile_calls()) { | |
1713 assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set"); | |
1714 ciKlass* target_klass = NULL; | |
1715 if (cha_monomorphic_target != NULL) { | |
1716 target_klass = cha_monomorphic_target->holder(); | |
1717 } else if (exact_target != NULL) { | |
1718 target_klass = exact_target->holder(); | |
1719 } | |
1720 profile_call(recv, target_klass); | |
1721 } | |
1722 | |
1295 | 1723 Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before); |
0 | 1724 // push result |
1725 append_split(result); | |
1726 | |
1727 if (result_type != voidType) { | |
1728 if (method()->is_strict()) { | |
1729 push(result_type, round_fp(result)); | |
1730 } else { | |
1731 push(result_type, result); | |
1732 } | |
1733 } | |
1734 } | |
1735 | |
1736 | |
1737 void GraphBuilder::new_instance(int klass_index) { | |
1738 bool will_link; | |
1739 ciKlass* klass = stream()->get_klass(will_link); | |
1740 assert(klass->is_instance_klass(), "must be an instance klass"); | |
1741 NewInstance* new_instance = new NewInstance(klass->as_instance_klass()); | |
1742 _memory->new_instance(new_instance); | |
1743 apush(append_split(new_instance)); | |
1744 } | |
1745 | |
1746 | |
1747 void GraphBuilder::new_type_array() { | |
1748 apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index()))); | |
1749 } | |
1750 | |
1751 | |
1752 void GraphBuilder::new_object_array() { | |
1753 bool will_link; | |
1754 ciKlass* klass = stream()->get_klass(will_link); | |
1755 ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL; | |
1756 NewArray* n = new NewObjectArray(klass, ipop(), state_before); | |
1757 apush(append_split(n)); | |
1758 } | |
1759 | |
1760 | |
1761 bool GraphBuilder::direct_compare(ciKlass* k) { | |
1762 if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) { | |
1763 ciInstanceKlass* ik = k->as_instance_klass(); | |
1764 if (ik->is_final()) { | |
1765 return true; | |
1766 } else { | |
1767 if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { | |
1768 // test class is leaf class | |
1769 dependency_recorder()->assert_leaf_type(ik); | |
1770 return true; | |
1771 } | |
1772 } | |
1773 } | |
1774 return false; | |
1775 } | |
1776 | |
1777 | |
1778 void GraphBuilder::check_cast(int klass_index) { | |
1779 bool will_link; | |
1780 ciKlass* klass = stream()->get_klass(will_link); | |
1781 ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL; | |
1782 CheckCast* c = new CheckCast(klass, apop(), state_before); | |
1783 apush(append_split(c)); | |
1784 c->set_direct_compare(direct_compare(klass)); | |
1785 if (profile_checkcasts()) { | |
1786 c->set_profiled_method(method()); | |
1787 c->set_profiled_bci(bci()); | |
1788 c->set_should_profile(true); | |
1789 } | |
1790 } | |
1791 | |
1792 | |
1793 void GraphBuilder::instance_of(int klass_index) { | |
1794 bool will_link; | |
1795 ciKlass* klass = stream()->get_klass(will_link); | |
1796 ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL; | |
1797 InstanceOf* i = new InstanceOf(klass, apop(), state_before); | |
1798 ipush(append_split(i)); | |
1799 i->set_direct_compare(direct_compare(klass)); | |
1800 } | |
1801 | |
1802 | |
1803 void GraphBuilder::monitorenter(Value x, int bci) { | |
1804 // save state before locking in case of deoptimization after a NullPointerException | |
1805 ValueStack* lock_stack_before = lock_stack(); | |
1806 append_with_bci(new MonitorEnter(x, state()->lock(scope(), x), lock_stack_before), bci); | |
1807 kill_all(); | |
1808 } | |
1809 | |
1810 | |
1811 void GraphBuilder::monitorexit(Value x, int bci) { | |
1812 // Note: the comment below is only relevant for the case where we do | |
1813 // not deoptimize due to asynchronous exceptions (!(DeoptC1 && | |
1814 // DeoptOnAsyncException), which is not used anymore) | |
1815 | |
1816 // Note: Potentially, the monitor state in an exception handler | |
1817 // can be wrong due to wrong 'initialization' of the handler | |
1818 // via a wrong asynchronous exception path. This can happen, | |
1819 // if the exception handler range for asynchronous exceptions | |
1820 // is too long (see also java bug 4327029, and comment in | |
1821 // GraphBuilder::handle_exception()). This may cause 'under- | |
1822 // flow' of the monitor stack => bailout instead. | |
1823 if (state()->locks_size() < 1) BAILOUT("monitor stack underflow"); | |
1824 append_with_bci(new MonitorExit(x, state()->unlock()), bci); | |
1825 kill_all(); | |
1826 } | |
1827 | |
1828 | |
1829 void GraphBuilder::new_multi_array(int dimensions) { | |
1830 bool will_link; | |
1831 ciKlass* klass = stream()->get_klass(will_link); | |
1832 ValueStack* state_before = !klass->is_loaded() || PatchALot ? state()->copy() : NULL; | |
1833 | |
1834 Values* dims = new Values(dimensions, NULL); | |
1835 // fill in all dimensions | |
1836 int i = dimensions; | |
1837 while (i-- > 0) dims->at_put(i, ipop()); | |
1838 // create array | |
1839 NewArray* n = new NewMultiArray(klass, dims, state_before); | |
1840 apush(append_split(n)); | |
1841 } | |
1842 | |
1843 | |
1844 void GraphBuilder::throw_op(int bci) { | |
1845 // We require that the debug info for a Throw be the "state before" | |
1846 // the Throw (i.e., exception oop is still on TOS) | |
1847 ValueStack* state_before = state()->copy(); | |
1848 Throw* t = new Throw(apop(), state_before); | |
1849 append_with_bci(t, bci); | |
1850 } | |
1851 | |
1852 | |
1853 Value GraphBuilder::round_fp(Value fp_value) { | |
1854 // no rounding needed if SSE2 is used | |
1855 if (RoundFPResults && UseSSE < 2) { | |
1856 // Must currently insert rounding node for doubleword values that | |
1857 // are results of expressions (i.e., not loads from memory or | |
1858 // constants) | |
1859 if (fp_value->type()->tag() == doubleTag && | |
1860 fp_value->as_Constant() == NULL && | |
1861 fp_value->as_Local() == NULL && // method parameters need no rounding | |
1862 fp_value->as_RoundFP() == NULL) { | |
1863 return append(new RoundFP(fp_value)); | |
1864 } | |
1865 } | |
1866 return fp_value; | |
1867 } | |
1868 | |
1869 | |
1870 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { | |
1871 Canonicalizer canon(instr, bci); | |
1872 Instruction* i1 = canon.canonical(); | |
1873 if (i1->bci() != -99) { | |
1874 // Canonicalizer returned an instruction which was already | |
1875 // appended so simply return it. | |
1876 return i1; | |
1877 } else if (UseLocalValueNumbering) { | |
1878 // Lookup the instruction in the ValueMap and add it to the map if | |
1879 // it's not found. | |
1880 Instruction* i2 = vmap()->find_insert(i1); | |
1881 if (i2 != i1) { | |
1882 // found an entry in the value map, so just return it. | |
1883 assert(i2->bci() != -1, "should already be linked"); | |
1884 return i2; | |
1885 } | |
459 | 1886 ValueNumberingEffects vne(vmap()); |
1887 i1->visit(&vne); | |
0 | 1888 } |
1889 | |
1890 if (i1->as_Phi() == NULL && i1->as_Local() == NULL) { | |
1891 // i1 was not eliminated => append it | |
1892 assert(i1->next() == NULL, "shouldn't already be linked"); | |
1893 _last = _last->set_next(i1, canon.bci()); | |
1894 if (++_instruction_count >= InstructionCountCutoff | |
1895 && !bailed_out()) { | |
1896 // set the bailout state but complete normal processing. We | |
1897 // might do a little more work before noticing the bailout so we | |
1898 // want processing to continue normally until it's noticed. | |
1899 bailout("Method and/or inlining is too large"); | |
1900 } | |
1901 | |
1902 #ifndef PRODUCT | |
1903 if (PrintIRDuringConstruction) { | |
1904 InstructionPrinter ip; | |
1905 ip.print_line(i1); | |
1906 if (Verbose) { | |
1907 state()->print(); | |
1908 } | |
1909 } | |
1910 #endif | |
1911 assert(_last == i1, "adjust code below"); | |
1912 StateSplit* s = i1->as_StateSplit(); | |
1913 if (s != NULL && i1->as_BlockEnd() == NULL) { | |
1914 if (EliminateFieldAccess) { | |
459 | 1915 Intrinsic* intrinsic = s->as_Intrinsic(); |
0 | 1916 if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) { |
1917 _memory->kill(); | |
1918 } | |
1919 } | |
1920 s->set_state(state()->copy()); | |
1921 } | |
1922 // set up exception handlers for this instruction if necessary | |
1923 if (i1->can_trap()) { | |
1924 assert(exception_state() != NULL || !has_handler(), "must have setup exception state"); | |
1925 i1->set_exception_handlers(handle_exception(bci)); | |
1926 } | |
1927 } | |
1928 return i1; | |
1929 } | |
1930 | |
1931 | |
1932 Instruction* GraphBuilder::append(Instruction* instr) { | |
1933 assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used"); | |
1934 return append_with_bci(instr, bci()); | |
1935 } | |
1936 | |
1937 | |
1938 Instruction* GraphBuilder::append_split(StateSplit* instr) { | |
1939 return append_with_bci(instr, bci()); | |
1940 } | |
1941 | |
1942 | |
1943 void GraphBuilder::null_check(Value value) { | |
1944 if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) { | |
1945 return; | |
1946 } else { | |
1947 Constant* con = value->as_Constant(); | |
1948 if (con) { | |
1949 ObjectType* c = con->type()->as_ObjectType(); | |
1950 if (c && c->is_loaded()) { | |
1951 ObjectConstant* oc = c->as_ObjectConstant(); | |
1952 if (!oc || !oc->value()->is_null_object()) { | |
1953 return; | |
1954 } | |
1955 } | |
1956 } | |
1957 } | |
1958 append(new NullCheck(value, lock_stack())); | |
1959 } | |
1960 | |
1961 | |
1962 | |
1963 XHandlers* GraphBuilder::handle_exception(int cur_bci) { | |
1964 // fast path if it is guaranteed that no exception handlers are present | |
1965 if (!has_handler()) { | |
1966 // TODO: check if return NULL is possible (avoids empty lists) | |
1967 return new XHandlers(); | |
1968 } | |
1969 | |
1970 XHandlers* exception_handlers = new XHandlers(); | |
1971 ScopeData* cur_scope_data = scope_data(); | |
1972 ValueStack* s = exception_state(); | |
1973 int scope_count = 0; | |
1974 | |
1975 assert(s != NULL, "exception state must be set"); | |
1976 do { | |
1977 assert(cur_scope_data->scope() == s->scope(), "scopes do not match"); | |
1978 assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci"); | |
1979 | |
1980 // join with all potential exception handlers | |
1981 XHandlers* list = cur_scope_data->xhandlers(); | |
1982 const int n = list->length(); | |
1983 for (int i = 0; i < n; i++) { | |
1984 XHandler* h = list->handler_at(i); | |
1985 if (h->covers(cur_bci)) { | |
1986 // h is a potential exception handler => join it | |
1987 compilation()->set_has_exception_handlers(true); | |
1988 | |
1989 BlockBegin* entry = h->entry_block(); | |
1990 if (entry == block()) { | |
1991 // It's acceptable for an exception handler to cover itself | |
1992 // but we don't handle that in the parser currently. It's | |
1993 // very rare so we bailout instead of trying to handle it. | |
1994 BAILOUT_("exception handler covers itself", exception_handlers); | |
1995 } | |
1996 assert(entry->bci() == h->handler_bci(), "must match"); | |
1997 assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond"); | |
1998 | |
1999 // previously this was a BAILOUT, but this is not necessary | |
2000 // now because asynchronous exceptions are not handled this way. | |
2001 assert(entry->state() == NULL || s->locks_size() == entry->state()->locks_size(), "locks do not match"); | |
2002 | |
2003 // xhandler start with an empty expression stack | |
2004 s->truncate_stack(cur_scope_data->caller_stack_size()); | |
2005 | |
2006 // Note: Usually this join must work. However, very | |
2007 // complicated jsr-ret structures where we don't ret from | |
2008 // the subroutine can cause the objects on the monitor | |
2009 // stacks to not match because blocks can be parsed twice. | |
2010 // The only test case we've seen so far which exhibits this | |
2011 // problem is caught by the infinite recursion test in | |
2012 // GraphBuilder::jsr() if the join doesn't work. | |
2013 if (!entry->try_merge(s)) { | |
2014 BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers); | |
2015 } | |
2016 | |
2017 // add current state for correct handling of phi functions at begin of xhandler | |
2018 int phi_operand = entry->add_exception_state(s); | |
2019 | |
2020 // add entry to the list of xhandlers of this block | |
2021 _block->add_exception_handler(entry); | |
2022 | |
2023 // add back-edge from xhandler entry to this block | |
2024 if (!entry->is_predecessor(_block)) { | |
2025 entry->add_predecessor(_block); | |
2026 } | |
2027 | |
2028 // clone XHandler because phi_operand and scope_count can not be shared | |
2029 XHandler* new_xhandler = new XHandler(h); | |
2030 new_xhandler->set_phi_operand(phi_operand); | |
2031 new_xhandler->set_scope_count(scope_count); | |
2032 exception_handlers->append(new_xhandler); | |
2033 | |
2034 // fill in exception handler subgraph lazily | |
2035 assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet"); | |
2036 cur_scope_data->add_to_work_list(entry); | |
2037 | |
2038 // stop when reaching catchall | |
2039 if (h->catch_type() == 0) { | |
2040 return exception_handlers; | |
2041 } | |
2042 } | |
2043 } | |
2044 | |
2045 // Set up iteration for next time. | |
2046 // If parsing a jsr, do not grab exception handlers from the | |
2047 // parent scopes for this method (already got them, and they | |
2048 // needed to be cloned) | |
2049 if (cur_scope_data->parsing_jsr()) { | |
2050 IRScope* tmp_scope = cur_scope_data->scope(); | |
2051 while (cur_scope_data->parent() != NULL && | |
2052 cur_scope_data->parent()->scope() == tmp_scope) { | |
2053 cur_scope_data = cur_scope_data->parent(); | |
2054 } | |
2055 } | |
2056 if (cur_scope_data != NULL) { | |
2057 if (cur_scope_data->parent() != NULL) { | |
2058 // must use pop_scope instead of caller_state to preserve all monitors | |
2059 s = s->pop_scope(); | |
2060 } | |
2061 cur_bci = cur_scope_data->scope()->caller_bci(); | |
2062 cur_scope_data = cur_scope_data->parent(); | |
2063 scope_count++; | |
2064 } | |
2065 } while (cur_scope_data != NULL); | |
2066 | |
2067 return exception_handlers; | |
2068 } | |
2069 | |
2070 | |
2071 // Helper class for simplifying Phis. | |
2072 class PhiSimplifier : public BlockClosure { | |
2073 private: | |
2074 bool _has_substitutions; | |
2075 Value simplify(Value v); | |
2076 | |
2077 public: | |
2078 PhiSimplifier(BlockBegin* start) : _has_substitutions(false) { | |
2079 start->iterate_preorder(this); | |
2080 if (_has_substitutions) { | |
2081 SubstitutionResolver sr(start); | |
2082 } | |
2083 } | |
2084 void block_do(BlockBegin* b); | |
2085 bool has_substitutions() const { return _has_substitutions; } | |
2086 }; | |
2087 | |
2088 | |
2089 Value PhiSimplifier::simplify(Value v) { | |
2090 Phi* phi = v->as_Phi(); | |
2091 | |
2092 if (phi == NULL) { | |
2093 // no phi function | |
2094 return v; | |
2095 } else if (v->has_subst()) { | |
2096 // already substituted; subst can be phi itself -> simplify | |
2097 return simplify(v->subst()); | |
2098 } else if (phi->is_set(Phi::cannot_simplify)) { | |
2099 // already tried to simplify phi before | |
2100 return phi; | |
2101 } else if (phi->is_set(Phi::visited)) { | |
2102 // break cycles in phi functions | |
2103 return phi; | |
2104 } else if (phi->type()->is_illegal()) { | |
2105 // illegal phi functions are ignored anyway | |
2106 return phi; | |
2107 | |
2108 } else { | |
2109 // mark phi function as processed to break cycles in phi functions | |
2110 phi->set(Phi::visited); | |
2111 | |
2112 // simplify x = [y, x] and x = [y, y] to y | |
2113 Value subst = NULL; | |
2114 int opd_count = phi->operand_count(); | |
2115 for (int i = 0; i < opd_count; i++) { | |
2116 Value opd = phi->operand_at(i); | |
2117 assert(opd != NULL, "Operand must exist!"); | |
2118 | |
2119 if (opd->type()->is_illegal()) { | |
2120 // if one operand is illegal, the entire phi function is illegal | |
2121 phi->make_illegal(); | |
2122 phi->clear(Phi::visited); | |
2123 return phi; | |
2124 } | |
2125 | |
2126 Value new_opd = simplify(opd); | |
2127 assert(new_opd != NULL, "Simplified operand must exist!"); | |
2128 | |
2129 if (new_opd != phi && new_opd != subst) { | |
2130 if (subst == NULL) { | |
2131 subst = new_opd; | |
2132 } else { | |
2133 // no simplification possible | |
2134 phi->set(Phi::cannot_simplify); | |
2135 phi->clear(Phi::visited); | |
2136 return phi; | |
2137 } | |
2138 } | |
2139 } | |
2140 | |
2141 // sucessfully simplified phi function | |
2142 assert(subst != NULL, "illegal phi function"); | |
2143 _has_substitutions = true; | |
2144 phi->clear(Phi::visited); | |
2145 phi->set_subst(subst); | |
2146 | |
2147 #ifndef PRODUCT | |
2148 if (PrintPhiFunctions) { | |
2149 tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); | |
2150 } | |
2151 #endif | |
2152 | |
2153 return subst; | |
2154 } | |
2155 } | |
2156 | |
2157 | |
2158 void PhiSimplifier::block_do(BlockBegin* b) { | |
2159 for_each_phi_fun(b, phi, | |
2160 simplify(phi); | |
2161 ); | |
2162 | |
2163 #ifdef ASSERT | |
2164 for_each_phi_fun(b, phi, | |
2165 assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification"); | |
2166 ); | |
2167 | |
2168 ValueStack* state = b->state()->caller_state(); | |
2169 int index; | |
2170 Value value; | |
2171 for_each_state(state) { | |
2172 for_each_local_value(state, index, value) { | |
2173 Phi* phi = value->as_Phi(); | |
2174 assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state"); | |
2175 } | |
2176 } | |
2177 #endif | |
2178 } | |
2179 | |
2180 // This method is called after all blocks are filled with HIR instructions | |
2181 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x] | |
2182 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { | |
2183 PhiSimplifier simplifier(start); | |
2184 } | |
2185 | |
2186 | |
2187 void GraphBuilder::connect_to_end(BlockBegin* beg) { | |
2188 // setup iteration | |
2189 kill_all(); | |
2190 _block = beg; | |
2191 _state = beg->state()->copy(); | |
2192 _last = beg; | |
2193 iterate_bytecodes_for_block(beg->bci()); | |
2194 } | |
2195 | |
2196 | |
2197 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { | |
2198 #ifndef PRODUCT | |
2199 if (PrintIRDuringConstruction) { | |
2200 tty->cr(); | |
2201 InstructionPrinter ip; | |
2202 ip.print_instr(_block); tty->cr(); | |
2203 ip.print_stack(_block->state()); tty->cr(); | |
2204 ip.print_inline_level(_block); | |
2205 ip.print_head(); | |
2206 tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size()); | |
2207 } | |
2208 #endif | |
2209 _skip_block = false; | |
2210 assert(state() != NULL, "ValueStack missing!"); | |
2211 ciBytecodeStream s(method()); | |
2212 s.reset_to_bci(bci); | |
2213 int prev_bci = bci; | |
2214 scope_data()->set_stream(&s); | |
2215 // iterate | |
2216 Bytecodes::Code code = Bytecodes::_illegal; | |
2217 bool push_exception = false; | |
2218 | |
2219 if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) { | |
2220 // first thing in the exception entry block should be the exception object. | |
2221 push_exception = true; | |
2222 } | |
2223 | |
2224 while (!bailed_out() && last()->as_BlockEnd() == NULL && | |
2225 (code = stream()->next()) != ciBytecodeStream::EOBC() && | |
2226 (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) { | |
2227 | |
2228 if (has_handler() && can_trap(method(), code)) { | |
2229 // copy the state because it is modified before handle_exception is called | |
2230 set_exception_state(state()->copy()); | |
2231 } else { | |
2232 // handle_exception is not called for this bytecode | |
2233 set_exception_state(NULL); | |
2234 } | |
2235 | |
2236 // Check for active jsr during OSR compilation | |
2237 if (compilation()->is_osr_compile() | |
2238 && scope()->is_top_scope() | |
2239 && parsing_jsr() | |
2240 && s.cur_bci() == compilation()->osr_bci()) { | |
2241 bailout("OSR not supported while a jsr is active"); | |
2242 } | |
2243 | |
2244 if (push_exception) { | |
2245 apush(append(new ExceptionObject())); | |
2246 push_exception = false; | |
2247 } | |
2248 | |
2249 // handle bytecode | |
2250 switch (code) { | |
2251 case Bytecodes::_nop : /* nothing to do */ break; | |
2252 case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; | |
2253 case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; | |
2254 case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; | |
2255 case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; | |
2256 case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; | |
2257 case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; | |
2258 case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; | |
2259 case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; | |
2260 case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; | |
2261 case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; | |
2262 case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; | |
2263 case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; | |
2264 case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; | |
2265 case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; | |
2266 case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; | |
2267 case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; | |
2268 case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; | |
2269 case Bytecodes::_ldc : // fall through | |
2270 case Bytecodes::_ldc_w : // fall through | |
2271 case Bytecodes::_ldc2_w : load_constant(); break; | |
2272 case Bytecodes::_iload : load_local(intType , s.get_index()); break; | |
2273 case Bytecodes::_lload : load_local(longType , s.get_index()); break; | |
2274 case Bytecodes::_fload : load_local(floatType , s.get_index()); break; | |
2275 case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; | |
2276 case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; | |
2277 case Bytecodes::_iload_0 : load_local(intType , 0); break; | |
2278 case Bytecodes::_iload_1 : load_local(intType , 1); break; | |
2279 case Bytecodes::_iload_2 : load_local(intType , 2); break; | |
2280 case Bytecodes::_iload_3 : load_local(intType , 3); break; | |
2281 case Bytecodes::_lload_0 : load_local(longType , 0); break; | |
2282 case Bytecodes::_lload_1 : load_local(longType , 1); break; | |
2283 case Bytecodes::_lload_2 : load_local(longType , 2); break; | |
2284 case Bytecodes::_lload_3 : load_local(longType , 3); break; | |
2285 case Bytecodes::_fload_0 : load_local(floatType , 0); break; | |
2286 case Bytecodes::_fload_1 : load_local(floatType , 1); break; | |
2287 case Bytecodes::_fload_2 : load_local(floatType , 2); break; | |
2288 case Bytecodes::_fload_3 : load_local(floatType , 3); break; | |
2289 case Bytecodes::_dload_0 : load_local(doubleType, 0); break; | |
2290 case Bytecodes::_dload_1 : load_local(doubleType, 1); break; | |
2291 case Bytecodes::_dload_2 : load_local(doubleType, 2); break; | |
2292 case Bytecodes::_dload_3 : load_local(doubleType, 3); break; | |
2293 case Bytecodes::_aload_0 : load_local(objectType, 0); break; | |
2294 case Bytecodes::_aload_1 : load_local(objectType, 1); break; | |
2295 case Bytecodes::_aload_2 : load_local(objectType, 2); break; | |
2296 case Bytecodes::_aload_3 : load_local(objectType, 3); break; | |
2297 case Bytecodes::_iaload : load_indexed(T_INT ); break; | |
2298 case Bytecodes::_laload : load_indexed(T_LONG ); break; | |
2299 case Bytecodes::_faload : load_indexed(T_FLOAT ); break; | |
2300 case Bytecodes::_daload : load_indexed(T_DOUBLE); break; | |
2301 case Bytecodes::_aaload : load_indexed(T_OBJECT); break; | |
2302 case Bytecodes::_baload : load_indexed(T_BYTE ); break; | |
2303 case Bytecodes::_caload : load_indexed(T_CHAR ); break; | |
2304 case Bytecodes::_saload : load_indexed(T_SHORT ); break; | |
2305 case Bytecodes::_istore : store_local(intType , s.get_index()); break; | |
2306 case Bytecodes::_lstore : store_local(longType , s.get_index()); break; | |
2307 case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; | |
2308 case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; | |
2309 case Bytecodes::_astore : store_local(objectType, s.get_index()); break; | |
2310 case Bytecodes::_istore_0 : store_local(intType , 0); break; | |
2311 case Bytecodes::_istore_1 : store_local(intType , 1); break; | |
2312 case Bytecodes::_istore_2 : store_local(intType , 2); break; | |
2313 case Bytecodes::_istore_3 : store_local(intType , 3); break; | |
2314 case Bytecodes::_lstore_0 : store_local(longType , 0); break; | |
2315 case Bytecodes::_lstore_1 : store_local(longType , 1); break; | |
2316 case Bytecodes::_lstore_2 : store_local(longType , 2); break; | |
2317 case Bytecodes::_lstore_3 : store_local(longType , 3); break; | |
2318 case Bytecodes::_fstore_0 : store_local(floatType , 0); break; | |
2319 case Bytecodes::_fstore_1 : store_local(floatType , 1); break; | |
2320 case Bytecodes::_fstore_2 : store_local(floatType , 2); break; | |
2321 case Bytecodes::_fstore_3 : store_local(floatType , 3); break; | |
2322 case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; | |
2323 case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; | |
2324 case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; | |
2325 case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; | |
2326 case Bytecodes::_astore_0 : store_local(objectType, 0); break; | |
2327 case Bytecodes::_astore_1 : store_local(objectType, 1); break; | |
2328 case Bytecodes::_astore_2 : store_local(objectType, 2); break; | |
2329 case Bytecodes::_astore_3 : store_local(objectType, 3); break; | |
2330 case Bytecodes::_iastore : store_indexed(T_INT ); break; | |
2331 case Bytecodes::_lastore : store_indexed(T_LONG ); break; | |
2332 case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; | |
2333 case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; | |
2334 case Bytecodes::_aastore : store_indexed(T_OBJECT); break; | |
2335 case Bytecodes::_bastore : store_indexed(T_BYTE ); break; | |
2336 case Bytecodes::_castore : store_indexed(T_CHAR ); break; | |
2337 case Bytecodes::_sastore : store_indexed(T_SHORT ); break; | |
2338 case Bytecodes::_pop : // fall through | |
2339 case Bytecodes::_pop2 : // fall through | |
2340 case Bytecodes::_dup : // fall through | |
2341 case Bytecodes::_dup_x1 : // fall through | |
2342 case Bytecodes::_dup_x2 : // fall through | |
2343 case Bytecodes::_dup2 : // fall through | |
2344 case Bytecodes::_dup2_x1 : // fall through | |
2345 case Bytecodes::_dup2_x2 : // fall through | |
2346 case Bytecodes::_swap : stack_op(code); break; | |
2347 case Bytecodes::_iadd : arithmetic_op(intType , code); break; | |
2348 case Bytecodes::_ladd : arithmetic_op(longType , code); break; | |
2349 case Bytecodes::_fadd : arithmetic_op(floatType , code); break; | |
2350 case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; | |
2351 case Bytecodes::_isub : arithmetic_op(intType , code); break; | |
2352 case Bytecodes::_lsub : arithmetic_op(longType , code); break; | |
2353 case Bytecodes::_fsub : arithmetic_op(floatType , code); break; | |
2354 case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; | |
2355 case Bytecodes::_imul : arithmetic_op(intType , code); break; | |
2356 case Bytecodes::_lmul : arithmetic_op(longType , code); break; | |
2357 case Bytecodes::_fmul : arithmetic_op(floatType , code); break; | |
2358 case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; | |
2359 case Bytecodes::_idiv : arithmetic_op(intType , code, lock_stack()); break; | |
2360 case Bytecodes::_ldiv : arithmetic_op(longType , code, lock_stack()); break; | |
2361 case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; | |
2362 case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; | |
2363 case Bytecodes::_irem : arithmetic_op(intType , code, lock_stack()); break; | |
2364 case Bytecodes::_lrem : arithmetic_op(longType , code, lock_stack()); break; | |
2365 case Bytecodes::_frem : arithmetic_op(floatType , code); break; | |
2366 case Bytecodes::_drem : arithmetic_op(doubleType, code); break; | |
2367 case Bytecodes::_ineg : negate_op(intType ); break; | |
2368 case Bytecodes::_lneg : negate_op(longType ); break; | |
2369 case Bytecodes::_fneg : negate_op(floatType ); break; | |
2370 case Bytecodes::_dneg : negate_op(doubleType); break; | |
2371 case Bytecodes::_ishl : shift_op(intType , code); break; | |
2372 case Bytecodes::_lshl : shift_op(longType, code); break; | |
2373 case Bytecodes::_ishr : shift_op(intType , code); break; | |
2374 case Bytecodes::_lshr : shift_op(longType, code); break; | |
2375 case Bytecodes::_iushr : shift_op(intType , code); break; | |
2376 case Bytecodes::_lushr : shift_op(longType, code); break; | |
2377 case Bytecodes::_iand : logic_op(intType , code); break; | |
2378 case Bytecodes::_land : logic_op(longType, code); break; | |
2379 case Bytecodes::_ior : logic_op(intType , code); break; | |
2380 case Bytecodes::_lor : logic_op(longType, code); break; | |
2381 case Bytecodes::_ixor : logic_op(intType , code); break; | |
2382 case Bytecodes::_lxor : logic_op(longType, code); break; | |
2383 case Bytecodes::_iinc : increment(); break; | |
2384 case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; | |
2385 case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; | |
2386 case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; | |
2387 case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; | |
2388 case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; | |
2389 case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; | |
2390 case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; | |
2391 case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; | |
2392 case Bytecodes::_f2d : convert(code, T_FLOAT , T_DOUBLE); break; | |
2393 case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; | |
2394 case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; | |
2395 case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; | |
2396 case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; | |
2397 case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; | |
2398 case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; | |
2399 case Bytecodes::_lcmp : compare_op(longType , code); break; | |
2400 case Bytecodes::_fcmpl : compare_op(floatType , code); break; | |
2401 case Bytecodes::_fcmpg : compare_op(floatType , code); break; | |
2402 case Bytecodes::_dcmpl : compare_op(doubleType, code); break; | |
2403 case Bytecodes::_dcmpg : compare_op(doubleType, code); break; | |
2404 case Bytecodes::_ifeq : if_zero(intType , If::eql); break; | |
2405 case Bytecodes::_ifne : if_zero(intType , If::neq); break; | |
2406 case Bytecodes::_iflt : if_zero(intType , If::lss); break; | |
2407 case Bytecodes::_ifge : if_zero(intType , If::geq); break; | |
2408 case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; | |
2409 case Bytecodes::_ifle : if_zero(intType , If::leq); break; | |
2410 case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; | |
2411 case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; | |
2412 case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; | |
2413 case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; | |
2414 case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; | |
2415 case Bytecodes::_if_icmple : if_same(intType , If::leq); break; | |
2416 case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; | |
2417 case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; | |
2418 case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; | |
2419 case Bytecodes::_jsr : jsr(s.get_dest()); break; | |
2420 case Bytecodes::_ret : ret(s.get_index()); break; | |
2421 case Bytecodes::_tableswitch : table_switch(); break; | |
2422 case Bytecodes::_lookupswitch : lookup_switch(); break; | |
2423 case Bytecodes::_ireturn : method_return(ipop()); break; | |
2424 case Bytecodes::_lreturn : method_return(lpop()); break; | |
2425 case Bytecodes::_freturn : method_return(fpop()); break; | |
2426 case Bytecodes::_dreturn : method_return(dpop()); break; | |
2427 case Bytecodes::_areturn : method_return(apop()); break; | |
2428 case Bytecodes::_return : method_return(NULL ); break; | |
2429 case Bytecodes::_getstatic : // fall through | |
2430 case Bytecodes::_putstatic : // fall through | |
2431 case Bytecodes::_getfield : // fall through | |
2432 case Bytecodes::_putfield : access_field(code); break; | |
2433 case Bytecodes::_invokevirtual : // fall through | |
2434 case Bytecodes::_invokespecial : // fall through | |
2435 case Bytecodes::_invokestatic : // fall through | |
726
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
470
diff
changeset
|
2436 case Bytecodes::_invokedynamic : // fall through |
0 | 2437 case Bytecodes::_invokeinterface: invoke(code); break; |
1565 | 2438 case Bytecodes::_new : new_instance(s.get_index_u2()); break; |
0 | 2439 case Bytecodes::_newarray : new_type_array(); break; |
2440 case Bytecodes::_anewarray : new_object_array(); break; | |
2441 case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break; | |
2442 case Bytecodes::_athrow : throw_op(s.cur_bci()); break; | |
1565 | 2443 case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; |
2444 case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; | |
0 | 2445 // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException |
2446 case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break; | |
2447 case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; | |
2448 case Bytecodes::_wide : ShouldNotReachHere(); break; | |
2449 case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break; | |
2450 case Bytecodes::_ifnull : if_null(objectType, If::eql); break; | |
2451 case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; | |
2452 case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; | |
2453 case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; | |
2454 case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", NULL); | |
2455 default : ShouldNotReachHere(); break; | |
2456 } | |
2457 // save current bci to setup Goto at the end | |
2458 prev_bci = s.cur_bci(); | |
2459 } | |
2460 CHECK_BAILOUT_(NULL); | |
2461 // stop processing of this block (see try_inline_full) | |
2462 if (_skip_block) { | |
2463 _skip_block = false; | |
2464 assert(_last && _last->as_BlockEnd(), ""); | |
2465 return _last->as_BlockEnd(); | |
2466 } | |
2467 // if there are any, check if last instruction is a BlockEnd instruction | |
2468 BlockEnd* end = last()->as_BlockEnd(); | |
2469 if (end == NULL) { | |
2470 // all blocks must end with a BlockEnd instruction => add a Goto | |
2471 end = new Goto(block_at(s.cur_bci()), false); | |
2472 _last = _last->set_next(end, prev_bci); | |
2473 } | |
2474 assert(end == last()->as_BlockEnd(), "inconsistency"); | |
2475 | |
2476 // if the method terminates, we don't need the stack anymore | |
2477 if (end->as_Return() != NULL) { | |
2478 state()->clear_stack(); | |
2479 } else if (end->as_Throw() != NULL) { | |
2480 // May have exception handler in caller scopes | |
2481 state()->truncate_stack(scope()->lock_stack_size()); | |
2482 } | |
2483 | |
2484 // connect to begin & set state | |
2485 // NOTE that inlining may have changed the block we are parsing | |
2486 block()->set_end(end); | |
2487 end->set_state(state()); | |
2488 // propagate state | |
2489 for (int i = end->number_of_sux() - 1; i >= 0; i--) { | |
2490 BlockBegin* sux = end->sux_at(i); | |
2491 assert(sux->is_predecessor(block()), "predecessor missing"); | |
2492 // be careful, bailout if bytecodes are strange | |
2493 if (!sux->try_merge(state())) BAILOUT_("block join failed", NULL); | |
2494 scope_data()->add_to_work_list(end->sux_at(i)); | |
2495 } | |
2496 | |
2497 scope_data()->set_stream(NULL); | |
2498 | |
2499 // done | |
2500 return end; | |
2501 } | |
2502 | |
2503 | |
2504 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) { | |
2505 do { | |
2506 if (start_in_current_block_for_inlining && !bailed_out()) { | |
2507 iterate_bytecodes_for_block(0); | |
2508 start_in_current_block_for_inlining = false; | |
2509 } else { | |
2510 BlockBegin* b; | |
2511 while ((b = scope_data()->remove_from_work_list()) != NULL) { | |
2512 if (!b->is_set(BlockBegin::was_visited_flag)) { | |
2513 if (b->is_set(BlockBegin::osr_entry_flag)) { | |
2514 // we're about to parse the osr entry block, so make sure | |
2515 // we setup the OSR edge leading into this block so that | |
2516 // Phis get setup correctly. | |
2517 setup_osr_entry_block(); | |
2518 // this is no longer the osr entry block, so clear it. | |
2519 b->clear(BlockBegin::osr_entry_flag); | |
2520 } | |
2521 b->set(BlockBegin::was_visited_flag); | |
2522 connect_to_end(b); | |
2523 } | |
2524 } | |
2525 } | |
2526 } while (!bailed_out() && !scope_data()->is_work_list_empty()); | |
2527 } | |
2528 | |
2529 | |
// Per-bytecode trap tables, indexed by bytecode and filled in by
// GraphBuilder::initialize():
//   _can_trap[bc] - bytecode bc may throw an exception in compiled code
//   _is_async[bc] - bytecode bc may raise an asynchronous exception
//                   (treated as trapping unless DeoptC1 && DeoptOnAsyncException)
bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes];
bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
2532 | |
2533 void GraphBuilder::initialize() { | |
2534 // the following bytecodes are assumed to potentially | |
2535 // throw exceptions in compiled code - note that e.g. | |
2536 // monitorexit & the return bytecodes do not throw | |
2537 // exceptions since monitor pairing proved that they | |
2538 // succeed (if monitor pairing succeeded) | |
2539 Bytecodes::Code can_trap_list[] = | |
2540 { Bytecodes::_ldc | |
2541 , Bytecodes::_ldc_w | |
2542 , Bytecodes::_ldc2_w | |
2543 , Bytecodes::_iaload | |
2544 , Bytecodes::_laload | |
2545 , Bytecodes::_faload | |
2546 , Bytecodes::_daload | |
2547 , Bytecodes::_aaload | |
2548 , Bytecodes::_baload | |
2549 , Bytecodes::_caload | |
2550 , Bytecodes::_saload | |
2551 , Bytecodes::_iastore | |
2552 , Bytecodes::_lastore | |
2553 , Bytecodes::_fastore | |
2554 , Bytecodes::_dastore | |
2555 , Bytecodes::_aastore | |
2556 , Bytecodes::_bastore | |
2557 , Bytecodes::_castore | |
2558 , Bytecodes::_sastore | |
2559 , Bytecodes::_idiv | |
2560 , Bytecodes::_ldiv | |
2561 , Bytecodes::_irem | |
2562 , Bytecodes::_lrem | |
2563 , Bytecodes::_getstatic | |
2564 , Bytecodes::_putstatic | |
2565 , Bytecodes::_getfield | |
2566 , Bytecodes::_putfield | |
2567 , Bytecodes::_invokevirtual | |
2568 , Bytecodes::_invokespecial | |
2569 , Bytecodes::_invokestatic | |
726
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
470
diff
changeset
|
2570 , Bytecodes::_invokedynamic |
0 | 2571 , Bytecodes::_invokeinterface |
2572 , Bytecodes::_new | |
2573 , Bytecodes::_newarray | |
2574 , Bytecodes::_anewarray | |
2575 , Bytecodes::_arraylength | |
2576 , Bytecodes::_athrow | |
2577 , Bytecodes::_checkcast | |
2578 , Bytecodes::_instanceof | |
2579 , Bytecodes::_monitorenter | |
2580 , Bytecodes::_multianewarray | |
2581 }; | |
2582 | |
2583 // the following bytecodes are assumed to potentially | |
2584 // throw asynchronous exceptions in compiled code due | |
2585 // to safepoints (note: these entries could be merged | |
2586 // with the can_trap_list - however, we need to know | |
2587 // which ones are asynchronous for now - see also the | |
2588 // comment in GraphBuilder::handle_exception) | |
2589 Bytecodes::Code is_async_list[] = | |
2590 { Bytecodes::_ifeq | |
2591 , Bytecodes::_ifne | |
2592 , Bytecodes::_iflt | |
2593 , Bytecodes::_ifge | |
2594 , Bytecodes::_ifgt | |
2595 , Bytecodes::_ifle | |
2596 , Bytecodes::_if_icmpeq | |
2597 , Bytecodes::_if_icmpne | |
2598 , Bytecodes::_if_icmplt | |
2599 , Bytecodes::_if_icmpge | |
2600 , Bytecodes::_if_icmpgt | |
2601 , Bytecodes::_if_icmple | |
2602 , Bytecodes::_if_acmpeq | |
2603 , Bytecodes::_if_acmpne | |
2604 , Bytecodes::_goto | |
2605 , Bytecodes::_jsr | |
2606 , Bytecodes::_ret | |
2607 , Bytecodes::_tableswitch | |
2608 , Bytecodes::_lookupswitch | |
2609 , Bytecodes::_ireturn | |
2610 , Bytecodes::_lreturn | |
2611 , Bytecodes::_freturn | |
2612 , Bytecodes::_dreturn | |
2613 , Bytecodes::_areturn | |
2614 , Bytecodes::_return | |
2615 , Bytecodes::_ifnull | |
2616 , Bytecodes::_ifnonnull | |
2617 , Bytecodes::_goto_w | |
2618 , Bytecodes::_jsr_w | |
2619 }; | |
2620 | |
2621 // inititialize trap tables | |
2622 for (int i = 0; i < Bytecodes::number_of_java_codes; i++) { | |
2623 _can_trap[i] = false; | |
2624 _is_async[i] = false; | |
2625 } | |
2626 // set standard trap info | |
2627 for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) { | |
2628 _can_trap[can_trap_list[j]] = true; | |
2629 } | |
2630 | |
2631 // We now deoptimize if an asynchronous exception is thrown. This | |
2632 // considerably cleans up corner case issues related to javac's | |
2633 // incorrect exception handler ranges for async exceptions and | |
2634 // allows us to precisely analyze the types of exceptions from | |
2635 // certain bytecodes. | |
2636 if (!(DeoptC1 && DeoptOnAsyncException)) { | |
2637 // set asynchronous trap info | |
2638 for (uint k = 0; k < ARRAY_SIZE(is_async_list); k++) { | |
2639 assert(!_can_trap[is_async_list[k]], "can_trap_list and is_async_list should be disjoint"); | |
2640 _can_trap[is_async_list[k]] = true; | |
2641 _is_async[is_async_list[k]] = true; | |
2642 } | |
2643 } | |
2644 } | |
2645 | |
2646 | |
// Build a synthetic header block that falls through (via a Goto) into
// 'entry'.  The header carries the given entry flag f instead of
// 'entry' itself; it is used when the real entry block needs a
// predecessor (e.g. because it is also a branch target, or to host the
// invocation-count profiling code).  'state' must have an empty
// expression stack; a copy of it becomes the Goto's debug state.
BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
  assert(entry->is_set(f), "entry/flag mismatch");
  // create header block
  BlockBegin* h = new BlockBegin(entry->bci());
  h->set_depth_first_number(0);

  // 'l' tracks the last instruction appended to the header so far.
  Value l = h;
  if (profile_branches()) {
    // Increment the invocation count on entry to the method.  We
    // can't use profile_invocation here because append isn't set up to
    // work properly at this point.  The instructions have to be
    // appended to the instruction stream by hand.
    Value m = new Constant(new ObjectConstant(compilation()->method()));
    h->set_next(m, 0);
    Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
    m->set_next(p, 0);
    l = p;
  }

  // terminate the header with a fall-through Goto into the real entry
  BlockEnd* g = new Goto(entry, false);
  l->set_next(g, entry->bci());
  h->set_end(g);
  h->set(f);
  // setup header block end state
  ValueStack* s = state->copy();  // can use copy since stack is empty (=> no phis)
  assert(s->stack_is_empty(), "must have empty stack at entry point");
  g->set_state(s);
  return h;
}
2676 | |
2677 | |
2678 | |
// Create the root (start) block of the IR graph: a block containing a
// single Base instruction that points at the standard entry (possibly
// behind a header block) and the OSR entry.  Returns the start block.
BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
  BlockBegin* start = new BlockBegin(0);

  // This code eliminates the empty start block at the beginning of
  // each method.  Previously, each method started with the
  // start-block created below, and this block was followed by the
  // header block that was always empty.  This header block is only
  // necessary if std_entry is also a backward branch target because
  // then phi functions may be necessary in the header block.  It's
  // also necessary when profiling so that there's a single block that
  // can increment the interpreter_invocation_count.
  BlockBegin* new_header_block;
  if (std_entry->number_of_preds() == 0 && !profile_branches()) {
    // no predecessors and no profiling: jump straight to std_entry
    new_header_block = std_entry;
  } else {
    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  }

  // setup start block (root for the IR graph)
  Base* base =
    new Base(
      new_header_block,
      osr_entry
    );
  start->set_next(base, 0);
  start->set_end(base);
  // create & setup state for start block
  start->set_state(state->copy());
  base->set_state(state->copy());

  if (base->std_entry()->state() == NULL) {
    // setup states for header blocks
    base->std_entry()->merge(state);
  }

  assert(base->std_entry()->state() != NULL, "");
  return start;
}
2717 | |
2718 | |
// Build the OSR entry block (_osr_entry): emit an OsrEntry instruction
// that yields the OSR buffer pointer, reload every live local out of
// that buffer with raw unsafe loads, and end with a Goto into the real
// OSR target block.  Called from iterate_all_blocks just before the
// OSR target is parsed, so the merged state produces correct Phis.
void GraphBuilder::setup_osr_entry_block() {
  assert(compilation()->is_osr_compile(), "only for osrs");

  int osr_bci = compilation()->osr_bci();
  ciBytecodeStream s(method());
  s.reset_to_bci(osr_bci);
  s.next();
  scope_data()->set_stream(&s);

  // create a new block to be the osr setup code
  _osr_entry = new BlockBegin(osr_bci);
  _osr_entry->set(BlockBegin::osr_entry_flag);
  _osr_entry->set_depth_first_number(0);
  BlockBegin* target = bci2block()->at(osr_bci);
  assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
  // the osr entry has no values for locals
  ValueStack* state = target->state()->copy();
  _osr_entry->set_state(state);

  // reset builder state so appends go into the new OSR entry block
  kill_all();
  _block = _osr_entry;
  _state = _osr_entry->state()->copy();
  _last  = _osr_entry;
  Value e = append(new OsrEntry());   // the OSR buffer pointer
  e->set_needs_null_check(false);

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.

  int index;
  Value local;

  // find all the locals that the interpreter thinks contain live oops
  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);

  // compute the offset into the locals so that we can treat the buffer
  // as if the locals were still in the interpreter frame
  int locals_offset = BytesPerWord * (method()->max_locals() - 1);
  for_each_local_value(state, index, local) {
    // locals are stored in reverse order in the buffer (see layout above)
    int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
    Value get;
    if (local->type()->is_object_kind() && !live_oops.at(index)) {
      // The interpreter thinks this local is dead but the compiler
      // doesn't so pretend that the interpreter passed in null.
      get = append(new Constant(objectNull));
    } else {
      get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
                                    append(new Constant(new IntConstant(offset))),
                                    0,
                                    true));
    }
    _state->store_local(index, get);
  }

  // the storage for the OSR buffer is freed manually in the LIRGenerator.

  assert(state->caller_state() == NULL, "should be top scope");
  state->clear_locals();
  Goto* g = new Goto(target, false);
  g->set_state(_state->copy());
  append(g);
  _osr_entry->set_end(g);
  // merge into the target so Phis pick up the reloaded locals
  target->merge(_osr_entry->end()->state());

  scope_data()->set_stream(NULL);
}
2797 | |
2798 | |
2799 ValueStack* GraphBuilder::state_at_entry() { | |
2800 ValueStack* state = new ValueStack(scope(), method()->max_locals(), method()->max_stack()); | |
2801 | |
2802 // Set up locals for receiver | |
2803 int idx = 0; | |
2804 if (!method()->is_static()) { | |
2805 // we should always see the receiver | |
2806 state->store_local(idx, new Local(objectType, idx)); | |
2807 idx = 1; | |
2808 } | |
2809 | |
2810 // Set up locals for incoming arguments | |
2811 ciSignature* sig = method()->signature(); | |
2812 for (int i = 0; i < sig->count(); i++) { | |
2813 ciType* type = sig->type_at(i); | |
2814 BasicType basic_type = type->basic_type(); | |
2815 // don't allow T_ARRAY to propagate into locals types | |
2816 if (basic_type == T_ARRAY) basic_type = T_OBJECT; | |
2817 ValueType* vt = as_ValueType(basic_type); | |
2818 state->store_local(idx, new Local(vt, idx)); | |
2819 idx += type->size(); | |
2820 } | |
2821 | |
2822 // lock synchronized method | |
2823 if (method()->is_synchronized()) { | |
2824 state->lock(scope(), NULL); | |
2825 } | |
2826 | |
2827 return state; | |
2828 } | |
2829 | |
2830 | |
// Drive the whole graph construction for one compilation: build the
// bci->block mapping, set up the entry state, parse all blocks (or
// emit the special Intrinsic graph for math-intrinsic root methods),
// create the start block, and clean up redundant phis.  Bails out via
// CHECK_BAILOUT/BAILOUT on unsupported shapes (e.g. non-empty stack at
// an OSR entry).
GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
  : _scope_data(NULL)
  , _exception_state(NULL)
  , _instruction_count(0)
  , _osr_entry(NULL)
  , _memory(new MemoryBuffer())
  , _compilation(compilation)
  , _inline_bailout_msg(NULL)
{
  int osr_bci = compilation->osr_bci();

  // determine entry points and bci2block mapping
  BlockListBuilder blm(compilation, scope, osr_bci);
  CHECK_BAILOUT();

  BlockList* bci2block = blm.bci2block();
  BlockBegin* start_block = bci2block->at(0);

  push_root_scope(scope, bci2block, start_block);

  // setup state for std entry
  _initial_state = state_at_entry();
  start_block->merge(_initial_state);

  // complete graph
  _vmap        = new ValueMap();
  scope->compute_lock_stack_size();
  switch (scope->method()->intrinsic_id()) {
  case vmIntrinsics::_dabs          : // fall through
  case vmIntrinsics::_dsqrt         : // fall through
  case vmIntrinsics::_dsin          : // fall through
  case vmIntrinsics::_dcos          : // fall through
  case vmIntrinsics::_dtan          : // fall through
  case vmIntrinsics::_dlog          : // fall through
  case vmIntrinsics::_dlog10        : // fall through
    {
      // Compiles where the root method is an intrinsic need a special
      // compilation environment because the bytecodes for the method
      // shouldn't be parsed during the compilation, only the special
      // Intrinsic node should be emitted.  If this isn't done the
      // code for the inlined version will be different than the root
      // compiled version which could lead to monotonicity problems on
      // intel.

      // Set up a stream so that appending instructions works properly.
      ciBytecodeStream s(scope->method());
      s.reset_to_bci(0);
      scope_data()->set_stream(&s);
      s.next();

      // setup the initial block state
      _block = start_block;
      _state = start_block->state()->copy();
      _last  = start_block;
      // all of these intrinsics take a single double argument in local 0
      load_local(doubleType, 0);

      // Emit the intrinsic node.
      bool result = try_inline_intrinsics(scope->method());
      if (!result) BAILOUT("failed to inline intrinsic");
      method_return(dpop());

      // connect the begin and end blocks and we're all done.
      BlockEnd* end = last()->as_BlockEnd();
      block()->set_end(end);
      end->set_state(state());
      break;
    }
  default:
    // normal case: parse all reachable blocks starting at the entry
    scope_data()->add_to_work_list(start_block);
    iterate_all_blocks();
    break;
  }
  CHECK_BAILOUT();

  _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);

  eliminate_redundant_phis(_start);

  NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
  // for osr compile, bailout if some requirements are not fulfilled
  if (osr_bci != -1) {
    BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
    assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");

    // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
    if (!osr_block->state()->stack_is_empty()) {
      BAILOUT("stack not empty at OSR entry point");
    }
  }
#ifndef PRODUCT
  if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
#endif
}
2924 | |
2925 | |
2926 ValueStack* GraphBuilder::lock_stack() { | |
2927 // return a new ValueStack representing just the current lock stack | |
2928 // (for debug info at safepoints in exception throwing or handling) | |
2929 ValueStack* new_stack = state()->copy_locks(); | |
2930 return new_stack; | |
2931 } | |
2932 | |
2933 | |
2934 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { | |
2935 int recur_level = 0; | |
2936 for (IRScope* s = scope(); s != NULL; s = s->caller()) { | |
2937 if (s->method() == cur_callee) { | |
2938 ++recur_level; | |
2939 } | |
2940 } | |
2941 return recur_level; | |
2942 } | |
2943 | |
2944 | |
2945 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) { | |
2946 // Clear out any existing inline bailout condition | |
2947 clear_inline_bailout(); | |
2948 | |
2949 if (callee->should_exclude()) { | |
2950 // callee is excluded | |
2951 INLINE_BAILOUT("excluded by CompilerOracle") | |
2952 } else if (!callee->can_be_compiled()) { | |
2953 // callee is not compilable (prob. has breakpoints) | |
2954 INLINE_BAILOUT("not compilable") | |
2955 } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) { | |
2956 // intrinsics can be native or not | |
2957 return true; | |
2958 } else if (callee->is_native()) { | |
2959 // non-intrinsic natives cannot be inlined | |
2960 INLINE_BAILOUT("non-intrinsic native") | |
2961 } else if (callee->is_abstract()) { | |
2962 INLINE_BAILOUT("abstract") | |
2963 } else { | |
2964 return try_inline_full(callee, holder_known); | |
2965 } | |
2966 } | |
2967 | |
2968 | |
2969 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) { | |
2970 if (!InlineNatives ) INLINE_BAILOUT("intrinsic method inlining disabled"); | |
1540
99791ad65936
6953539: after 6892658 c1 reports that it doesn't inline StringBuffer.append
never
parents:
1397
diff
changeset
|
2971 if (callee->is_synchronized()) { |
99791ad65936
6953539: after 6892658 c1 reports that it doesn't inline StringBuffer.append
never
parents:
1397
diff
changeset
|
2972 // We don't currently support any synchronized intrinsics |
99791ad65936
6953539: after 6892658 c1 reports that it doesn't inline StringBuffer.append
never
parents:
1397
diff
changeset
|
2973 return false; |
99791ad65936
6953539: after 6892658 c1 reports that it doesn't inline StringBuffer.append
never
parents:
1397
diff
changeset
|
2974 } |
99791ad65936
6953539: after 6892658 c1 reports that it doesn't inline StringBuffer.append
never
parents:
1397
diff
changeset
|
2975 |
0 | 2976 // callee seems like a good candidate |
2977 // determine id | |
2978 bool preserves_state = false; | |
2979 bool cantrap = true; | |
2980 vmIntrinsics::ID id = callee->intrinsic_id(); | |
2981 switch (id) { | |
2982 case vmIntrinsics::_arraycopy : | |
2983 if (!InlineArrayCopy) return false; | |
2984 break; | |
2985 | |
2986 case vmIntrinsics::_currentTimeMillis: | |
2987 case vmIntrinsics::_nanoTime: | |
2988 preserves_state = true; | |
2989 cantrap = false; | |
2990 break; | |
2991 | |
2992 case vmIntrinsics::_floatToRawIntBits : | |
2993 case vmIntrinsics::_intBitsToFloat : | |
2994 case vmIntrinsics::_doubleToRawLongBits : | |
2995 case vmIntrinsics::_longBitsToDouble : | |
2996 if (!InlineMathNatives) return false; | |
2997 preserves_state = true; | |
2998 cantrap = false; | |
2999 break; | |
3000 | |
3001 case vmIntrinsics::_getClass : | |
3002 if (!InlineClassNatives) return false; | |
3003 preserves_state = true; | |
3004 break; | |
3005 | |
3006 case vmIntrinsics::_currentThread : | |
3007 if (!InlineThreadNatives) return false; | |
3008 preserves_state = true; | |
3009 cantrap = false; | |
3010 break; | |
3011 | |
3012 case vmIntrinsics::_dabs : // fall through | |
3013 case vmIntrinsics::_dsqrt : // fall through | |
3014 case vmIntrinsics::_dsin : // fall through | |
3015 case vmIntrinsics::_dcos : // fall through | |
3016 case vmIntrinsics::_dtan : // fall through | |
3017 case vmIntrinsics::_dlog : // fall through | |
3018 case vmIntrinsics::_dlog10 : // fall through | |
3019 if (!InlineMathNatives) return false; | |
3020 cantrap = false; | |
3021 preserves_state = true; | |
3022 break; | |
3023 | |
3024 // sun/misc/AtomicLong.attemptUpdate | |
3025 case vmIntrinsics::_attemptUpdate : | |
3026 if (!VM_Version::supports_cx8()) return false; | |
3027 if (!InlineAtomicLong) return false; | |
3028 preserves_state = true; | |
3029 break; | |
3030 | |
3031 // Use special nodes for Unsafe instructions so we can more easily | |
3032 // perform an address-mode optimization on the raw variants | |
3033 case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false); | |
3034 case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false); | |
3035 case vmIntrinsics::_getByte : return append_unsafe_get_obj(callee, T_BYTE, false); | |
3036 case vmIntrinsics::_getShort : return append_unsafe_get_obj(callee, T_SHORT, false); | |
3037 case vmIntrinsics::_getChar : return append_unsafe_get_obj(callee, T_CHAR, false); | |
3038 case vmIntrinsics::_getInt : return append_unsafe_get_obj(callee, T_INT, false); | |
3039 case vmIntrinsics::_getLong : return append_unsafe_get_obj(callee, T_LONG, false); | |
3040 case vmIntrinsics::_getFloat : return append_unsafe_get_obj(callee, T_FLOAT, false); | |
3041 case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE, false); | |
3042 | |
3043 case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT, false); | |
3044 case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false); | |
3045 case vmIntrinsics::_putByte : return append_unsafe_put_obj(callee, T_BYTE, false); | |
3046 case vmIntrinsics::_putShort : return append_unsafe_put_obj(callee, T_SHORT, false); | |
3047 case vmIntrinsics::_putChar : return append_unsafe_put_obj(callee, T_CHAR, false); | |
3048 case vmIntrinsics::_putInt : return append_unsafe_put_obj(callee, T_INT, false); | |
3049 case vmIntrinsics::_putLong : return append_unsafe_put_obj(callee, T_LONG, false); | |
3050 case vmIntrinsics::_putFloat : return append_unsafe_put_obj(callee, T_FLOAT, false); | |
3051 case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE, false); | |
3052 | |
3053 case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT, true); | |
3054 case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true); | |
3055 case vmIntrinsics::_getByteVolatile : return append_unsafe_get_obj(callee, T_BYTE, true); | |
3056 case vmIntrinsics::_getShortVolatile : return append_unsafe_get_obj(callee, T_SHORT, true); | |
3057 case vmIntrinsics::_getCharVolatile : return append_unsafe_get_obj(callee, T_CHAR, true); | |
3058 case vmIntrinsics::_getIntVolatile : return append_unsafe_get_obj(callee, T_INT, true); | |
3059 case vmIntrinsics::_getLongVolatile : return append_unsafe_get_obj(callee, T_LONG, true); | |
3060 case vmIntrinsics::_getFloatVolatile : return append_unsafe_get_obj(callee, T_FLOAT, true); | |
3061 case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE, true); | |
3062 | |
3063 case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT, true); | |
3064 case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true); | |
3065 case vmIntrinsics::_putByteVolatile : return append_unsafe_put_obj(callee, T_BYTE, true); | |
3066 case vmIntrinsics::_putShortVolatile : return append_unsafe_put_obj(callee, T_SHORT, true); | |
3067 case vmIntrinsics::_putCharVolatile : return append_unsafe_put_obj(callee, T_CHAR, true); | |
3068 case vmIntrinsics::_putIntVolatile : return append_unsafe_put_obj(callee, T_INT, true); | |
3069 case vmIntrinsics::_putLongVolatile : return append_unsafe_put_obj(callee, T_LONG, true); | |
3070 case vmIntrinsics::_putFloatVolatile : return append_unsafe_put_obj(callee, T_FLOAT, true); | |
3071 case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE, true); | |
3072 | |
3073 case vmIntrinsics::_getByte_raw : return append_unsafe_get_raw(callee, T_BYTE); | |
3074 case vmIntrinsics::_getShort_raw : return append_unsafe_get_raw(callee, T_SHORT); | |
3075 case vmIntrinsics::_getChar_raw : return append_unsafe_get_raw(callee, T_CHAR); | |
3076 case vmIntrinsics::_getInt_raw : return append_unsafe_get_raw(callee, T_INT); | |
3077 case vmIntrinsics::_getLong_raw : return append_unsafe_get_raw(callee, T_LONG); | |
3078 case vmIntrinsics::_getFloat_raw : return append_unsafe_get_raw(callee, T_FLOAT); | |
3079 case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE); | |
3080 | |
3081 case vmIntrinsics::_putByte_raw : return append_unsafe_put_raw(callee, T_BYTE); | |
3082 case vmIntrinsics::_putShort_raw : return append_unsafe_put_raw(callee, T_SHORT); | |
3083 case vmIntrinsics::_putChar_raw : return append_unsafe_put_raw(callee, T_CHAR); | |
3084 case vmIntrinsics::_putInt_raw : return append_unsafe_put_raw(callee, T_INT); | |
3085 case vmIntrinsics::_putLong_raw : return append_unsafe_put_raw(callee, T_LONG); | |
3086 case vmIntrinsics::_putFloat_raw : return append_unsafe_put_raw(callee, T_FLOAT); | |
3087 case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE); | |
3088 | |
3089 case vmIntrinsics::_prefetchRead : return append_unsafe_prefetch(callee, false, false); | |
3090 case vmIntrinsics::_prefetchWrite : return append_unsafe_prefetch(callee, false, true); | |
3091 case vmIntrinsics::_prefetchReadStatic : return append_unsafe_prefetch(callee, true, false); | |
3092 case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true, true); | |
3093 | |
3094 case vmIntrinsics::_checkIndex : | |
3095 if (!InlineNIOCheckIndex) return false; | |
3096 preserves_state = true; | |
3097 break; | |
3098 case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT, true); | |
3099 case vmIntrinsics::_putOrderedInt : return append_unsafe_put_obj(callee, T_INT, true); | |
3100 case vmIntrinsics::_putOrderedLong : return append_unsafe_put_obj(callee, T_LONG, true); | |
3101 | |
3102 case vmIntrinsics::_compareAndSwapLong: | |
3103 if (!VM_Version::supports_cx8()) return false; | |
3104 // fall through | |
3105 case vmIntrinsics::_compareAndSwapInt: | |
3106 case vmIntrinsics::_compareAndSwapObject: | |
3107 append_unsafe_CAS(callee); | |
3108 return true; | |
3109 | |
3110 default : return false; // do not inline | |
3111 } | |
3112 // create intrinsic node | |
3113 const bool has_receiver = !callee->is_static(); | |
3114 ValueType* result_type = as_ValueType(callee->return_type()); | |
3115 | |
3116 Values* args = state()->pop_arguments(callee->arg_size()); | |
3117 ValueStack* locks = lock_stack(); | |
3118 if (profile_calls()) { | |
3119 // Don't profile in the special case where the root method | |
3120 // is the intrinsic | |
3121 if (callee != method()) { | |
3122 Value recv = NULL; | |
3123 if (has_receiver) { | |
3124 recv = args->at(0); | |
3125 null_check(recv); | |
3126 } | |
3127 profile_call(recv, NULL); | |
3128 } | |
3129 } | |
3130 | |
3131 Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, lock_stack(), | |
3132 preserves_state, cantrap); | |
3133 // append instruction & push result | |
3134 Value value = append_split(result); | |
3135 if (result_type != voidType) push(result_type, value); | |
3136 | |
3137 #ifndef PRODUCT | |
3138 // printing | |
3139 if (PrintInlining) { | |
3140 print_inline_result(callee, true); | |
3141 } | |
3142 #endif | |
3143 | |
3144 // done | |
3145 return true; | |
3146 } | |
3147 | |
3148 | |
// Inline a jsr subroutine: parse the subroutine's blocks under a fresh
// jsr ScopeData so that every Ret is rewritten into a Goto targeting a
// single continuation block. Returns true on success; a bailout during
// parsing aborts via CHECK_BAILOUT_.
bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
  // Introduce a new callee continuation point - all Ret instructions
  // will be replaced with Gotos to this point.
  BlockBegin* cont = block_at(next_bci());
  assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr");

  // Note: can not assign state to continuation yet, as we have to
  // pick up the state from the Ret instructions.

  // Push callee scope
  push_scope_for_jsr(cont, jsr_dest_bci);

  // Temporarily set up bytecode stream so we can append instructions
  // (only using the bci of this stream)
  scope_data()->set_stream(scope_data()->parent()->stream());

  BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
  assert(jsr_start_block != NULL, "jsr start block must exist");
  assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
  // Wire the current block into the subroutine entry with an explicit Goto.
  Goto* goto_sub = new Goto(jsr_start_block, false);
  goto_sub->set_state(state());
  // Must copy state to avoid wrong sharing when parsing bytecodes
  assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
  jsr_start_block->set_state(state()->copy());
  append(goto_sub);
  _block->set_end(goto_sub);
  // Continue parsing inside the subroutine's start block.
  _last = _block = jsr_start_block;

  // Clear out bytecode stream
  scope_data()->set_stream(NULL);

  scope_data()->add_to_work_list(jsr_start_block);

  // Ready to resume parsing in subroutine
  iterate_all_blocks();

  // If we bailed out during parsing, return immediately (this is bad news)
  CHECK_BAILOUT_(false);

  // Detect whether the continuation can actually be reached. If not,
  // it has not had state set by the join() operations in
  // iterate_bytecodes_for_block()/ret() and we should not touch the
  // iteration state. The calling activation of
  // iterate_bytecodes_for_block will then complete normally.
  if (cont->state() != NULL) {
    if (!cont->is_set(BlockBegin::was_visited_flag)) {
      // add continuation to work list instead of parsing it immediately
      scope_data()->parent()->add_to_work_list(cont);
    }
  }

  assert(jsr_continuation() == cont, "continuation must not have changed");
  assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
         jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
         "continuation can only be visited in case of backward branches");
  assert(_last && _last->as_BlockEnd(), "block must have end");

  // continuation is in work list, so end iteration of current block
  _skip_block = true;
  pop_scope_for_jsr();

  return true;
}
3212 | |
3213 | |
// Inline the entry of a synchronized method as a monitor enter and
// register the exception handler which releases the monitor if an
// exception is thrown within the callee. Note that the monitor enter
// cannot throw an exception itself, because the receiver is
// guaranteed to be non-null by the explicit null check at the
// beginning of inlining.
void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
  assert(lock != NULL && sync_handler != NULL, "lock or handler missing");

  set_exception_state(state()->copy());
  monitorenter(lock, SynchronizationEntryBCI);
  assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
  // Receiver is known non-null here (see comment above), so suppress the
  // implicit null check on the MonitorEnter.
  _last->set_needs_null_check(false);

  sync_handler->set(BlockBegin::exception_entry_flag);
  sync_handler->set(BlockBegin::is_on_work_list_flag);

  // Handler covers the entire method body ([0, code_size)). The -1
  // presumably marks a catch-all handler type — confirm against the
  // ciExceptionHandler constructor.
  ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
  XHandler* h = new XHandler(desc);
  h->set_entry_block(sync_handler);
  scope_data()->xhandlers()->append(h);
  scope_data()->set_has_handler();
}
3237 | |
3238 | |
// If an exception is thrown and not handled within an inlined
// synchronized method, the monitor must be released before the
// exception is rethrown in the outer scope. Generate the appropriate
// instructions here.
void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
  // Save the parser's current position so it can be restored at the end:
  // this function temporarily redirects appending into sync_handler.
  BlockBegin* orig_block = _block;
  ValueStack* orig_state = _state;
  Instruction* orig_last = _last;
  _last = _block = sync_handler;
  _state = sync_handler->state()->copy();

  assert(sync_handler != NULL, "handler missing");
  assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here");

  assert(lock != NULL || default_handler, "lock or handler missing");

  // This handler was registered last by inline_sync_entry; unregister it.
  XHandler* h = scope_data()->xhandlers()->remove_last();
  assert(h->entry_block() == sync_handler, "corrupt list of handlers");

  block()->set(BlockBegin::was_visited_flag);
  Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
  assert(exception->is_pinned(), "must be");

  int bci = SynchronizationEntryBCI;
  if (lock) {
    assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
    // -99 appears to be the "not yet appended" bci sentinel — confirm
    // against Instruction's constructor; append the lock value here if so.
    if (lock->bci() == -99) {
      lock = append_with_bci(lock, -1);
    }

    // exit the monitor in the context of the synchronized method
    monitorexit(lock, SynchronizationEntryBCI);

    // exit the context of the synchronized method
    if (!default_handler) {
      pop_scope();
      _state = _state->copy();
      bci = _state->scope()->caller_bci();
      _state = _state->pop_scope()->copy();
    }
  }

  // perform the throw as if at the call site
  apush(exception);

  set_exception_state(state()->copy());
  throw_op(bci);

  BlockEnd* end = last()->as_BlockEnd();
  block()->set_end(end);
  end->set_state(state());

  // Restore the parser position saved on entry.
  _block = orig_block;
  _state = orig_state;
  _last = orig_last;
}
3295 | |
3296 | |
3297 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { | |
3298 assert(!callee->is_native(), "callee must not be native"); | |
3299 | |
3300 // first perform tests of things it's not possible to inline | |
3301 if (callee->has_exception_handlers() && | |
3302 !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); | |
3303 if (callee->is_synchronized() && | |
3304 !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized"); | |
3305 if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet"); | |
3306 if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match"); | |
3307 | |
3308 // Proper inlining of methods with jsrs requires a little more work. | |
3309 if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet"); | |
3310 | |
3311 // now perform tests that are based on flag settings | |
3312 if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining"); | |
3313 if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining"); | |
3314 if (callee->code_size() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); | |
3315 | |
3316 // don't inline throwable methods unless the inlining tree is rooted in a throwable class | |
3317 if (callee->name() == ciSymbol::object_initializer_name() && | |
3318 callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { | |
3319 // Throwable constructor call | |
3320 IRScope* top = scope(); | |
3321 while (top->caller() != NULL) { | |
3322 top = top->caller(); | |
3323 } | |
3324 if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { | |
3325 INLINE_BAILOUT("don't inline Throwable constructors"); | |
3326 } | |
3327 } | |
3328 | |
3329 // When SSE2 is used on intel, then no special handling is needed | |
3330 // for strictfp because the enum-constant is fixed at compile time, | |
3331 // the check for UseSSE2 is needed here | |
3332 if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) { | |
3333 INLINE_BAILOUT("caller and callee have different strict fp requirements"); | |
3334 } | |
3335 | |
3336 if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { | |
3337 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); | |
3338 } | |
3339 | |
3340 #ifndef PRODUCT | |
3341 // printing | |
3342 if (PrintInlining) { | |
3343 print_inline_result(callee, true); | |
3344 } | |
3345 #endif | |
3346 | |
3347 // NOTE: Bailouts from this point on, which occur at the | |
3348 // GraphBuilder level, do not cause bailout just of the inlining but | |
3349 // in fact of the entire compilation. | |
3350 | |
3351 BlockBegin* orig_block = block(); | |
3352 | |
3353 const int args_base = state()->stack_size() - callee->arg_size(); | |
3354 assert(args_base >= 0, "stack underflow during inlining"); | |
3355 | |
3356 // Insert null check if necessary | |
3357 Value recv = NULL; | |
3358 if (code() != Bytecodes::_invokestatic) { | |
3359 // note: null check must happen even if first instruction of callee does | |
3360 // an implicit null check since the callee is in a different scope | |
3361 // and we must make sure exception handling does the right thing | |
3362 assert(!callee->is_static(), "callee must not be static"); | |
3363 assert(callee->arg_size() > 0, "must have at least a receiver"); | |
3364 recv = state()->stack_at(args_base); | |
3365 null_check(recv); | |
3366 } | |
3367 | |
3368 if (profile_inlined_calls()) { | |
3369 profile_call(recv, holder_known ? callee->holder() : NULL); | |
3370 } | |
3371 | |
3372 profile_invocation(callee); | |
3373 | |
3374 // Introduce a new callee continuation point - if the callee has | |
3375 // more than one return instruction or the return does not allow | |
3376 // fall-through of control flow, all return instructions of the | |
3377 // callee will need to be replaced by Goto's pointing to this | |
3378 // continuation point. | |
3379 BlockBegin* cont = block_at(next_bci()); | |
3380 bool continuation_existed = true; | |
3381 if (cont == NULL) { | |
3382 cont = new BlockBegin(next_bci()); | |
3383 // low number so that continuation gets parsed as early as possible | |
3384 cont->set_depth_first_number(0); | |
3385 #ifndef PRODUCT | |
3386 if (PrintInitialBlockList) { | |
3387 tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d", | |
3388 cont->block_id(), cont->bci(), bci()); | |
3389 } | |
3390 #endif | |
3391 continuation_existed = false; | |
3392 } | |
3393 // Record number of predecessors of continuation block before | |
3394 // inlining, to detect if inlined method has edges to its | |
3395 // continuation after inlining. | |
3396 int continuation_preds = cont->number_of_preds(); | |
3397 | |
3398 // Push callee scope | |
3399 push_scope(callee, cont); | |
3400 | |
3401 // the BlockListBuilder for the callee could have bailed out | |
3402 CHECK_BAILOUT_(false); | |
3403 | |
3404 // Temporarily set up bytecode stream so we can append instructions | |
3405 // (only using the bci of this stream) | |
3406 scope_data()->set_stream(scope_data()->parent()->stream()); | |
3407 | |
3408 // Pass parameters into callee state: add assignments | |
3409 // note: this will also ensure that all arguments are computed before being passed | |
3410 ValueStack* callee_state = state(); | |
3411 ValueStack* caller_state = scope()->caller_state(); | |
3412 { int i = args_base; | |
3413 while (i < caller_state->stack_size()) { | |
3414 const int par_no = i - args_base; | |
3415 Value arg = caller_state->stack_at_inc(i); | |
3416 // NOTE: take base() of arg->type() to avoid problems storing | |
3417 // constants | |
3418 store_local(callee_state, arg, arg->type()->base(), par_no); | |
3419 } | |
3420 } | |
3421 | |
3422 // Remove args from stack. | |
3423 // Note that we preserve locals state in case we can use it later | |
3424 // (see use of pop_scope() below) | |
3425 caller_state->truncate_stack(args_base); | |
3426 callee_state->truncate_stack(args_base); | |
3427 | |
3428 // Setup state that is used at returns form the inlined method. | |
3429 // This is essentially the state of the continuation block, | |
3430 // but without the return value on stack, if any, this will | |
3431 // be pushed at the return instruction (see method_return). | |
3432 scope_data()->set_continuation_state(caller_state->copy()); | |
3433 | |
3434 // Compute lock stack size for callee scope now that args have been passed | |
3435 scope()->compute_lock_stack_size(); | |
3436 | |
3437 Value lock; | |
3438 BlockBegin* sync_handler; | |
3439 | |
3440 // Inline the locking of the receiver if the callee is synchronized | |
3441 if (callee->is_synchronized()) { | |
3442 lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror()))) | |
3443 : state()->local_at(0); | |
3444 sync_handler = new BlockBegin(-1); | |
3445 inline_sync_entry(lock, sync_handler); | |
3446 | |
3447 // recompute the lock stack size | |
3448 scope()->compute_lock_stack_size(); | |
3449 } | |
3450 | |
3451 | |
3452 BlockBegin* callee_start_block = block_at(0); | |
3453 if (callee_start_block != NULL) { | |
3454 assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header"); | |
3455 Goto* goto_callee = new Goto(callee_start_block, false); | |
3456 goto_callee->set_state(state()); | |
3457 // The state for this goto is in the scope of the callee, so use | |
3458 // the entry bci for the callee instead of the call site bci. | |
3459 append_with_bci(goto_callee, 0); | |
3460 _block->set_end(goto_callee); | |
3461 callee_start_block->merge(callee_state); | |
3462 | |
3463 _last = _block = callee_start_block; | |
3464 | |
3465 scope_data()->add_to_work_list(callee_start_block); | |
3466 } | |
3467 | |
3468 // Clear out bytecode stream | |
3469 scope_data()->set_stream(NULL); | |
3470 | |
3471 // Ready to resume parsing in callee (either in the same block we | |
3472 // were in before or in the callee's start block) | |
3473 iterate_all_blocks(callee_start_block == NULL); | |
3474 | |
3475 // If we bailed out during parsing, return immediately (this is bad news) | |
3476 if (bailed_out()) return false; | |
3477 | |
3478 // iterate_all_blocks theoretically traverses in random order; in | |
3479 // practice, we have only traversed the continuation if we are | |
3480 // inlining into a subroutine | |
3481 assert(continuation_existed || | |
3482 !continuation()->is_set(BlockBegin::was_visited_flag), | |
3483 "continuation should not have been parsed yet if we created it"); | |
3484 | |
3485 // If we bailed out during parsing, return immediately (this is bad news) | |
3486 CHECK_BAILOUT_(false); | |
3487 | |
3488 // At this point we are almost ready to return and resume parsing of | |
3489 // the caller back in the GraphBuilder. The only thing we want to do | |
3490 // first is an optimization: during parsing of the callee we | |
3491 // generated at least one Goto to the continuation block. If we | |
3492 // generated exactly one, and if the inlined method spanned exactly | |
3493 // one block (and we didn't have to Goto its entry), then we snip | |
3494 // off the Goto to the continuation, allowing control to fall | |
3495 // through back into the caller block and effectively performing | |
3496 // block merging. This allows load elimination and CSE to take place | |
3497 // across multiple callee scopes if they are relatively simple, and | |
3498 // is currently essential to making inlining profitable. | |
3499 if ( num_returns() == 1 | |
3500 && block() == orig_block | |
3501 && block() == inline_cleanup_block()) { | |
3502 _last = inline_cleanup_return_prev(); | |
3503 _state = inline_cleanup_state()->pop_scope(); | |
3504 } else if (continuation_preds == cont->number_of_preds()) { | |
3505 // Inlining caused that the instructions after the invoke in the | |
3506 // caller are not reachable any more. So skip filling this block | |
3507 // with instructions! | |
3508 assert (cont == continuation(), ""); | |
3509 assert(_last && _last->as_BlockEnd(), ""); | |
3510 _skip_block = true; | |
3511 } else { | |
3512 // Resume parsing in continuation block unless it was already parsed. | |
3513 // Note that if we don't change _last here, iteration in | |
3514 // iterate_bytecodes_for_block will stop when we return. | |
3515 if (!continuation()->is_set(BlockBegin::was_visited_flag)) { | |
3516 // add continuation to work list instead of parsing it immediately | |
3517 assert(_last && _last->as_BlockEnd(), ""); | |
3518 scope_data()->parent()->add_to_work_list(continuation()); | |
3519 _skip_block = true; | |
3520 } | |
3521 } | |
3522 | |
3523 // Fill the exception handler for synchronized methods with instructions | |
3524 if (callee->is_synchronized() && sync_handler->state() != NULL) { | |
3525 fill_sync_handler(lock, sync_handler); | |
3526 } else { | |
3527 pop_scope(); | |
3528 } | |
3529 | |
3530 compilation()->notice_inlined_method(callee); | |
3531 | |
3532 return true; | |
3533 } | |
3534 | |
3535 | |
3536 void GraphBuilder::inline_bailout(const char* msg) { | |
3537 assert(msg != NULL, "inline bailout msg must exist"); | |
3538 _inline_bailout_msg = msg; | |
3539 } | |
3540 | |
3541 | |
3542 void GraphBuilder::clear_inline_bailout() { | |
3543 _inline_bailout_msg = NULL; | |
3544 } | |
3545 | |
3546 | |
3547 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) { | |
3548 ScopeData* data = new ScopeData(NULL); | |
3549 data->set_scope(scope); | |
3550 data->set_bci2block(bci2block); | |
3551 _scope_data = data; | |
3552 _block = start; | |
3553 } | |
3554 | |
3555 | |
// Create an IRScope and ScopeData for an inlined callee and make them
// current. Builds the callee's block list (may bail out) and links the
// callee's state onto the caller's.
void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
  IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
  scope()->add_callee(callee_scope);

  BlockListBuilder blb(compilation(), callee_scope, -1);
  CHECK_BAILOUT();

  if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
    // this scope can be inlined directly into the caller so remove
    // the block at bci 0.
    blb.bci2block()->at_put(0, NULL);
  }

  // Link callee state below the caller's before the new ScopeData is made
  // current, so state()/scope() still refer to the caller here.
  callee_scope->set_caller_state(state());
  set_state(state()->push_scope(callee_scope));

  ScopeData* data = new ScopeData(scope_data());
  data->set_scope(callee_scope);
  data->set_bci2block(blb.bci2block());
  data->set_continuation(continuation);
  _scope_data = data;
}
3578 | |
3579 | |
// Create and install a ScopeData for parsing a jsr subroutine. Unlike
// push_scope, the IRScope itself is shared with the enclosing scope; the
// jsr gets its own bci2block clone and exception-handler copies.
void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
  ScopeData* data = new ScopeData(scope_data());
  data->set_parsing_jsr();
  data->set_jsr_entry_bci(jsr_dest_bci);
  // -1 = return-address local not known yet; set when astore is parsed.
  data->set_jsr_return_address_local(-1);
  // Must clone bci2block list as we will be mutating it in order to
  // properly clone all blocks in jsr region as well as exception
  // handlers containing rets
  BlockList* new_bci2block = new BlockList(bci2block()->length());
  new_bci2block->push_all(bci2block());
  data->set_bci2block(new_bci2block);
  data->set_scope(scope());
  data->setup_jsr_xhandlers();
  // Propagate the enclosing inlining continuation (if any) so returns
  // inside the subroutine still know where to go.
  data->set_continuation(continuation());
  if (continuation() != NULL) {
    assert(continuation_state() != NULL, "");
    data->set_continuation_state(continuation_state()->copy());
  }
  data->set_jsr_continuation(jsr_continuation);
  _scope_data = data;
}
3601 | |
3602 | |
// Return to the caller's ScopeData, propagating the callee's monitor
// requirement upward.
void GraphBuilder::pop_scope() {
  // Capture the callee's lock count first: after the assignment below,
  // scope() refers to the caller's scope, not the callee's.
  int number_of_locks = scope()->number_of_locks();
  _scope_data = scope_data()->parent();
  // accumulate minimum number of monitor slots to be reserved
  scope()->set_min_number_of_locks(number_of_locks);
}
3609 | |
3610 | |
// Leave a jsr ScopeData. The IRScope is shared with the enclosing scope
// (see push_scope_for_jsr), so no lock bookkeeping is needed here.
void GraphBuilder::pop_scope_for_jsr() {
  _scope_data = scope_data()->parent();
}
3614 | |
3615 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) { | |
3616 if (InlineUnsafeOps) { | |
3617 Values* args = state()->pop_arguments(callee->arg_size()); | |
3618 null_check(args->at(0)); | |
3619 Instruction* offset = args->at(2); | |
3620 #ifndef _LP64 | |
3621 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); | |
3622 #endif | |
3623 Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile)); | |
3624 push(op->type(), op); | |
3625 compilation()->set_has_unsafe_access(true); | |
3626 } | |
3627 return InlineUnsafeOps; | |
3628 } | |
3629 | |
3630 | |
3631 bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) { | |
3632 if (InlineUnsafeOps) { | |
3633 Values* args = state()->pop_arguments(callee->arg_size()); | |
3634 null_check(args->at(0)); | |
3635 Instruction* offset = args->at(2); | |
3636 #ifndef _LP64 | |
3637 offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); | |
3638 #endif | |
3639 Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile)); | |
3640 compilation()->set_has_unsafe_access(true); | |
3641 kill_all(); | |
3642 } | |
3643 return InlineUnsafeOps; | |
3644 } | |
3645 | |
3646 | |
3647 bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) { | |
3648 if (InlineUnsafeOps) { | |
3649 Values* args = state()->pop_arguments(callee->arg_size()); | |
3650 null_check(args->at(0)); | |
3651 Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false)); | |
3652 push(op->type(), op); | |
3653 compilation()->set_has_unsafe_access(true); | |
3654 } | |
3655 return InlineUnsafeOps; | |
3656 } | |
3657 | |
3658 | |
3659 bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) { | |
3660 if (InlineUnsafeOps) { | |
3661 Values* args = state()->pop_arguments(callee->arg_size()); | |
3662 null_check(args->at(0)); | |
3663 Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2))); | |
3664 compilation()->set_has_unsafe_access(true); | |
3665 } | |
3666 return InlineUnsafeOps; | |
3667 } | |
3668 | |
3669 | |
// Inline Unsafe.prefetch{Read,Write}[Static] as an UnsafePrefetch node.
// The static variants take no base-object argument, so the base sits at
// index 0 and needs no null check; otherwise the receiver at index 0 is
// null-checked and the base object follows it. Returns false when
// InlineUnsafeOps is off.
bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
  if (InlineUnsafeOps) {
    Values* args = state()->pop_arguments(callee->arg_size());
    int obj_arg_index = 1; // Assume non-static case
    if (is_static) {
      obj_arg_index = 0;
    } else {
      null_check(args->at(0));
    }
    Instruction* offset = args->at(obj_arg_index + 1);
#ifndef _LP64
    // 32-bit VM: narrow the 64-bit offset to an int
    offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif
    Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
                               : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
    compilation()->set_has_unsafe_access(true);
  }
  return InlineUnsafeOps;
}
3689 | |
3690 | |
// Inline Unsafe.compareAndSwap{Int,Long,Object} as an Intrinsic node
// (see the _compareAndSwap* cases in the intrinsic dispatch) and push
// its int result.
void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
  ValueType* result_type = as_ValueType(callee->return_type());
  assert(result_type->is_int(), "int result");
  Values* args = state()->pop_arguments(callee->arg_size());

  // Pop off some args to specially handle, then push back
  Value newval = args->pop();
  Value cmpval = args->pop();
  Value offset = args->pop();
  Value src = args->pop();
  Value unsafe_obj = args->pop();

  // Separately handle the unsafe arg. It is not needed for code
  // generation, but must be null checked
  null_check(unsafe_obj);

#ifndef _LP64
  // 32-bit VM: narrow the 64-bit offset to an int
  offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
#endif

  // Rebuild the argument list without the Unsafe receiver.
  args->push(src);
  args->push(offset);
  args->push(cmpval);
  args->push(newval);

  // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones so mark the state as no preserved. This will
  // cause CSE to invalidate memory across it.
  bool preserves_state = false;
  Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, lock_stack(), preserves_state);
  append_split(result);
  push(result_type, result);
  compilation()->set_has_unsafe_access(true);
}
3725 | |
3726 | |
3727 #ifndef PRODUCT | |
3728 void GraphBuilder::print_inline_result(ciMethod* callee, bool res) { | |
3729 const char sync_char = callee->is_synchronized() ? 's' : ' '; | |
3730 const char exception_char = callee->has_exception_handlers() ? '!' : ' '; | |
3731 const char monitors_char = callee->has_monitor_bytecodes() ? 'm' : ' '; | |
3732 tty->print(" %c%c%c ", sync_char, exception_char, monitors_char); | |
3733 for (int i = 0; i < scope()->level(); i++) tty->print(" "); | |
3734 if (res) { | |
3735 tty->print(" "); | |
3736 } else { | |
3737 tty->print("- "); | |
3738 } | |
3739 tty->print("@ %d ", bci()); | |
3740 callee->print_short_name(); | |
3741 tty->print(" (%d bytes)", callee->code_size()); | |
3742 if (_inline_bailout_msg) { | |
3743 tty->print(" %s", _inline_bailout_msg); | |
3744 } | |
3745 tty->cr(); | |
3746 | |
3747 if (res && CIPrintMethodCodes) { | |
3748 callee->print_codes(); | |
3749 } | |
3750 } | |
3751 | |
3752 | |
// Dump the value-numbering map accumulated while building the graph
// (non-product builds only).
void GraphBuilder::print_stats() {
  vmap()->print();
}
3756 #endif // PRODUCT | |
3757 | |
3758 | |
// Append a ProfileCall instruction recording the current call site
// (method + bci) and the receiver. known_holder, when non-NULL, is
// presumably the statically known holder klass of the receiver —
// semantics live in ProfileCall itself.
void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  append(new ProfileCall(method(), bci(), recv, known_holder));
}
3762 | |
3763 | |
3764 void GraphBuilder::profile_invocation(ciMethod* callee) { | |
3765 if (profile_calls()) { | |
3766 // increment the interpreter_invocation_count for the inlinee | |
3767 Value m = append(new Constant(new ObjectConstant(callee))); | |
3768 append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1)); | |
3769 } | |
3770 } | |
3771 | |
3772 | |
3773 void GraphBuilder::profile_bci(int bci) { | |
3774 if (profile_branches()) { | |
3775 ciMethodData* md = method()->method_data(); | |
3776 if (md == NULL) { | |
3777 BAILOUT("out of memory building methodDataOop"); | |
3778 } | |
3779 ciProfileData* data = md->bci_to_data(bci); | |
3780 assert(data != NULL && data->is_JumpData(), "need JumpData for goto"); | |
3781 Value mdo = append(new Constant(new ObjectConstant(md))); | |
3782 append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1)); | |
3783 } | |
3784 } |