annotate src/share/vm/opto/compile.hpp @ 851:fc4be448891f (graal-jvmci-8)

6851742: (EA) allocation elimination doesn't work with UseG1GC
Summary: Fix eliminate_card_mark() to eliminate G1 pre/post barriers.
Reviewed-by: never

author:   kvn
date:     Thu, 16 Jul 2009 14:10:42 -0700
parents:  72c5366e5d86
children: ea3f9723b5cf
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachNode;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class TypeData;
class TypePtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };
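  // Illustrative sketch (not part of the original header): a compilation phase
  // is typically timed by putting a TracePhase on the stack for the duration of
  // the phase, e.g.
  //   {
  //     TracePhase t("optimizer", &_t_optimizer, TimeCompiler);
  //     // ... run the phase ...
  //   }  // destructor stops the timer and, if logging, closes the log bracket
  // The phase name and the accumulator "_t_optimizer" are assumed here purely
  // for illustration.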

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field,"");
      _field = f;
      if (f->is_final())  _is_rewritable = false;
    }

    void print_on(outputStream* st) PRODUCT_RETURN;
  };
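  // Illustrative sketch (assumption, not original text): client phases normally
  // do not build AliasType objects directly; they go through the Compile
  // accessors declared further below, e.g.
  //   int idx = C->get_alias_index(store->adr_type());
  //   if (C->alias_type(idx)->is_rewritable()) { /* the slice may be written */ }
  // so that every memory-touching node ends up attached to one alias index
  // (one memory slice) of a MergeMemNode.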

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = methodDataOopDesc::_trap_hist_limit
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator, i.e. locks, original deopt pc, etc.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;        // Count of something big happening
  bool                  _deopt_happens;         // TRUE if de-optimization CAN happen
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update methodDataOops
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                   // CI interface
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;    // default notes for new nodes
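  // Illustrative sketch (an assumption about the indexing scheme, based only on
  // the block-size constants above): a node's notes are found by splitting its
  // index into a block number and an offset within that block, roughly
  //   int block  = idx >> _log2_node_notes_block_size;
  //   int offset = idx & (_node_notes_block_size - 1);
  //   Node_Notes* notes = _node_note_array->at(block) + offset;
  // which is what locate_node_notes() (declared below) is expected to do.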

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
  bool                  _has_java_calls;        // True if the method has java calls
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  int                   _frame_slots;           // Size of total frame in stack slots
  CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;    // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;           // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;           // Where the code is assembled
  int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;   // For temporary code buffers.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _subsume_loads; }
  // Do escape analysis.
  bool do_escape_analysis() const { return _do_escape_analysis; }
  bool save_argument_registers() const { return _save_argument_registers; }


  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_major_progress() { _major_progress++; }
  void clear_major_progress() { _major_progress = 0; }
  int num_loop_opts() const { return _num_loop_opts; }
  void set_num_loop_opts(int n) { _num_loop_opts = n; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool deopt_happens() const { return _deopt_happens; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_count_invocations() const { return _do_count_invocations; }
  void set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool do_method_data_update() const { return _do_method_data_update; }
  void set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int AliasLevel() const { return _AliasLevel; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }
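  // Illustrative sketch (assumption, not original text): this is how per-method
  // CompilerOracle options are typically consulted, e.g.
  //   if (C->method_has_option("PrintOptoAssembly")) { ... }
  // where the option string would have to match a command-line entry such as
  //   -XX:CompileCommand=option,<method>,PrintOptoAssembly
  // The option name shown here is only an example.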
#ifndef PRODUCT
  bool trace_opto_output() const { return _trace_opto_output; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
  }
  void print_method(const char * name, int level = 1) {
#ifndef PRODUCT
    if (_printer) _printer->print_method(this, name, level);
#endif
  }
  void end_method() {
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int macro_count()                 { return _macro_nodes->length(); }
  Node* macro_node(int idx)         { return _macro_nodes->at(idx); }
  ConnectionGraph* congraph()       { return _congraph; }
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
  }
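  // Illustrative sketch (assumption, not original text): macro nodes are how
  // high-level operations (allocations, locks, etc.) stay in the graph until
  // macro expansion.  A typical lifecycle is roughly
  //   C->add_macro_node(alloc);      // registered when the AllocateNode is created
  //   ...                            // escape analysis may scalar-replace it ...
  //   C->remove_macro_node(alloc);   // ... and then take it off the expand list
  // Anything still on the list is expanded into explicit runtime calls and slow
  // paths by the macro-expansion phase before matching.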

  // Compilation environment.
  Arena*      comp_arena()          { return &_comp_arena; }
  ciEnv*      env() const           { return _env; }
  CompileLog* log() const           { return _log; }
  bool        failing() const       { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason()      { return _failure_reason; }
  bool        failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (unique() + margin > (uint)MaxNodeLimit) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
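  // Illustrative sketch (assumption, not original text): phases that can blow up
  // the graph guard themselves by bailing out through check_node_count, e.g.
  //   if (C->check_node_count(NodeLimitFudgeFactor, "out of nodes during split")) {
  //     return;  // compilation has been marked as failing; the caller unwinds
  //   }
  // The margin and message shown here are only examples.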

  // Node management
  uint unique() const { return _unique; }
  uint next_unique() { return _unique++; }
  void set_unique(uint i) { _unique = i; }
  static int debug_idx() { return debug_only(_debug_idx)+0; }
  static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
  Arena* node_arena() { return &_node_arena; }
  Arena* old_arena() { return &_old_arena; }
  RootNode* root() const { return _root; }
  void set_root(RootNode* r) { _root = r; }
  StartNode* start() const; // (Derived from root.)
  void init_start(StartNode* s);
  Node* immutable_memory();

  Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const { return _recent_alloc_obj; }
  void set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }

  // Handy undefined Node
  Node* top() const { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena() { return _type_arena; }
  Dict* type_dict() { return _type_dict; }
  void* type_hwm() { return _type_hwm; }
  size_t type_last_size() { return _type_last_size; }
  int num_alias_types() { return _num_alias_types; }

  void init_type_arena() { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a) { _type_arena = a; }
  void set_type_dict(Dict* d) { _type_dict = d; }
  void set_type_hwm(void* p) { _type_hwm = p; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
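  // Illustrative sketch (assumption, not original text): this pair implements a
  // one-entry cache so repeated signature lookups for the same method are cheap,
  // roughly
  //   const TypeFunc* tf = C->last_tf(method);   // fast path: cache hit
  //   if (tf == NULL) {
  //     tf = /* build the TypeFunc from the method signature */ NULL;
  //     C->set_last_tf(method, tf);              // remember for next time
  //   }
  // (Per the field comments above, TypeFunc::make is the intended user of this cache.)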

  AliasType*     alias_type(int idx)                  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*     alias_type(const TypePtr* adr_type)  { return find_alias_type(adr_type, false); }
  bool           have_alias_type(const TypePtr* adr_type);
  AliasType*     alias_type(ciField* field);

  int            get_alias_index(const TypePtr* at)   { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx)              { return alias_type(aidx)->adr_type(); }
  int            get_general_index(uint aidx)         { return alias_type(aidx)->general_index(); }

  // Building nodes
  void      rethrow_exceptions(JVMState* jvms);
  void      return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);

  // Report if there were too many traps at a current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);

  // Parsing, optimization
  PhaseGVN*         initial_gvn()    { return _initial_gvn; }
  Unique_Node_List* for_igvn()       { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  void identify_useful_nodes(Unique_Node_List &useful);
  void remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo* warm_calls() const          { return _warm_calls; }
  void set_warm_calls(WarmCallInfo* l)      { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*      cfg()                      { return _cfg; }
  bool           select_24_bit_instr() const { return _select_24_bit_instr; }
  bool           in_24_bit_fp_mode() const  { return _in_24_bit_fp_mode; }
  bool           has_java_calls() const     { return _has_java_calls; }
  Matcher*       matcher()                  { return _matcher; }
  PhaseRegAlloc* regalloc()                 { return _regalloc; }
  int            frame_slots() const        { return _frame_slots; }
  int            frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  RegMask&       FIRST_STACK_mask()         { return _FIRST_STACK_mask; }
  Arena*         indexSet_arena()           { return _indexSet_arena; }
  void*          indexSet_free_block_list() { return _indexSet_free_block_list; }
  uint           node_bundling_limit()      { return _node_bundling_limit; }
  Bundle*        node_bundling_base()       { return _node_bundling_base; }
  void set_node_bundling_limit(uint n)      { _node_bundling_limit = n; }
  void set_node_bundling_base(Bundle* b)    { _node_bundling_base = b; }
  bool starts_bundle(const Node *n) const;
  bool need_stack_bang(int frame_size_in_bytes) const;
  bool need_register_stack_bang() const;

  void set_matcher(Matcher* m)                  { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra)          { _regalloc = ra; }
  void set_indexSet_arena(Arena* a)             { _indexSet_arena = a; }
  void set_indexSet_free_block_list(void* p)    { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode = mode;
  }

  void set_has_java_calls(bool z) { _has_java_calls = z; }

  // Instruction bits passed off to the VM
  int            code_size()                { return _method_size; }
  CodeBuffer*    code_buffer()              { return &_code_buffer; }
  int            first_block_size()         { return _first_block_size; }
  void           set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table()   { return &_handler_table; }
  ImplicitExceptionTable* inc_table()       { return &_inc_table; }
  OopMapSet*     oop_map_set()              { return _oop_map_set; }
  DebugInformationRecorder* debug_info()    { return env()->debug_info(); }
  Dependencies*  dependencies()             { return env()->dependencies(); }
  static int     CompiledZap_count()        { return _CompiledZap_count; }
  BufferBlob*    scratch_buffer_blob()      { return _scratch_buffer_blob; }
  void           init_scratch_buffer_blob();
  void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*     scratch_locs_memory()      { return _scratch_locs_memory; }
  void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint scratch_emit_size(const Node* n);

  enum ScratchBufferBlob {
    MAX_inst_size   = 1024,
    MAX_locs_size   = 128,   // number of relocInfo elements
    MAX_const_size  = 128,
    MAX_stubs_size  = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis);
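  // Illustrative sketch (assumption, not original text): the compiler broker
  // side (C2Compiler) is the usual caller of this constructor; a compile
  // request ends up roughly as
  //   Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
  // and the constructor drives the whole pipeline (parse, optimize, code gen),
  // reporting problems through record_failure() rather than by throwing.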

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

629 // Process an OopMap Element while emitting nodes | |
630 void Process_OopMap_Node(MachNode *mach, int code_offset); | |
631 | |
632 // Write out basic block data to code buffer | |
633 void Fill_buffer(); | |
634 | |
635 // Determine which variable sized branches can be shortened | |
636 void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size); | |
637 | |
638 // Compute the size of first NumberOfLoopInstrToAlign instructions | |
639 // at the head of a loop. | |
640 void compute_loop_first_inst_sizes(); | |
641 | |
642 // Compute the information for the exception tables | |
643 void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels); | |
644 | |
645 // Stack slots that may be unused by the calling convention but must | |
646 // otherwise be preserved. On Intel this includes the return address. | |
647 // On PowerPC it includes the 4 words holding the old TOC & LR glue. | |
648 uint in_preserve_stack_slots(); | |
649 | |
650 // "Top of Stack" slots that may be unused by the calling convention but must | |
651 // otherwise be preserved. | |
652 // On Intel these are not necessary and the value can be zero. | |
653 // On Sparc this describes the words reserved for storing a register window | |
654 // when an interrupt occurs. | |
655 static uint out_preserve_stack_slots(); | |
656 | |
657 // Number of outgoing stack slots killed above the out_preserve_stack_slots | |
658 // for calls to C. Supports the var-args backing area for register parms. | |
659 uint varargs_C_out_slots_killed() const; | |
660 | |
661 // Number of Stack Slots consumed by a synchronization entry | |
662 int sync_stack_slots() const; | |
663 | |
664 // Compute the name of old_SP. See <arch>.ad for frame layout. | |
665 OptoReg::Name compute_old_SP(); | |
666 | |
667 #ifdef ENABLE_ZAP_DEAD_LOCALS | |
668 static bool is_node_getting_a_safepoint(Node*); | |
669 void Insert_zap_nodes(); | |
670 Node* call_zap_node(MachSafePointNode* n, int block_no); | |
671 #endif | |
672 | |
673 private: | |
674 // Phase control: | |
675 void Init(int aliaslevel); // Prepare for a single compilation | |
676 int Inline_Warm(); // Find more inlining work. | |
677 void Finish_Warm(); // Give up on further inlines. | |
678 void Optimize(); // Given a graph, optimize it | |
679 void Code_Gen(); // Generate code from a graph | |
680 | |
681 // Management of the AliasType table. | |
682 void grow_alias_types(); | |
683 AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type); | |
684 const TypePtr *flatten_alias_type(const TypePtr* adr_type) const; | |
685 AliasType* find_alias_type(const TypePtr* adr_type, bool no_create); | |
686 | |
687 void verify_top(Node*) const PRODUCT_RETURN; | |
688 | |
689 // Intrinsic setup. | |
690 void register_library_intrinsics(); // initializer | |
691 CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor | |
692 int intrinsic_insertion_index(ciMethod* m, bool is_virtual); // helper | |
693 CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual); // query fn | |
694 void register_intrinsic(CallGenerator* cg); // update fn | |
695 | |
696 #ifndef PRODUCT | |
697 static juint _intrinsic_hist_count[vmIntrinsics::ID_LIMIT]; | |
698 static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT]; | |
699 #endif | |
700 | |
701 public: | |
702 | |
703 // Note: Histogram array size is about 1 Kb. | |
704 enum { // flag bits: | |
705 _intrinsic_worked = 1, // succeeded at least once | |
706 _intrinsic_failed = 2, // tried it but it failed | |
707 _intrinsic_disabled = 4, // was requested but disabled (e.g., -XX:-InlineUnsafeOps) | |
708 _intrinsic_virtual = 8, // was seen in the virtual form (rare) | |
709 _intrinsic_both = 16 // was seen in the non-virtual form (usual) | |
710 }; | |
711 // Update histogram. Return boolean if this is a first-time occurrence. | |
712 static bool gather_intrinsic_statistics(vmIntrinsics::ID id, | |
713 bool is_virtual, int flags) PRODUCT_RETURN0; | |
714 static void print_intrinsic_statistics() PRODUCT_RETURN; | |
715 | |
716 // Graph verification code | |
717 // Walk the node list, verifying that there is a one-to-one | |
718 // correspondence between Use-Def edges and Def-Use edges | |
719 // The option no_dead_code enables stronger checks that the | |
720 // graph is strongly connected from root in both directions. | |
721 void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN; | |
722 | |
723 // Print bytecodes, including the scope inlining tree | |
724 void print_codes(); | |
725 | |
726 // End-of-run dumps. | |
727 static void print_statistics() PRODUCT_RETURN; | |
728 | |
729 // Dump formatted assembly | |
730 void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN; | |
731 void dump_pc(int *pcs, int pc_limit, Node *n); | |
732 | |
733 // Verify ADLC assumptions during startup | |
734 static void adlc_verification() PRODUCT_RETURN; | |
735 | |
736 // Definitions of pd methods | |
737 static void pd_compiler2_init(); | |
738 }; |