comparison src/share/vm/opto/block.hpp @ 12023:d1034bd8cefc

8022284: Hide internal data structure in PhaseCFG
Summary: Hide private node to block mapping using public interface
Reviewed-by: kvn, roland
author adlertz
date Wed, 07 Aug 2013 17:56:19 +0200
parents 2aff40cb4703
children adb9a7d94cb5
comparing 12004:71526a36ebb4 with 12023:d1034bd8cefc
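Every client-side change in this patch follows one pattern: code that used to
index the formerly public _bbs map by node index now goes through the new
PhaseCFG accessors. A minimal before/after sketch (cfg, n, and b are
illustrative names, not lines from this file):

    // Before: _bbs was a public Block_Array, indexed by node idx directly.
    Block* b = cfg->_bbs[n->_idx];            // look up the node's block
    cfg->_bbs.map(n->_idx, b);                // (re)establish a mapping

    // After: the mapping is private to PhaseCFG and reached through the
    // small public interface introduced below.
    Block* b = cfg->get_block_for_node(n);    // look up the node's block
    cfg->map_node_to_block(n, b);             // (re)establish a mapping
    if (cfg->has_block(n)) {                  // NULL-safe membership test
      cfg->unmap_node_from_block(n);          // drop the mapping again
    }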
@@ -46,17 +46,16 @@
 // allocation I do not need a destructor to reclaim storage.
 class Block_Array : public ResourceObj {
   friend class VMStructs;
   uint _size;                   // allocated size, as opposed to formal limit
   debug_only(uint _limit;)      // limit to formal domain
+  Arena *_arena;                // Arena to allocate in
 protected:
   Block **_blocks;
   void grow( uint i );          // Grow array node to fit

 public:
-  Arena *_arena;                // Arena to allocate in
-
   Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
     debug_only(_limit=0);
     _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
     for( int i = 0; i < OptoBlockListSize; i++ ) {
       _blocks[i] = NULL;
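Moving _arena into the private section changes nothing for clients: the arena
is captured once by the constructor and used only inside grow(). A small
sketch of usage that stays valid after the change (n and block are assumed
names):

    Arena* arena = Thread::current()->resource_area();
    Block_Array bbs(arena);     // arena is captured by the constructor
    bbs.map(n->_idx, block);    // growing past the allocated size draws
                                // on the private arena inside grow()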
@@ -75,11 +74,11 @@
 class Block_List : public Block_Array {
   friend class VMStructs;
 public:
   uint _cnt;
   Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
   void push( Block *b ) { map(_cnt++,b); }
   Block *pop() { return _blocks[--_cnt]; }
   Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
   void remove( uint i );
   void insert( uint i, Block *n );
   uint size() const { return _cnt; }
@@ -282,19 +281,19 @@
   void find_remove( const Node *n );

   // helper function that adds caller save registers to MachProjNode
   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

   // Perform basic-block local scheduling
   Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
+  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
+  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(Block_Array &bbs, Compile *C);
+  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   // Detect implicit-null-check opportunities. Basically, find NULL checks
   // with suitable memory ops nearby. Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
   void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

@@ -329,18 +328,18 @@
   // Examine block's code shape to predict if it is not commonly executed.
   bool has_uncommon_code() const;

   // Use frequency calculations and code shape to predict if the block
   // is uncommon.
-  bool is_uncommon( Block_Array &bbs ) const;
+  bool is_uncommon(PhaseCFG* cfg) const;

 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
-  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
-  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
+  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
+  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
   void dump() const;
-  void dump( const Block_Array *bbs ) const;
+  void dump(const PhaseCFG* cfg) const;
 #endif
 };


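For all of the Block helpers above, the mechanical change is the same: a body
that used to index the Block_Array argument now asks the passed-in PhaseCFG.
A sketch of the before/after body shape under the two signatures (the bodies
shown are illustrative, not the actual hunks from the .cpp files in this
changeset):

    // Before (sketch): the helper received the raw node-to-block mapping.
    void Block::set_next_call(Node* n, VectorSet& next_call, Block_Array& bbs) {
      Block* b = bbs[n->_idx];
      // ...
    }

    // After (sketch): the helper receives the owning phase and uses its
    // accessor, so the mapping itself can stay private to PhaseCFG.
    void Block::set_next_call(Node* n, VectorSet& next_call, PhaseCFG* cfg) {
      Block* b = cfg->get_block_for_node(n);
      // ...
    }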
@@ -347,10 +346,16 @@
 //------------------------------PhaseCFG---------------------------------------
 // Build an array of Basic Block pointers, one per Node.
 class PhaseCFG : public Phase {
   friend class VMStructs;
 private:
+  // Arena for the blocks to be stored in
+  Arena* _block_arena;
+
+  // Map nodes to owning basic block
+  Block_Array _node_to_block_mapping;
+
   // Build a proper looking cfg. Return count of basic blocks
   uint build_cfg();

   // Perform DFS search.
   // Setup 'vertex' as DFS to vertex mapping.
@@ -369,26 +374,46 @@
   // I'll need a few machine-specific GotoNodes. Clone from this one.
   MachNode *_goto;

   Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   void verify_anti_dependences(Block* LCA, Node* load) {
-    assert(LCA == _bbs[load->_idx], "should already be scheduled");
+    assert(LCA == get_block_for_node(load), "should already be scheduled");
     insert_anti_dependences(LCA, load, true);
   }

 public:
-  PhaseCFG( Arena *a, RootNode *r, Matcher &m );
+  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

   uint _num_blocks;             // Count of basic blocks
   Block_List _blocks;           // List of basic blocks
   RootNode *_root;              // Root of whole program
-  Block_Array _bbs;             // Map Nodes to owning Basic Block
   Block *_broot;                // Basic block of root
   uint _rpo_ctr;
   CFGLoop* _root_loop;
   float _outer_loop_freq;       // Outmost loop frequency

+
+  // set which block this node should reside in
+  void map_node_to_block(const Node* node, Block* block) {
+    _node_to_block_mapping.map(node->_idx, block);
+  }
+
+  // removes the mapping from a node to a block
+  void unmap_node_from_block(const Node* node) {
+    _node_to_block_mapping.map(node->_idx, NULL);
+  }
+
+  // get the block in which this node resides
+  Block* get_block_for_node(const Node* node) const {
+    return _node_to_block_mapping[node->_idx];
+  }
+
+  // does this node reside in a block; return true
+  bool has_block(const Node* node) const {
+    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
+  }
+
   // Per node latency estimation, valid only during GCM
   GrowableArray<uint> *_node_latency;

 #ifndef PRODUCT
   bool _trace_opto_pipelining;  // tracing flag
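Note the asymmetry between the last two accessors: get_block_for_node indexes
the mapping with operator[], while has_block goes through lookup, which (as
its own body shows) yields NULL for a node that has not been mapped. So
has_block is the form to use when a node may not have been placed yet. An
illustrative guarded update (n and new_home are assumed names):

    // Only touch the mapping if the node has already been placed somewhere.
    if (cfg->has_block(n)) {
      cfg->unmap_node_from_block(n);        // clear the old entry
      cfg->map_node_to_block(n, new_home);  // then record the new block
    }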
@@ -403,11 +428,11 @@

   // Estimate block frequencies based on IfNode probabilities
   void Estimate_Block_Frequency();

   // Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific
-  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
+  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
   void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

   // Compute the (backwards) latency of a node from the uses
   void latency_from_uses(Node *n);

@@ -452,11 +477,11 @@
   CFGLoop* create_loop_tree();

   // Insert a node into a block, and update the _bbs
   void insert( Block *b, uint idx, Node *n ) {
     b->_nodes.insert( idx, n );
-    _bbs.map( n->_idx, b );
+    map_node_to_block(n, b);
   }

 #ifndef PRODUCT
   bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

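insert() illustrates the invariant the new interface maintains: a node's slot
in the block's _nodes list and its entry in the private mapping are updated
together. An illustrative call site (b, idx, and n are assumed names):

    cfg->insert(b, idx, n);  // splices n into b->_nodes at idx and maps n -> b
    assert(cfg->get_block_for_node(n) == b, "insert keeps the mapping in sync");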
@@ -541,11 +566,11 @@
     _parent(NULL),
     _sibling(NULL),
     _child(NULL),
     _exit_prob(1.0f) {}
   CFGLoop* parent() { return _parent; }
-  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
+  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
   void add_member(CFGElement *s) { _members.push(s); }
   void add_nested_loop(CFGLoop* cl);
   Block* head() {
     assert(_members.at(0)->is_block(), "head must be a block");
     Block* hd = _members.at(0)->as_Block();
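The same substitution reaches into CFGLoop::push_pred: where the old code
indexed node_to_blk by a predecessor's _idx, the new code asks the cfg for
the block. A sketch of the relevant step under the new signature (pred_node
is an assumed name for the i-th predecessor of blk, however it is obtained):

    Block* pred_block = cfg->get_block_for_node(pred_node);
    worklist.push(pred_block);  // Block_List::push, shown earlier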