comparison src/share/vm/opto/loopTransform.cpp @ 2468:6c97c830fb6f

Merge
author jrose
date Sat, 09 Apr 2011 21:16:12 -0700
parents 1d1603768966 3af54845df98
children ae93231c7a1f
58 // Put loop body on igvn work list 58 // Put loop body on igvn work list
59 void IdealLoopTree::record_for_igvn() { 59 void IdealLoopTree::record_for_igvn() {
60 for( uint i = 0; i < _body.size(); i++ ) { 60 for( uint i = 0; i < _body.size(); i++ ) {
61 Node *n = _body.at(i); 61 Node *n = _body.at(i);
62 _phase->_igvn._worklist.push(n); 62 _phase->_igvn._worklist.push(n);
63 }
64 }
65
66 //------------------------------compute_exact_trip_count-----------------------
67 // Compute the loop's exact trip count if possible. Do not recalculate the trip count
68 // for split loops (pre-main-post), which have their limits and inits behind an Opaque node.
69 void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
70 if (!_head->as_Loop()->is_valid_counted_loop()) {
71 return;
72 }
73 CountedLoopNode* cl = _head->as_CountedLoop();
74 // The trip count may become nonexact for iteration-split loops, since
75 // RCE modifies the limits. Note that the _trip_count value is not reset,
76 // since it is used to limit unrolling of the main loop.
77 cl->set_nonexact_trip_count();
78
79 // The loop's test should be part of the loop.
80 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
81 return; // Infinite loop
82
83 #ifdef ASSERT
84 BoolTest::mask bt = cl->loopexit()->test_trip();
85 assert(bt == BoolTest::lt || bt == BoolTest::gt ||
86 bt == BoolTest::ne, "canonical test is expected");
87 #endif
88
89 Node* init_n = cl->init_trip();
90 Node* limit_n = cl->limit();
91 if (init_n != NULL && init_n->is_Con() &&
92 limit_n != NULL && limit_n->is_Con()) {
93 // Use 64-bit jlong values to avoid integer overflow (long is only 32 bits on LLP64 targets).
94 int stride_con = cl->stride_con();
95 jlong init_con = cl->init_trip()->get_int();
96 jlong limit_con = cl->limit()->get_int();
97 int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
98 jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
99 if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
100 // Set exact trip count.
101 cl->set_exact_trip_count((uint)trip_count);
102 }
63 } 103 }
64 } 104 }
65 105
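The stride_m term biases the division so it rounds toward the exit test. A minimal standalone sketch of the formula above, with illustrative values (exact_trip_count is a hypothetical helper, not a HotSpot API):

static jlong exact_trip_count(jlong init_con, jlong limit_con, int stride_con) {
  int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
  return (limit_con - init_con + stride_m) / stride_con;
}
// exact_trip_count(0, 7, 3)  == (7 - 0 + 2) / 3  == 3   // i = 0, 3, 6
// exact_trip_count(7, 0, -3) == (0 - 7 - 2) / -3 == 3   // i = 7, 4, 1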
66 //------------------------------compute_profile_trip_cnt---------------------------- 106 //------------------------------compute_profile_trip_cnt----------------------------
67 // Compute loop trip count from profile data as 107 // Compute loop trip count from profile data as
299 // backedges) and then map to the new peeled iteration. This leaves 339 // backedges) and then map to the new peeled iteration. This leaves
300 // the pre-loop with only 1 user (the new peeled iteration), but the 340 // the pre-loop with only 1 user (the new peeled iteration), but the
301 // peeled-loop backedge has 2 users. 341 // peeled-loop backedge has 2 users.
302 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the 342 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
303 // extra backedge user. 343 // extra backedge user.
344 //
345 // orig
346 //
347 // stmt1
348 // |
349 // v
350 // loop predicate
351 // |
352 // v
353 // loop<----+
354 // | |
355 // stmt2 |
356 // | |
357 // v |
358 // if ^
359 // / \ |
360 // / \ |
361 // v v |
362 // false true |
363 // / \ |
364 // / ----+
365 // |
366 // v
367 // exit
368 //
369 //
370 // after clone loop
371 //
372 // stmt1
373 // |
374 // v
375 // loop predicate
376 // / \
377 // clone / \ orig
378 // / \
379 // / \
380 // v v
381 // +---->loop clone loop<----+
382 // | | | |
383 // | stmt2 clone stmt2 |
384 // | | | |
385 // | v v |
386 // ^ if clone If ^
387 // | / \ / \ |
388 // | / \ / \ |
389 // | v v v v |
390 // | true false false true |
391 // | / \ / \ |
392 // +---- \ / ----+
393 // \ /
394 // 1v v2
395 // region
396 // |
397 // v
398 // exit
399 //
400 //
401 // after peel and predicate move
402 //
403 // stmt1
404 // /
405 // /
406 // clone / orig
407 // /
408 // / +----------+
409 // / | |
410 // / loop predicate |
411 // / | |
412 // v v |
413 // TOP-->loop clone loop<----+ |
414 // | | | |
415 // stmt2 clone stmt2 | |
416 // | | | ^
417 // v v | |
418 // if clone If ^ |
419 // / \ / \ | |
420 // / \ / \ | |
421 // v v v v | |
422 // true false false true | |
423 // | \ / \ | |
424 // | \ / ----+ ^
425 // | \ / |
426 // | 1v v2 |
427 // v region |
428 // | | |
429 // | v |
430 // | exit |
431 // | |
432 // +--------------->-----------------+
433 //
434 //
435 // final graph
436 //
437 // stmt1
438 // |
439 // v
440 // stmt2 clone
441 // |
442 // v
443 // if clone
444 // / |
445 // / |
446 // v v
447 // false true
448 // | |
449 // | v
450 // | loop predicate
451 // | |
452 // | v
453 // | loop<----+
454 // | | |
455 // | stmt2 |
456 // | | |
457 // | v |
458 // v if ^
459 // | / \ |
460 // | / \ |
461 // | v v |
462 // | false true |
463 // | | \ |
464 // v v --+
465 // region
466 // |
467 // v
468 // exit
469 //
304 void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { 470 void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
305 471
306 C->set_major_progress(); 472 C->set_major_progress();
307 // Peeling a 'main' loop in a pre/main/post situation obfuscates the 473 // Peeling a 'main' loop in a pre/main/post situation obfuscates the
308 // 'pre' loop from the main and the 'pre' can no longer have its 474 // 'pre' loop from the main and the 'pre' can no longer have its
313 if (TraceLoopOpts) { 479 if (TraceLoopOpts) {
314 tty->print("Peel "); 480 tty->print("Peel ");
315 loop->dump_head(); 481 loop->dump_head();
316 } 482 }
317 #endif 483 #endif
318 Node *h = loop->_head; 484 Node* head = loop->_head;
319 if (h->is_CountedLoop()) { 485 bool counted_loop = head->is_CountedLoop();
320 CountedLoopNode *cl = h->as_CountedLoop(); 486 if (counted_loop) {
487 CountedLoopNode *cl = head->as_CountedLoop();
321 assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); 488 assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
322 cl->set_trip_count(cl->trip_count() - 1); 489 cl->set_trip_count(cl->trip_count() - 1);
323 if (cl->is_main_loop()) { 490 if (cl->is_main_loop()) {
324 cl->set_normal_loop(); 491 cl->set_normal_loop();
325 #ifndef PRODUCT 492 #ifndef PRODUCT
328 loop->dump_head(); 495 loop->dump_head();
329 } 496 }
330 #endif 497 #endif
331 } 498 }
332 } 499 }
500 Node* entry = head->in(LoopNode::EntryControl);
333 501
334 // Step 1: Clone the loop body. The clone becomes the peeled iteration. 502 // Step 1: Clone the loop body. The clone becomes the peeled iteration.
335 // The pre-loop illegally has 2 control users (old & new loops). 503 // The pre-loop illegally has 2 control users (old & new loops).
336 clone_loop( loop, old_new, dom_depth(loop->_head) ); 504 clone_loop( loop, old_new, dom_depth(head) );
337
338 505
339 // Step 2: Make the old-loop fall-in edges point to the peeled iteration. 506 // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
340 // Do this by making the old-loop fall-in edges act as if they came 507 // Do this by making the old-loop fall-in edges act as if they came
341 // around the loopback from the prior iteration (follow the old-loop 508 // around the loopback from the prior iteration (follow the old-loop
342 // backedges) and then map to the new peeled iteration. This leaves 509 // backedges) and then map to the new peeled iteration. This leaves
343 // the pre-loop with only 1 user (the new peeled iteration), but the 510 // the pre-loop with only 1 user (the new peeled iteration), but the
344 // peeled-loop backedge has 2 users. 511 // peeled-loop backedge has 2 users.
345 for (DUIterator_Fast jmax, j = loop->_head->fast_outs(jmax); j < jmax; j++) { 512 Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
346 Node* old = loop->_head->fast_out(j); 513 new_exit_value = move_loop_predicates(entry, new_exit_value);
347 if( old->in(0) == loop->_head && old->req() == 3 && 514 _igvn.hash_delete(head);
348 (old->is_Loop() || old->is_Phi()) ) { 515 head->set_req(LoopNode::EntryControl, new_exit_value);
349 Node *new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; 516 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
350 if( !new_exit_value ) // Backedge value is ALSO loop invariant? 517 Node* old = head->fast_out(j);
518 if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
519 new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
519 520 if (!new_exit_value) // Backedge value is ALSO loop invariant?
351 // Then loop body backedge value remains the same. 521 // Then loop body backedge value remains the same.
352 new_exit_value = old->in(LoopNode::LoopBackControl); 522 new_exit_value = old->in(LoopNode::LoopBackControl);
353 _igvn.hash_delete(old); 523 _igvn.hash_delete(old);
354 old->set_req(LoopNode::EntryControl, new_exit_value); 524 old->set_req(LoopNode::EntryControl, new_exit_value);
355 } 525 }
356 } 526 }
357 527
358 528
359 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the 529 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
360 // extra backedge user. 530 // extra backedge user.
361 Node *nnn = old_new[loop->_head->_idx]; 531 Node* new_head = old_new[head->_idx];
362 _igvn.hash_delete(nnn); 532 _igvn.hash_delete(new_head);
363 nnn->set_req(LoopNode::LoopBackControl, C->top()); 533 new_head->set_req(LoopNode::LoopBackControl, C->top());
364 for (DUIterator_Fast j2max, j2 = nnn->fast_outs(j2max); j2 < j2max; j2++) { 534 for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
365 Node* use = nnn->fast_out(j2); 535 Node* use = new_head->fast_out(j2);
366 if( use->in(0) == nnn && use->req() == 3 && use->is_Phi() ) { 536 if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
367 _igvn.hash_delete(use); 537 _igvn.hash_delete(use);
368 use->set_req(LoopNode::LoopBackControl, C->top()); 538 use->set_req(LoopNode::LoopBackControl, C->top());
369 } 539 }
370 } 540 }
371 541
372 542
373 // Step 4: Correct dom-depth info. Set to loop-head depth. 543 // Step 4: Correct dom-depth info. Set to loop-head depth.
374 int dd = dom_depth(loop->_head); 544 int dd = dom_depth(head);
375 set_idom(loop->_head, loop->_head->in(1), dd); 545 set_idom(head, head->in(1), dd);
376 for (uint j3 = 0; j3 < loop->_body.size(); j3++) { 546 for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
377 Node *old = loop->_body.at(j3); 547 Node *old = loop->_body.at(j3);
378 Node *nnn = old_new[old->_idx]; 548 Node *nnn = old_new[old->_idx];
379 if (!has_ctrl(nnn)) 549 if (!has_ctrl(nnn))
380 set_idom(nnn, idom(nnn), dd-1); 550 set_idom(nnn, idom(nnn), dd-1);
381 // While we're at it, remove any SafePoints from the peeled code 551 // While we're at it, remove any SafePoints from the peeled code
382 if( old->Opcode() == Op_SafePoint ) { 552 if (old->Opcode() == Op_SafePoint) {
383 Node *nnn = old_new[old->_idx]; 553 Node *nnn = old_new[old->_idx];
384 lazy_replace(nnn,nnn->in(TypeFunc::Control)); 554 lazy_replace(nnn,nnn->in(TypeFunc::Control));
385 } 555 }
386 } 556 }
387 557
390 peeled_dom_test_elim(loop,old_new); 560 peeled_dom_test_elim(loop,old_new);
391 561
392 loop->record_for_igvn(); 562 loop->record_for_igvn();
393 } 563 }
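At the source level, the four steps above amount to the rewrite sketched below: the peeled copy runs once, so loop-invariant tests in it (and the moved predicates) execute only once. A hypothetical sketch, not code from this file; test and body are illustrative stand-ins:

static bool test(int i);   // hypothetical loop condition
static void body(int i);   // hypothetical loop body

static void peeled_form(int i) {
  if (test(i)) {           // peeled (cloned) first iteration
    body(i);
    i++;
    while (test(i)) {      // original loop, now entered from the peel
      body(i);
      i++;
    }
  }
}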
394 564
565 #define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop
566
395 //------------------------------policy_maximally_unroll------------------------ 567 //------------------------------policy_maximally_unroll------------------------
396 // Return exact loop trip count, or 0 if not maximally unrolling 568 // Calculate exact loop trip count and return true if loop can be maximally
569 // unrolled.
397 bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const { 570 bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
398 CountedLoopNode *cl = _head->as_CountedLoop(); 571 CountedLoopNode *cl = _head->as_CountedLoop();
399 assert(cl->is_normal_loop(), ""); 572 assert(cl->is_normal_loop(), "");
400 573 if (!cl->is_valid_counted_loop())
401 Node *init_n = cl->init_trip(); 574 return false; // Malformed counted loop
402 Node *limit_n = cl->limit(); 575
403 576 if (!cl->has_exact_trip_count()) {
404 // Non-constant bounds 577 // Trip count is not exact.
405 if (init_n == NULL || !init_n->is_Con() ||
406 limit_n == NULL || !limit_n->is_Con() ||
407 // protect against stride not being a constant
408 !cl->stride_is_con()) {
409 return false; 578 return false;
410 } 579 }
411 int init = init_n->get_int(); 580
412 int limit = limit_n->get_int(); 581 uint trip_count = cl->trip_count();
413 int span = limit - init; 582 // Note, max_juint is used to indicate unknown trip count.
414 int stride = cl->stride_con(); 583 assert(trip_count > 1, "one-iteration loop should be optimized out already");
415 584 assert(trip_count < max_juint, "exact trip_count should be less than max_juint.");
416 if (init >= limit || stride > span) {
417 // return a false (no maximally unroll) and the regular unroll/peel
418 // route will make a small mess which CCP will fold away.
419 return false;
420 }
421 uint trip_count = span/stride; // trip_count can be greater than 2 Gig.
422 assert( (int)trip_count*stride == span, "must divide evenly" );
423 585
424 // Real policy: if we maximally unroll, does it get too big? 586 // Real policy: if we maximally unroll, does it get too big?
425 // Allow the unrolled mess to get larger than standard loop 587 // Allow the unrolled mess to get larger than standard loop
426 // size. After all, it will no longer be a loop. 588 // size. After all, it will no longer be a loop.
427 uint body_size = _body.size(); 589 uint body_size = _body.size();
428 uint unroll_limit = (uint)LoopUnrollLimit * 4; 590 uint unroll_limit = (uint)LoopUnrollLimit * 4;
429 assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits"); 591 assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
430 cl->set_trip_count(trip_count);
431 if (trip_count > unroll_limit || body_size > unroll_limit) { 592 if (trip_count > unroll_limit || body_size > unroll_limit) {
593 return false;
594 }
595
596 // Take into account that after unroll conjoined heads and tails will fold,
597 // otherwise policy_unroll() may allow more unrolling than max unrolling.
598 uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
599 uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
600 if (body_size != tst_body_size) // Check for int overflow
601 return false;
602 if (new_body_size > unroll_limit ||
603 // Unrolling can result in a large amount of node construction
604 new_body_size >= MaxNodeLimit - phase->C->unique()) {
432 return false; 605 return false;
433 } 606 }
434 607
435 // Currently we don't have a policy to optimize one-iteration loops. 608 // Currently we don't have a policy to optimize one-iteration loops.
436 // The maximally-unrolling transformation is used for that: 609 // The maximally-unrolling transformation is used for that:
437 // it is peeled and the original loop becomes unreachable (dead). 610 // it is peeled and the original loop becomes unreachable (dead).
438 if (trip_count == 1) 611 // Also fully unroll a loop with few iterations, regardless of the
612 // conditions below, since the following loop optimizations will split
613 // such loops anyway (pre-main-post).
614 if (trip_count <= 3)
439 return true; 615 return true;
440 616
441 // Do not unroll a loop with String intrinsics code. 617 // Do not unroll a loop with String intrinsics code.
442 // String intrinsics are large and have loops. 618 // String intrinsics are large and have loops.
443 for (uint k = 0; k < _body.size(); k++) { 619 for (uint k = 0; k < _body.size(); k++) {
450 return false; 626 return false;
451 } 627 }
452 } // switch 628 } // switch
453 } 629 }
454 630
455 if (body_size <= unroll_limit) { 631 return true; // Do maximally unroll
456 uint new_body_size = body_size * trip_count;
457 if (new_body_size <= unroll_limit &&
458 body_size == new_body_size / trip_count &&
459 // Unrolling can result in a large amount of node construction
460 new_body_size < MaxNodeLimit - phase->C->unique()) {
461 return true; // maximally unroll
462 }
463 }
464
465 return false; // Do not maximally unroll
466 } 632 }
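The size estimate above counts the EMPTY_LOOP_SIZE overhead once rather than per copy, and the round-trip division is a cheap overflow check. A worked instance with assumed numbers (body_size = 27, trip_count = 10):

// new_body_size = 7 + (27 - 7) * 10 = 207   (not 27 * 10 = 270, since the
//                                            conjoined heads and tails fold away)
// tst_body_size = (207 - 7) / 10 + 7 = 27   == body_size, so no int overflow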
467 633
468 634
469 //------------------------------policy_unroll---------------------------------- 635 //------------------------------policy_unroll----------------------------------
470 // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if 636 // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
472 bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const { 638 bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
473 639
474 CountedLoopNode *cl = _head->as_CountedLoop(); 640 CountedLoopNode *cl = _head->as_CountedLoop();
475 assert(cl->is_normal_loop() || cl->is_main_loop(), ""); 641 assert(cl->is_normal_loop() || cl->is_main_loop(), "");
476 642
477 // protect against stride not being a constant 643 if (!cl->is_valid_counted_loop())
478 if (!cl->stride_is_con()) return false; 644 return false; // Malformed counted loop
479 645
480 // protect against over-unrolling 646 // protect against over-unrolling
481 if (cl->trip_count() <= 1) return false; 647 if (cl->trip_count() <= 1) return false;
648
649 // Check for stride being a small enough constant
650 if (abs(cl->stride_con()) > (1<<3)) return false;
482 651
483 int future_unroll_ct = cl->unrolled_count() * 2; 652 int future_unroll_ct = cl->unrolled_count() * 2;
484 653
485 // Don't unroll if the next round of unrolling would push us 654 // Don't unroll if the next round of unrolling would push us
486 // over the expected trip count of the loop. One is subtracted 655 // over the expected trip count of the loop. One is subtracted
558 if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; 727 if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
559 // Normal case: loop too big 728 // Normal case: loop too big
560 return false; 729 return false;
561 } 730 }
562 731
563 // Check for stride being a small enough constant
564 if (abs(cl->stride_con()) > (1<<3)) return false;
565
566 // Unroll once! (Each trip will soon do double iterations) 732 // Unroll once! (Each trip will soon do double iterations)
567 return true; 733 return true;
568 } 734 }
569 735
570 //------------------------------policy_align----------------------------------- 736 //------------------------------policy_align-----------------------------------
954 #ifndef PRODUCT 1120 #ifndef PRODUCT
955 if (PrintOpto && VerifyLoopOptimizations) { 1121 if (PrintOpto && VerifyLoopOptimizations) {
956 tty->print("Unrolling "); 1122 tty->print("Unrolling ");
957 loop->dump_head(); 1123 loop->dump_head();
958 } else if (TraceLoopOpts) { 1124 } else if (TraceLoopOpts) {
959 tty->print("Unroll %d ", loop_head->unrolled_count()*2); 1125 if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
1126 tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
1127 } else {
1128 tty->print("Unroll %d ", loop_head->unrolled_count()*2);
1129 }
960 loop->dump_head(); 1130 loop->dump_head();
961 } 1131 }
962 #endif 1132 #endif
963 1133
964 // Remember loop node count before unrolling to detect 1134 // Remember loop node count before unrolling to detect
1629 // Micro-benchmark spamming. Policy is to always remove empty loops. 1799 // Micro-benchmark spamming. Policy is to always remove empty loops.
1630 // The 'DO' part is to replace the trip counter with the value it will 1800 // The 'DO' part is to replace the trip counter with the value it will
1631 // have on the last iteration. This will break the loop. 1801 // have on the last iteration. This will break the loop.
1632 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 1802 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
1633 // The body must be no bigger than an empty loop 1803 // The body must be no bigger than an empty loop
1634 if (_body.size() > 7/*number of nodes in an empty loop*/) 1804 if (_body.size() > EMPTY_LOOP_SIZE)
1635 return false; 1805 return false;
1636 1806
1637 if (!_head->is_CountedLoop()) 1807 if (!_head->is_CountedLoop())
1638 return false; // Dead loop 1808 return false; // Dead loop
1639 CountedLoopNode *cl = _head->as_CountedLoop(); 1809 CountedLoopNode *cl = _head->as_CountedLoop();
1656 #endif 1826 #endif
1657 1827
1658 // main and post loops have explicitly created zero trip guard 1828 // main and post loops have explicitly created zero trip guard
1659 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 1829 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
1660 if (needs_guard) { 1830 if (needs_guard) {
1831 // Skip the guard if the init and limit value ranges do not overlap.
1832 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
1833 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
1834 int stride_con = cl->stride_con();
1835 if (stride_con > 0) {
1836 needs_guard = (init_t->_hi >= limit_t->_lo);
1837 } else {
1838 needs_guard = (init_t->_lo <= limit_t->_hi);
1839 }
1840 }
1841 if (needs_guard) {
1661 // Check for an obvious zero trip guard. 1842 // Check for an obvious zero trip guard.
1662 Node* inctrl = cl->in(LoopNode::EntryControl); 1843 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
1663 if (inctrl->Opcode() == Op_IfTrue) { 1844 if (inctrl->Opcode() == Op_IfTrue) {
1664 // The test should look like just the backedge of a CountedLoop 1845 // The test should look like just the backedge of a CountedLoop
1665 Node* iff = inctrl->in(0); 1846 Node* iff = inctrl->in(0);
1666 if (iff->is_If()) { 1847 if (iff->is_If()) {
1667 Node* bol = iff->in(1); 1848 Node* bol = iff->in(1);
1700 phase->_igvn.replace_node(phi,final); 1881 phase->_igvn.replace_node(phi,final);
1701 phase->C->set_major_progress(); 1882 phase->C->set_major_progress();
1702 return true; 1883 return true;
1703 } 1884 }
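The new range test lets the compiler drop the zero-trip guard when the loop is provably entered. A minimal sketch with plain ints standing in for the TypeInt bounds (hypothetical helper, not a HotSpot API):

static bool needs_zero_trip_guard(int init_lo, int init_hi,
                                  int limit_lo, int limit_hi,
                                  int stride_con) {
  return (stride_con > 0) ? (init_hi >= limit_lo)   // init could reach limit
                          : (init_lo <= limit_hi);  // downward-counting case
}
// e.g. init in [0,10], limit in [100,200], stride > 0:
// needs_zero_trip_guard(0, 10, 100, 200, 1) == false -> loop always entered.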
1704 1885
1886 //------------------------------policy_do_one_iteration_loop-------------------
1887 // Convert a one-iteration loop into normal code.
1888 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
1889 if (!_head->as_Loop()->is_valid_counted_loop())
1890 return false; // Only for counted loop
1891
1892 CountedLoopNode *cl = _head->as_CountedLoop();
1893 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
1894 return false;
1895 }
1896
1897 #ifndef PRODUCT
1898 if (TraceLoopOpts) {
1899 tty->print("OneIteration ");
1900 this->dump_head();
1901 }
1902 #endif
1903
1904 Node *init_n = cl->init_trip();
1905 #ifdef ASSERT
1906 // Loop boundaries should be constant since trip count is exact.
1907 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
1908 #endif
1909 // Replace the phi at loop head with the value of the init_trip.
1910 // Then the CountedLoopEnd will collapse (backedge will not be taken)
1911 // and all loop-invariant uses of the exit values will be correct.
1912 phase->_igvn.replace_node(cl->phi(), cl->init_trip());
1913 phase->C->set_major_progress();
1914 return true;
1915 }
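The effect of replacing the phi with init_trip, sketched at the source level (illustrative only):

// before: exact trip_count == 1
//   for (int i = 0; i < 1; i++) { use(i); }
// after: the induction phi becomes init_trip (0), the CountedLoopEnd's
// backedge test folds (the backedge is never taken), and the body remains
// as straight-line code:
//   use(0);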
1705 1916
1706 //============================================================================= 1917 //=============================================================================
1707 //------------------------------iteration_split_impl--------------------------- 1918 //------------------------------iteration_split_impl---------------------------
1708 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 1919 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
1920 // Compute exact loop trip count if possible.
1921 compute_exact_trip_count(phase);
1922
1923 // Convert a one-iteration loop into normal code.
1924 if (policy_do_one_iteration_loop(phase))
1925 return true;
1926
1709 // Check and remove empty loops (spam micro-benchmarks) 1927 // Check and remove empty loops (spam micro-benchmarks)
1710 if( policy_do_remove_empty_loop(phase) ) 1928 if (policy_do_remove_empty_loop(phase))
1711 return true; // Here we removed an empty loop 1929 return true; // Here we removed an empty loop
1712 1930
1713 bool should_peel = policy_peeling(phase); // Should we peel? 1931 bool should_peel = policy_peeling(phase); // Should we peel?
1714 1932
1715 bool should_unswitch = policy_unswitching(phase); 1933 bool should_unswitch = policy_unswitching(phase);
1716 1934
1717 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 1935 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
1718 // This removes loop-invariant tests (usually null checks). 1936 // This removes loop-invariant tests (usually null checks).
1719 if( !_head->is_CountedLoop() ) { // Non-counted loop 1937 if (!_head->is_CountedLoop()) { // Non-counted loop
1720 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 1938 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
1721 // Partial peel succeeded so terminate this round of loop opts 1939 // Partial peel succeeded so terminate this round of loop opts
1722 return false; 1940 return false;
1723 } 1941 }
1724 if( should_peel ) { // Should we peel? 1942 if (should_peel) { // Should we peel?
1725 #ifndef PRODUCT 1943 #ifndef PRODUCT
1726 if (PrintOpto) tty->print_cr("should_peel"); 1944 if (PrintOpto) tty->print_cr("should_peel");
1727 #endif 1945 #endif
1728 phase->do_peeling(this,old_new); 1946 phase->do_peeling(this,old_new);
1729 } else if( should_unswitch ) { 1947 } else if (should_unswitch) {
1730 phase->do_unswitching(this, old_new); 1948 phase->do_unswitching(this, old_new);
1731 } 1949 }
1732 return true; 1950 return true;
1733 } 1951 }
1734 CountedLoopNode *cl = _head->as_CountedLoop(); 1952 CountedLoopNode *cl = _head->as_CountedLoop();
1735 1953
1736 if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops 1954 if (!cl->loopexit()) return true; // Ignore various kinds of broken loops
1737 1955
1738 // Do nothing special to pre- and post- loops 1956 // Do nothing special to pre- and post- loops
1739 if( cl->is_pre_loop() || cl->is_post_loop() ) return true; 1957 if (cl->is_pre_loop() || cl->is_post_loop()) return true;
1740 1958
1741 // Compute loop trip count from profile data 1959 // Compute loop trip count from profile data
1742 compute_profile_trip_cnt(phase); 1960 compute_profile_trip_cnt(phase);
1743 1961
1744 // Before attempting fancy unrolling, RCE or alignment, see if we want 1962 // Before attempting fancy unrolling, RCE or alignment, see if we want
1745 // to completely unroll this loop or do loop unswitching. 1963 // to completely unroll this loop or do loop unswitching.
1746 if( cl->is_normal_loop() ) { 1964 if (cl->is_normal_loop()) {
1747 if (should_unswitch) { 1965 if (should_unswitch) {
1748 phase->do_unswitching(this, old_new); 1966 phase->do_unswitching(this, old_new);
1749 return true; 1967 return true;
1750 } 1968 }
1751 bool should_maximally_unroll = policy_maximally_unroll(phase); 1969 bool should_maximally_unroll = policy_maximally_unroll(phase);
1752 if( should_maximally_unroll ) { 1970 if (should_maximally_unroll) {
1753 // Here we did some unrolling and peeling. Eventually we will 1971 // Here we did some unrolling and peeling. Eventually we will
1754 // completely unroll this loop and it will no longer be a loop. 1972 // completely unroll this loop and it will no longer be a loop.
1755 phase->do_maximally_unroll(this,old_new); 1973 phase->do_maximally_unroll(this,old_new);
1756 return true; 1974 return true;
1757 } 1975 }
1758 } 1976 }
1759 1977
1978 // Skip next optimizations if running low on nodes. Note that
1979 // policy_unswitching and policy_maximally_unroll have this check.
1980 uint nodes_left = MaxNodeLimit - phase->C->unique();
1981 if ((2 * _body.size()) > nodes_left) {
1982 return true;
1983 }
1760 1984
1761 // Counted loops may be peeled, may need some iterations run up 1985 // Counted loops may be peeled, may need some iterations run up
1762 // front for RCE, and may want to align loop refs to a cache 1986 // front for RCE, and may want to align loop refs to a cache
1763 // line. Thus we clone a full loop up front whose trip count is 1987 // line. Thus we clone a full loop up front whose trip count is
1764 // at least 1 (if peeling), but may be several more. 1988 // at least 1 (if peeling), but may be several more.
1785 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2009 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
1786 2010
1787 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2011 // If we have any of these conditions (RCE, alignment, unrolling) met, then
1788 // we switch to the pre-/main-/post-loop model. This model also covers 2012 // we switch to the pre-/main-/post-loop model. This model also covers
1789 // peeling. 2013 // peeling.
1790 if( should_rce || should_align || should_unroll ) { 2014 if (should_rce || should_align || should_unroll) {
1791 if( cl->is_normal_loop() ) // Convert to 'pre/main/post' loops 2015 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops
1792 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2016 phase->insert_pre_post_loops(this,old_new, !may_rce_align);
1793 2017
1794 // Adjust the pre- and main-loop limits to let the pre and post loops run 2018 // Adjust the pre- and main-loop limits to let the pre and post loops run
1795 // with full checks, but the main-loop with no checks. Remove said 2019 // with full checks, but the main-loop with no checks. Remove said
1796 // checks from the main body. 2020 // checks from the main body.
1797 if( should_rce ) 2021 if (should_rce)
1798 phase->do_range_check(this,old_new); 2022 phase->do_range_check(this,old_new);
1799 2023
1800 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2024 // Double loop body for unrolling. Adjust the minimum-trip test (will do
1801 // twice as many iterations as before) and the main body limit (only do 2025 // twice as many iterations as before) and the main body limit (only do
1802 // an even number of trips). If we are peeling, we might enable some RCE 2026 // an even number of trips). If we are peeling, we might enable some RCE
1803 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2027 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
1804 // peeling. 2028 // peeling.
1805 if( should_unroll && !should_peel ) 2029 if (should_unroll && !should_peel)
1806 phase->do_unroll(this,old_new, true); 2030 phase->do_unroll(this,old_new, true);
1807 2031
1808 // Adjust the pre-loop limits to align the main body 2032 // Adjust the pre-loop limits to align the main body
1809 // iterations. 2033 // iterations.
1810 if( should_align ) 2034 if (should_align)
1811 Unimplemented(); 2035 Unimplemented();
1812 2036
1813 } else { // Else we have an unchanged counted loop 2037 } else { // Else we have an unchanged counted loop
1814 if( should_peel ) // Might want to peel but do nothing else 2038 if (should_peel) // Might want to peel but do nothing else
1815 phase->do_peeling(this,old_new); 2039 phase->do_peeling(this,old_new);
1816 } 2040 }
1817 return true; 2041 return true;
1818 } 2042 }
1819 2043
1859 if (_next && !_next->iteration_split(phase, old_new)) 2083 if (_next && !_next->iteration_split(phase, old_new))
1860 return false; 2084 return false;
1861 return true; 2085 return true;
1862 } 2086 }
1863 2087
1864 //-------------------------------is_uncommon_trap_proj---------------------------- 2088
1865 // Return true if proj is of the form "proj->[region->..]call_uct" 2089 //=============================================================================
1866 bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
1867 int path_limit = 10;
1868 assert(proj, "invalid argument");
1869 Node* out = proj;
1870 for (int ct = 0; ct < path_limit; ct++) {
1871 out = out->unique_ctrl_out();
1872 if (out == NULL || out->is_Root() || out->is_Start())
1873 return false;
1874 if (out->is_CallStaticJava()) {
1875 int req = out->as_CallStaticJava()->uncommon_trap_request();
1876 if (req != 0) {
1877 Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
1878 if (trap_reason == reason || reason == Deoptimization::Reason_none) {
1879 return true;
1880 }
1881 }
1882 return false; // don't do further after call
1883 }
1884 }
1885 return false;
1886 }
1887
1888 //-------------------------------is_uncommon_trap_if_pattern-------------------------
1889 // Return true for "if(test)-> proj -> ...
1890 // |
1891 // V
1892 // other_proj->[region->..]call_uct"
1893 //
1894 // "must_reason_predicate" means the uct reason must be Reason_predicate
1895 bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
1896 Node *in0 = proj->in(0);
1897 if (!in0->is_If()) return false;
1898 // Variation of a dead If node.
1899 if (in0->outcnt() < 2) return false;
1900 IfNode* iff = in0->as_If();
1901
1902 // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
1903 if (reason != Deoptimization::Reason_none) {
1904 if (iff->in(1)->Opcode() != Op_Conv2B ||
1905 iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
1906 return false;
1907 }
1908 }
1909
1910 ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
1911 return is_uncommon_trap_proj(other_proj, reason);
1912 }
1913
1914 //-------------------------------register_control-------------------------
1915 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
1916 assert(n->is_CFG(), "must be control node");
1917 _igvn.register_new_node_with_optimizer(n);
1918 loop->_body.push(n);
1919 set_loop(n, loop);
1920 // When called from beautify_loops() idom is not constructed yet.
1921 if (_idom != NULL) {
1922 set_idom(n, pred, dom_depth(pred));
1923 }
1924 }
1925
1926 //------------------------------create_new_if_for_predicate------------------------
1927 // create a new if above the uct_if_pattern for the predicate to be promoted.
1928 //
1929 // before after
1930 // ---------- ----------
1931 // ctrl ctrl
1932 // | |
1933 // | |
1934 // v v
1935 // iff new_iff
1936 // / \ / \
1937 // / \ / \
1938 // v v v v
1939 // uncommon_proj cont_proj if_uct if_cont
1940 // \ | | | |
1941 // \ | | | |
1942 // v v v | v
1943 // rgn loop | iff
1944 // | | / \
1945 // | | / \
1946 // v | v v
1947 // uncommon_trap | uncommon_proj cont_proj
1948 // \ \ | |
1949 // \ \ | |
1950 // v v v v
1951 // rgn loop
1952 // |
1953 // |
1954 // v
1955 // uncommon_trap
1956 //
1957 //
1958 // We will create a region to guard the uct call if one is not already there.
1959 // The true projection (if_cont) of the new_iff is returned.
1960 // This code is also used to clone predicates to cloned loops.
1961 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
1962 Deoptimization::DeoptReason reason) {
1963 assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
1964 IfNode* iff = cont_proj->in(0)->as_If();
1965
1966 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
1967 Node *rgn = uncommon_proj->unique_ctrl_out();
1968 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
1969
1970 if (!rgn->is_Region()) { // create a region to guard the call
1971 assert(rgn->is_Call(), "must be call uct");
1972 CallNode* call = rgn->as_Call();
1973 IdealLoopTree* loop = get_loop(call);
1974 rgn = new (C, 1) RegionNode(1);
1975 rgn->add_req(uncommon_proj);
1976 register_control(rgn, loop, uncommon_proj);
1977 _igvn.hash_delete(call);
1978 call->set_req(0, rgn);
1979 // When called from beautify_loops() idom is not constructed yet.
1980 if (_idom != NULL) {
1981 set_idom(call, rgn, dom_depth(rgn));
1982 }
1983 }
1984
1985 Node* entry = iff->in(0);
1986 if (new_entry != NULL) {
1987 // Cloning the predicate to a new location.
1988 entry = new_entry;
1989 }
1990 // Create new_iff
1991 IdealLoopTree* lp = get_loop(entry);
1992 IfNode *new_iff = new (C, 2) IfNode(entry, NULL, iff->_prob, iff->_fcnt);
1993 register_control(new_iff, lp, entry);
1994 Node *if_cont = new (C, 1) IfTrueNode(new_iff);
1995 Node *if_uct = new (C, 1) IfFalseNode(new_iff);
1996 if (cont_proj->is_IfFalse()) {
1997 // Swap
1998 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
1999 }
2000 register_control(if_cont, lp, new_iff);
2001 register_control(if_uct, get_loop(rgn), new_iff);
2002
2003 // if_uct to rgn
2004 _igvn.hash_delete(rgn);
2005 rgn->add_req(if_uct);
2006 // When called from beautify_loops() idom is not constructed yet.
2007 if (_idom != NULL) {
2008 Node* ridom = idom(rgn);
2009 Node* nrdom = dom_lca(ridom, new_iff);
2010 set_idom(rgn, nrdom, dom_depth(rgn));
2011 }
2012 // rgn must have no phis
2013 assert(!rgn->as_Region()->has_phi(), "region must have no phis");
2014
2015 if (new_entry == NULL) {
2016 // Attach if_cont to iff
2017 _igvn.hash_delete(iff);
2018 iff->set_req(0, if_cont);
2019 if (_idom != NULL) {
2020 set_idom(iff, if_cont, dom_depth(iff));
2021 }
2022 }
2023 return if_cont->as_Proj();
2024 }
2025
2026 //--------------------------find_predicate_insertion_point-------------------
2027 // Find a good location to insert a predicate
2028 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
2029 if (start_c == NULL || !start_c->is_Proj())
2030 return NULL;
2031 if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
2032 return start_c->as_Proj();
2033 }
2034 return NULL;
2035 }
2036
2037 //--------------------------find_predicate------------------------------------
2038 // Find a predicate
2039 Node* PhaseIdealLoop::find_predicate(Node* entry) {
2040 Node* predicate = NULL;
2041 if (UseLoopPredicate) {
2042 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
2043 if (predicate != NULL) { // right pattern that can be used by loop predication
2044 assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
2045 return entry;
2046 }
2047 }
2048 return NULL;
2049 }
2050
2051 //------------------------------Invariance-----------------------------------
2052 // Helper class for loop_predication_impl to compute invariance on the fly and
2053 // clone invariants.
2054 class Invariance : public StackObj {
2055 VectorSet _visited, _invariant;
2056 Node_Stack _stack;
2057 VectorSet _clone_visited;
2058 Node_List _old_new; // map of old to new (clone)
2059 IdealLoopTree* _lpt;
2060 PhaseIdealLoop* _phase;
2061
2062 // Helper function to set up a node for the invariance computation.
2063 // If n is a known invariant, mark it directly. Otherwise, consider
2064 // pushing n onto the stack for further processing.
2065 void visit(Node* use, Node* n) {
2066 if (_lpt->is_invariant(n)) { // known invariant
2067 _invariant.set(n->_idx);
2068 } else if (!n->is_CFG()) {
2069 Node *n_ctrl = _phase->ctrl_or_self(n);
2070 Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
2071 if (_phase->is_dominator(n_ctrl, u_ctrl)) {
2072 _stack.push(n, n->in(0) == NULL ? 1 : 0);
2073 }
2074 }
2075 }
2076
2077 // Compute invariance for "n" and (possibly) all its inputs recursively
2078 // on the fly
2079 void compute_invariance(Node* n) {
2080 assert(_visited.test(n->_idx), "must be");
2081 visit(n, n);
2082 while (_stack.is_nonempty()) {
2083 Node* n = _stack.node();
2084 uint idx = _stack.index();
2085 if (idx == n->req()) { // all inputs are processed
2086 _stack.pop();
2087 // n is invariant if its inputs are all invariant
2088 bool all_inputs_invariant = true;
2089 for (uint i = 0; i < n->req(); i++) {
2090 Node* in = n->in(i);
2091 if (in == NULL) continue;
2092 assert(_visited.test(in->_idx), "must have visited input");
2093 if (!_invariant.test(in->_idx)) { // bad guy
2094 all_inputs_invariant = false;
2095 break;
2096 }
2097 }
2098 if (all_inputs_invariant) {
2099 _invariant.set(n->_idx); // I am an invariant too
2100 }
2101 } else { // process next input
2102 _stack.set_index(idx + 1);
2103 Node* m = n->in(idx);
2104 if (m != NULL && !_visited.test_set(m->_idx)) {
2105 visit(n, m);
2106 }
2107 }
2108 }
2109 }
2110
2111 // Helper function to set up _old_new map for clone_nodes.
2112 // If n is a known invariant, set up directly ("clone" of n == n).
2113 // Otherwise, push n onto the stack for real cloning.
2114 void clone_visit(Node* n) {
2115 assert(_invariant.test(n->_idx), "must be invariant");
2116 if (_lpt->is_invariant(n)) { // known invariant
2117 _old_new.map(n->_idx, n);
2118 } else { // to be cloned
2119 assert (!n->is_CFG(), "should not see CFG here");
2120 _stack.push(n, n->in(0) == NULL ? 1 : 0);
2121 }
2122 }
2123
2124 // Clone "n" and (possibly) all its inputs recursively
2125 void clone_nodes(Node* n, Node* ctrl) {
2126 clone_visit(n);
2127 while (_stack.is_nonempty()) {
2128 Node* n = _stack.node();
2129 uint idx = _stack.index();
2130 if (idx == n->req()) { // all inputs processed, clone n!
2131 _stack.pop();
2132 // clone invariant node
2133 Node* n_cl = n->clone();
2134 _old_new.map(n->_idx, n_cl);
2135 _phase->register_new_node(n_cl, ctrl);
2136 for (uint i = 0; i < n->req(); i++) {
2137 Node* in = n_cl->in(i);
2138 if (in == NULL) continue;
2139 n_cl->set_req(i, _old_new[in->_idx]);
2140 }
2141 } else { // process next input
2142 _stack.set_index(idx + 1);
2143 Node* m = n->in(idx);
2144 if (m != NULL && !_clone_visited.test_set(m->_idx)) {
2145 clone_visit(m); // visit the input
2146 }
2147 }
2148 }
2149 }
2150
2151 public:
2152 Invariance(Arena* area, IdealLoopTree* lpt) :
2153 _lpt(lpt), _phase(lpt->_phase),
2154 _visited(area), _invariant(area), _stack(area, 10 /* guess */),
2155 _clone_visited(area), _old_new(area)
2156 {}
2157
2158 // Map old to n for invariance computation and clone
2159 void map_ctrl(Node* old, Node* n) {
2160 assert(old->is_CFG() && n->is_CFG(), "must be");
2161 _old_new.map(old->_idx, n); // "clone" of old is n
2162 _invariant.set(old->_idx); // old is invariant
2163 _clone_visited.set(old->_idx);
2164 }
2165
2166 // Driver function to compute invariance
2167 bool is_invariant(Node* n) {
2168 if (!_visited.test_set(n->_idx))
2169 compute_invariance(n);
2170 return (_invariant.test(n->_idx) != 0);
2171 }
2172
2173 // Driver function to clone invariant
2174 Node* clone(Node* n, Node* ctrl) {
2175 assert(ctrl->is_CFG(), "must be");
2176 assert(_invariant.test(n->_idx), "must be an invariant");
2177 if (!_clone_visited.test(n->_idx))
2178 clone_nodes(n, ctrl);
2179 return _old_new[n->_idx];
2180 }
2181 };
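For reference, the class is driven as in loop_predication_impl below: is_invariant() memoizes the lazy computation and clone() materializes an invariant expression above the loop entry. A sketch of typical use, mirroring the calls that appear later in this file:

// ResourceArea* area = Thread::current()->resource_area();
// Invariance invar(area, loop);
// if (invar.is_invariant(bol)) {              // lazily computed, memoized
//   Node* bol_clone = invar.clone(bol, ctrl); // cloned at 'ctrl', above the loop
// }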
2182
2183 //------------------------------is_range_check_if -----------------------------------
2184 // Returns true if the predicate of iff has the "scale*iv + offset u< load_range(ptr)" form.
2185 // Note: this function is specifically designed for loop predication. We require load_range
2186 // and offset to be loop invariant, computed on the fly by "invar".
2187 bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
2188 if (!is_loop_exit(iff)) {
2189 return false;
2190 }
2191 if (!iff->in(1)->is_Bool()) {
2192 return false;
2193 }
2194 const BoolNode *bol = iff->in(1)->as_Bool();
2195 if (bol->_test._test != BoolTest::lt) {
2196 return false;
2197 }
2198 if (!bol->in(1)->is_Cmp()) {
2199 return false;
2200 }
2201 const CmpNode *cmp = bol->in(1)->as_Cmp();
2202 if (cmp->Opcode() != Op_CmpU ) {
2203 return false;
2204 }
2205 Node* range = cmp->in(2);
2206 if (range->Opcode() != Op_LoadRange) {
2207 const TypeInt* tint = phase->_igvn.type(range)->isa_int();
2208 if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) {
2209 // Allow predication on positive values that aren't LoadRanges.
2210 // This allows optimization of loops where the length of the
2211 // array is a known value and doesn't need to be loaded back
2212 // from the array.
2213 return false;
2214 }
2215 }
2216 if (!invar.is_invariant(range)) {
2217 return false;
2218 }
2219 Node *iv = _head->as_CountedLoop()->phi();
2220 int scale = 0;
2221 Node *offset = NULL;
2222 if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
2223 return false;
2224 }
2225 if (offset && !invar.is_invariant(offset)) { // offset must be invariant
2226 return false;
2227 }
2228 return true;
2229 }
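The shape being matched is the bounds check a compiled array access produces. An illustrative Java-level form (names hypothetical):

// for (int i = init; i < limit; i += stride) {
//   ... a[scale*i + offset] ...   // compiled guard: scale*i + offset u< a.length
// }
// where a.length is the LoadRange input of the CmpU and offset is loop invariant.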
2230
2231 //------------------------------rc_predicate-----------------------------------
2232 // Create a range check predicate
2233 //
2234 // for (i = init; i < limit; i += stride) {
2235 // a[scale*i+offset]
2236 // }
2237 //
2238 // Compute max(scale*i + offset) for init <= i < limit and build the predicate
2239 // as "max(scale*i + offset) u< a.length".
2240 //
2241 // There are two cases for max(scale*i + offset):
2242 // (1) stride*scale > 0
2243 // max(scale*i + offset) = scale*(limit-stride) + offset
2244 // (2) stride*scale < 0
2245 // max(scale*i + offset) = scale*init + offset
2246 BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
2247 int scale, Node* offset,
2248 Node* init, Node* limit, Node* stride,
2249 Node* range, bool upper) {
2250 DEBUG_ONLY(ttyLocker ttyl);
2251 if (TraceLoopPredicate) tty->print("rc_predicate ");
2252
2253 Node* max_idx_expr = init;
2254 int stride_con = stride->get_int();
2255 if ((stride_con > 0) == (scale > 0) == upper) {
2256 max_idx_expr = new (C, 3) SubINode(limit, stride);
2257 register_new_node(max_idx_expr, ctrl);
2258 if (TraceLoopPredicate) tty->print("(limit - stride) ");
2259 } else {
2260 if (TraceLoopPredicate) tty->print("init ");
2261 }
2262
2263 if (scale != 1) {
2264 ConNode* con_scale = _igvn.intcon(scale);
2265 max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
2266 register_new_node(max_idx_expr, ctrl);
2267 if (TraceLoopPredicate) tty->print("* %d ", scale);
2268 }
2269
2270 if (offset && (!offset->is_Con() || offset->get_int() != 0)) {
2271 max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
2272 register_new_node(max_idx_expr, ctrl);
2273 if (TraceLoopPredicate)
2274 if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
2275 else tty->print("+ offset ");
2276 }
2277
2278 CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
2279 register_new_node(cmp, ctrl);
2280 BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
2281 register_new_node(bol, ctrl);
2282
2283 if (TraceLoopPredicate) tty->print_cr("<u range");
2284 return bol;
2285 }
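A worked instance of the two cases above, with assumed values init = 0, limit = 100, stride = +1, scale = 1, offset = 0:

// upper bound (stride*scale > 0):
//   max(scale*i + offset) = scale*(limit - stride) + offset = 1*(100 - 1) + 0 = 99
//   emitted predicate:  99 u< a.length
// lower bound (upper == false): the expression starts from init:
//   1*0 + 0 = 0, emitted predicate:  0 u< a.length
// If both predicates hold, every index the loop touches is in range.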
2286
2287 //------------------------------ loop_predication_impl--------------------------
2288 // Insert loop predicates for null checks and range checks
2289 bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
2290 if (!UseLoopPredicate) return false;
2291
2292 if (!loop->_head->is_Loop()) {
2293 // Could be a simple region when irreducible loops are present.
2294 return false;
2295 }
2296
2297 if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
2298 // do nothing for infinite loops
2299 return false;
2300 }
2301
2302 CountedLoopNode *cl = NULL;
2303 if (loop->_head->is_CountedLoop()) {
2304 cl = loop->_head->as_CountedLoop();
2305 // do nothing for iteration-split loops
2306 if (!cl->is_normal_loop()) return false;
2307 }
2308
2309 LoopNode *lpn = loop->_head->as_Loop();
2310 Node* entry = lpn->in(LoopNode::EntryControl);
2311
2312 ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
2313 if (!predicate_proj) {
2314 #ifndef PRODUCT
2315 if (TraceLoopPredicate) {
2316 tty->print("missing predicate:");
2317 loop->dump_head();
2318 lpn->dump(1);
2319 }
2320 #endif
2321 return false;
2322 }
2323 ConNode* zero = _igvn.intcon(0);
2324 set_ctrl(zero, C->root());
2325
2326 ResourceArea *area = Thread::current()->resource_area();
2327 Invariance invar(area, loop);
2328
2329 // Create list of if-projs such that a newer proj dominates all older
2330 // projs in the list, and they all dominate loop->tail()
2331 Node_List if_proj_list(area);
2332 LoopNode *head = loop->_head->as_Loop();
2333 Node *current_proj = loop->tail(); //start from tail
2334 while ( current_proj != head ) {
2335 if (loop == get_loop(current_proj) && // still in the loop?
2336 current_proj->is_Proj() && // is it a projection?
2337 current_proj->in(0)->Opcode() == Op_If) { // is it an if projection?
2338 if_proj_list.push(current_proj);
2339 }
2340 current_proj = idom(current_proj);
2341 }
2342
2343 bool hoisted = false; // true if at least one proj is promoted
2344 while (if_proj_list.size() > 0) {
2345 // The following is set to non-null when a predicate can be hoisted
2346 ProjNode* new_predicate_proj = NULL;
2347
2348 ProjNode* proj = if_proj_list.pop()->as_Proj();
2349 IfNode* iff = proj->in(0)->as_If();
2350
2351 if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
2352 if (loop->is_loop_exit(iff)) {
2353 // stop processing the remaining projs in the list because their execution
2354 // depends on the condition of "iff" (iff->in(1)).
2355 break;
2356 } else {
2357 // Both arms are inside the loop. There are two cases:
2358 // (1) there is one backward branch. In this case, any remaining proj
2359 // in the if_proj list post-dominates "iff". So, the condition of "iff"
2360 // does not directly determine the execution of the remaining projs, and we
2361 // can safely continue.
2362 // (2) both arms go forward, i.e. a diamond shape. In this case, "proj"
2363 // does not dominate loop->tail(), so it cannot be in the if_proj list.
2364 continue;
2365 }
2366 }
2367
2368 Node* test = iff->in(1);
2369 if (!test->is_Bool()) { // Conv2B, ...
2370 continue;
2371 }
2372 BoolNode* bol = test->as_Bool();
2373 if (invar.is_invariant(bol)) {
2374 // Invariant test
2375 new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
2376 Deoptimization::Reason_predicate);
2377 Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
2378 BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
2379
2380 // Negate test if necessary
2381 bool negated = false;
2382 if (proj->_con != predicate_proj->_con) {
2383 new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
2384 register_new_node(new_predicate_bol, ctrl);
2385 negated = true;
2386 }
2387 IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
2388 _igvn.hash_delete(new_predicate_iff);
2389 new_predicate_iff->set_req(1, new_predicate_bol);
2390 #ifndef PRODUCT
2391 if (TraceLoopPredicate) {
2392 tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
2393 loop->dump_head();
2394 } else if (TraceLoopOpts) {
2395 tty->print("Predicate IC ");
2396 loop->dump_head();
2397 }
2398 #endif
2399 } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
2400 assert(proj->_con == predicate_proj->_con, "must match");
2401
2402 // Range check for counted loops
2403 const Node* cmp = bol->in(1)->as_Cmp();
2404 Node* idx = cmp->in(1);
2405 assert(!invar.is_invariant(idx), "index is variant");
2406 assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
2407 Node* rng = cmp->in(2);
2408 assert(invar.is_invariant(rng), "range must be invariant");
2409 int scale = 1;
2410 Node* offset = zero;
2411 bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
2412 assert(ok, "must be index expression");
2413
2414 Node* init = cl->init_trip();
2415 Node* limit = cl->limit();
2416 Node* stride = cl->stride();
2417
2418 // Build if's for the upper and lower bound tests. The
2419 // lower_bound test will dominate the upper bound test and all
2420 // cloned or created nodes will use the lower bound test as
2421 // their declared control.
2422 ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
2423 ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
2424 assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
2425 Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
2426
2427 // Perform cloning to keep Invariance state correct since the
2428 // late schedule will place invariant things in the loop.
2429 rng = invar.clone(rng, ctrl);
2430 if (offset && offset != zero) {
2431 assert(invar.is_invariant(offset), "offset must be loop invariant");
2432 offset = invar.clone(offset, ctrl);
2433 }
2434
2435 // Test the lower bound
2436 Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false);
2437 IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
2438 _igvn.hash_delete(lower_bound_iff);
2439 lower_bound_iff->set_req(1, lower_bound_bol);
2440 if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
2441
2442 // Test the upper bound
2443 Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true);
2444 IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
2445 _igvn.hash_delete(upper_bound_iff);
2446 upper_bound_iff->set_req(1, upper_bound_bol);
2447 if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", upper_bound_iff->_idx);
2448
2449 // Fall through into rest of the clean up code which will move
2450 // any dependent nodes onto the upper bound test.
2451 new_predicate_proj = upper_bound_proj;
2452
2453 #ifndef PRODUCT
2454 if (TraceLoopOpts && !TraceLoopPredicate) {
2455 tty->print("Predicate RC ");
2456 loop->dump_head();
2457 }
2458 #endif
2459 } else {
2460 // Loop variant check (for example, range check in non-counted loop)
2461 // with uncommon trap.
2462 continue;
2463 }
2464 assert(new_predicate_proj != NULL, "sanity");
2465 // Success - attach condition (new_predicate_bol) to predicate if
2466 invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
2467
2468 // Eliminate the old If in the loop body
2469 dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );
2470
2471 hoisted = true;
2472 C->set_major_progress();
2473 } // end while
2474
2475 #ifndef PRODUCT
2476 // report that loop predication has actually been performed
2477 // for this loop
2478 if (TraceLoopPredicate && hoisted) {
2479 tty->print("Loop Predication Performed:");
2480 loop->dump_head();
2481 }
2482 #endif
2483
2484 return hoisted;
2485 }
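Putting the pieces together, the net effect on a counted loop with an in-body range check, in illustrative Java-level pseudocode ('deoptimize' stands for the uncommon trap):

// before predication: the check runs every iteration
//   for (int i = 0; i < n; i++) {
//     if (!(i u< a.length)) deoptimize();
//     a[i] = 0;
//   }
// after: both bounds are tested once at the Reason_predicate point
//   if (!(0 u< a.length))       deoptimize();  // lower bound, i = init
//   if (!((n - 1) u< a.length)) deoptimize();  // upper bound, i = limit - stride
//   for (int i = 0; i < n; i++) a[i] = 0;      // body is check-free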
2486
2487 //------------------------------loop_predication--------------------------------
2488 // driver routine for loop predication optimization
2489 bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
2490 bool hoisted = false;
2491 // Recursively promote predicates
2492 if ( _child ) {
2493 hoisted = _child->loop_predication( phase);
2494 }
2495
2496 // self
2497 if (!_irreducible && !tail()->is_top()) {
2498 hoisted |= phase->loop_predication_impl(this);
2499 }
2500
2501 if ( _next ) { //sibling
2502 hoisted |= _next->loop_predication( phase);
2503 }
2504
2505 return hoisted;
2506 }
2507
2508
2509 // Process all the loops in the loop tree and replace any fill 2090 // Process all the loops in the loop tree and replace any fill
2510 // patterns with an intrinsic version. 2091 // patterns with an intrinsic version.
2511 bool PhaseIdealLoop::do_intrinsify_fill() { 2092 bool PhaseIdealLoop::do_intrinsify_fill() {
2512 bool changed = false; 2093 bool changed = false;
2513 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2094 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2623 } 2204 }
2624 #endif 2205 #endif
2625 if (value != head->phi()) { 2206 if (value != head->phi()) {
2626 msg = "unhandled shift in address"; 2207 msg = "unhandled shift in address";
2627 } else { 2208 } else {
2628 found_index = true; 2209 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
2629 shift = n; 2210 msg = "scale doesn't match";
2630 assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match"); 2211 } else {
2212 found_index = true;
2213 shift = n;
2214 }
2631 } 2215 }
2632 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2216 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2633 if (n->in(1) == head->phi()) { 2217 if (n->in(1) == head->phi()) {
2634 found_index = true; 2218 found_index = true;
2635 conv = n; 2219 conv = n;
2759 Node* shift = NULL; 2343 Node* shift = NULL;
2760 Node* offset = NULL; 2344 Node* offset = NULL;
2761 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2345 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2762 return false; 2346 return false;
2763 } 2347 }
2348
2349 #ifndef PRODUCT
2350 if (TraceLoopOpts) {
2351 tty->print("ArrayFill ");
2352 lpt->dump_head();
2353 }
2354 #endif
2764 2355
2765 // Now replace the whole loop body by a call to a fill routine that 2356 // Now replace the whole loop body by a call to a fill routine that
2766 // covers the same region as the loop. 2357 // covers the same region as the loop.
2767 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2358 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2768 2359
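For context, match_fill_loop accepts loops of roughly the shape below, which the fill intrinsification then collapses into one call to a fill stub covering the same index range (an illustrative sketch, not the matcher's exact conditions):

// for (int i = init; i < limit; i++) {
//   a[i] = store_value;        // single store, address = base + scale*i + offset
// }
// is replaced by a single runtime call, roughly:
//   fill(a, store_value, init, limit);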