comparison src/share/vm/opto/library_call.cpp @ 6804:e626685e9f6c

7193318: C2: remove number of inputs requirement from Node's new operator
Summary: Deleted placement new operator of Node - node(size_t, Compile *, int).
Reviewed-by: kvn, twisti
Contributed-by: bharadwaj.yadavalli@oracle.com
author kvn
date Thu, 27 Sep 2012 09:38:42 -0700
parents 7eca5de9e0b6
children d8ce2825b193 65d07d9ee446
comparing 6803:06f52c4d0e18 (left) with 6804:e626685e9f6c (right)
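The right-hand column below is a mechanical rewrite of every node allocation site: with the placement new operator node(size_t, Compile *, int) deleted, callers no longer pass the number of inputs, so new (C, n) SomeNode(...) becomes new (C) SomeNode(...). The toy C++ sketch below illustrates that idiom only; Arena, Compile, and CmpINode here are simplified stand-ins for illustration, not the real HotSpot declarations, and only the shape of the placement-new call sites is meant to match the hunks that follow.

    // Minimal, self-contained sketch of the allocation idiom this change removes.
    // All types here are toy stand-ins, not HotSpot's; they only model the call-site shape.
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct Arena {                              // toy arena; a real arena frees in bulk
      void* alloc(std::size_t bytes) { return std::malloc(bytes); }
    };

    struct Compile {                            // toy stand-in for C2's Compile object ("C")
      Arena _node_arena;
      Arena* node_arena() { return &_node_arena; }
    };

    struct Node {
      // Old form (the one this changeset deletes): callers supplied the input
      // count so extra edge space could be reserved along with the node object.
      void* operator new(std::size_t size, Compile* C, int req) {
        return C->node_arena()->alloc(size + req * sizeof(Node*));
      }
      // New form: only the Compile context is needed; the input count is no
      // longer part of the allocation call.
      void* operator new(std::size_t size, Compile* C) {
        return C->node_arena()->alloc(size);
      }
      virtual ~Node() {}
    };

    struct CmpINode : Node {                    // toy node type standing in for an opto node
      Node* in1; Node* in2;
      CmpINode(Node* a, Node* b) : in1(a), in2(b) {}
    };

    int main() {
      Compile compile;
      Compile* C = &compile;
      Node* a = new (C) Node();
      Node* b = new (C) Node();

      // Before the change, every call site carried the input count:
      //   Node* cmp = new (C, 3) CmpINode(a, b);
      // After the change (what the right-hand column of the diff shows):
      Node* cmp = new (C) CmpINode(a, b);

      std::printf("allocated node at %p\n", (void*)cmp);
      return 0;                                 // arena memory intentionally not freed here
    }

Under the new scheme only the second placement argument disappears from each call site; the constructor arguments themselves are unchanged, which is why every hunk below differs solely in the new (C, n) versus new (C) prefix.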
812 return NULL; 812 return NULL;
813 } 813 }
814 814
815 IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN); 815 IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
816 816
817 Node* if_slow = _gvn.transform( new (C, 1) IfTrueNode(iff) ); 817 Node* if_slow = _gvn.transform( new (C) IfTrueNode(iff) );
818 if (if_slow == top()) { 818 if (if_slow == top()) {
819 // The slow branch is never taken. No need to build this guard. 819 // The slow branch is never taken. No need to build this guard.
820 return NULL; 820 return NULL;
821 } 821 }
822 822
823 if (region != NULL) 823 if (region != NULL)
824 region->add_req(if_slow); 824 region->add_req(if_slow);
825 825
826 Node* if_fast = _gvn.transform( new (C, 1) IfFalseNode(iff) ); 826 Node* if_fast = _gvn.transform( new (C) IfFalseNode(iff) );
827 set_control(if_fast); 827 set_control(if_fast);
828 828
829 return if_slow; 829 return if_slow;
830 } 830 }
831 831
840 Node* *pos_index) { 840 Node* *pos_index) {
841 if (stopped()) 841 if (stopped())
842 return NULL; // already stopped 842 return NULL; // already stopped
843 if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint] 843 if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
844 return NULL; // index is already adequately typed 844 return NULL; // index is already adequately typed
845 Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) ); 845 Node* cmp_lt = _gvn.transform( new (C) CmpINode(index, intcon(0)) );
846 Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) ); 846 Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) );
847 Node* is_neg = generate_guard(bol_lt, region, PROB_MIN); 847 Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
848 if (is_neg != NULL && pos_index != NULL) { 848 if (is_neg != NULL && pos_index != NULL) {
849 // Emulate effect of Parse::adjust_map_after_if. 849 // Emulate effect of Parse::adjust_map_after_if.
850 Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS); 850 Node* ccast = new (C) CastIINode(index, TypeInt::POS);
851 ccast->set_req(0, control()); 851 ccast->set_req(0, control());
852 (*pos_index) = _gvn.transform(ccast); 852 (*pos_index) = _gvn.transform(ccast);
853 } 853 }
854 return is_neg; 854 return is_neg;
855 } 855 }
858 Node* *pos_index) { 858 Node* *pos_index) {
859 if (stopped()) 859 if (stopped())
860 return NULL; // already stopped 860 return NULL; // already stopped
861 if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint] 861 if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
862 return NULL; // index is already adequately typed 862 return NULL; // index is already adequately typed
863 Node* cmp_le = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) ); 863 Node* cmp_le = _gvn.transform( new (C) CmpINode(index, intcon(0)) );
864 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le); 864 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
865 Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) ); 865 Node* bol_le = _gvn.transform( new (C) BoolNode(cmp_le, le_or_eq) );
866 Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN); 866 Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
867 if (is_notp != NULL && pos_index != NULL) { 867 if (is_notp != NULL && pos_index != NULL) {
868 // Emulate effect of Parse::adjust_map_after_if. 868 // Emulate effect of Parse::adjust_map_after_if.
869 Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS1); 869 Node* ccast = new (C) CastIINode(index, TypeInt::POS1);
870 ccast->set_req(0, control()); 870 ccast->set_req(0, control());
871 (*pos_index) = _gvn.transform(ccast); 871 (*pos_index) = _gvn.transform(ccast);
872 } 872 }
873 return is_notp; 873 return is_notp;
874 } 874 }
896 bool zero_offset = _gvn.type(offset) == TypeInt::ZERO; 896 bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
897 if (zero_offset && subseq_length->eqv_uncast(array_length)) 897 if (zero_offset && subseq_length->eqv_uncast(array_length))
898 return NULL; // common case of whole-array copy 898 return NULL; // common case of whole-array copy
899 Node* last = subseq_length; 899 Node* last = subseq_length;
900 if (!zero_offset) // last += offset 900 if (!zero_offset) // last += offset
901 last = _gvn.transform( new (C, 3) AddINode(last, offset)); 901 last = _gvn.transform( new (C) AddINode(last, offset));
902 Node* cmp_lt = _gvn.transform( new (C, 3) CmpUNode(array_length, last) ); 902 Node* cmp_lt = _gvn.transform( new (C) CmpUNode(array_length, last) );
903 Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) ); 903 Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) );
904 Node* is_over = generate_guard(bol_lt, region, PROB_MIN); 904 Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
905 return is_over; 905 return is_over;
906 } 906 }
907 907
908 908
909 //--------------------------generate_current_thread-------------------- 909 //--------------------------generate_current_thread--------------------
910 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) { 910 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
911 ciKlass* thread_klass = env()->Thread_klass(); 911 ciKlass* thread_klass = env()->Thread_klass();
912 const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull); 912 const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
913 Node* thread = _gvn.transform(new (C, 1) ThreadLocalNode()); 913 Node* thread = _gvn.transform(new (C) ThreadLocalNode());
914 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset())); 914 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
915 Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT); 915 Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
916 tls_output = thread; 916 tls_output = thread;
917 return threadObj; 917 return threadObj;
918 } 918 }
943 switch (opcode) { 943 switch (opcode) {
944 case Op_StrIndexOf: 944 case Op_StrIndexOf:
945 // Get length of string 2 945 // Get length of string 2
946 str2_len = load_String_length(no_ctrl, str2); 946 str2_len = load_String_length(no_ctrl, str2);
947 947
948 result = new (C, 6) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS), 948 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
949 str1_start, str1_len, str2_start, str2_len); 949 str1_start, str1_len, str2_start, str2_len);
950 break; 950 break;
951 case Op_StrComp: 951 case Op_StrComp:
952 // Get length of string 2 952 // Get length of string 2
953 str2_len = load_String_length(no_ctrl, str2); 953 str2_len = load_String_length(no_ctrl, str2);
954 954
955 result = new (C, 6) StrCompNode(control(), memory(TypeAryPtr::CHARS), 955 result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
956 str1_start, str1_len, str2_start, str2_len); 956 str1_start, str1_len, str2_start, str2_len);
957 break; 957 break;
958 case Op_StrEquals: 958 case Op_StrEquals:
959 result = new (C, 5) StrEqualsNode(control(), memory(TypeAryPtr::CHARS), 959 result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
960 str1_start, str2_start, str1_len); 960 str1_start, str2_start, str1_len);
961 break; 961 break;
962 default: 962 default:
963 ShouldNotReachHere(); 963 ShouldNotReachHere();
964 return NULL; 964 return NULL;
977 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) { 977 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
978 978
979 Node* result = NULL; 979 Node* result = NULL;
980 switch (opcode) { 980 switch (opcode) {
981 case Op_StrIndexOf: 981 case Op_StrIndexOf:
982 result = new (C, 6) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS), 982 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
983 str1_start, cnt1, str2_start, cnt2); 983 str1_start, cnt1, str2_start, cnt2);
984 break; 984 break;
985 case Op_StrComp: 985 case Op_StrComp:
986 result = new (C, 6) StrCompNode(control(), memory(TypeAryPtr::CHARS), 986 result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
987 str1_start, cnt1, str2_start, cnt2); 987 str1_start, cnt1, str2_start, cnt2);
988 break; 988 break;
989 case Op_StrEquals: 989 case Op_StrEquals:
990 result = new (C, 5) StrEqualsNode(control(), memory(TypeAryPtr::CHARS), 990 result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
991 str1_start, str2_start, cnt1); 991 str1_start, str2_start, cnt1);
992 break; 992 break;
993 default: 993 default:
994 ShouldNotReachHere(); 994 ShouldNotReachHere();
995 return NULL; 995 return NULL;
1050 if (stopped()) { 1050 if (stopped()) {
1051 return true; 1051 return true;
1052 } 1052 }
1053 1053
1054 // paths (plus control) merge 1054 // paths (plus control) merge
1055 RegionNode* region = new (C, 5) RegionNode(5); 1055 RegionNode* region = new (C) RegionNode(5);
1056 Node* phi = new (C, 5) PhiNode(region, TypeInt::BOOL); 1056 Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
1057 1057
1058 // does source == target string? 1058 // does source == target string?
1059 Node* cmp = _gvn.transform(new (C, 3) CmpPNode(receiver, argument)); 1059 Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
1060 Node* bol = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq)); 1060 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
1061 1061
1062 Node* if_eq = generate_slow_guard(bol, NULL); 1062 Node* if_eq = generate_slow_guard(bol, NULL);
1063 if (if_eq != NULL) { 1063 if (if_eq != NULL) {
1064 // receiver == argument 1064 // receiver == argument
1065 phi->init_req(2, intcon(1)); 1065 phi->init_req(2, intcon(1));
1071 1071
1072 if (!stopped()) { 1072 if (!stopped()) {
1073 _sp += nargs; // gen_instanceof might do an uncommon trap 1073 _sp += nargs; // gen_instanceof might do an uncommon trap
1074 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass))); 1074 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
1075 _sp -= nargs; 1075 _sp -= nargs;
1076 Node* cmp = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1))); 1076 Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
1077 Node* bol = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::ne)); 1077 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
1078 1078
1079 Node* inst_false = generate_guard(bol, NULL, PROB_MIN); 1079 Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
1080 //instanceOf == true, fallthrough 1080 //instanceOf == true, fallthrough
1081 1081
1082 if (inst_false != NULL) { 1082 if (inst_false != NULL) {
1087 1087
1088 if (!stopped()) { 1088 if (!stopped()) {
1089 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass); 1089 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1090 1090
1091 // Properly cast the argument to String 1091 // Properly cast the argument to String
1092 argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type)); 1092 argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
1093 // This path is taken only when argument's type is String:NotNull. 1093 // This path is taken only when argument's type is String:NotNull.
1094 argument = cast_not_null(argument, false); 1094 argument = cast_not_null(argument, false);
1095 1095
1096 Node* no_ctrl = NULL; 1096 Node* no_ctrl = NULL;
1097 1097
1110 1110
1111 // Get length of argument 1111 // Get length of argument
1112 Node* argument_cnt = load_String_length(no_ctrl, argument); 1112 Node* argument_cnt = load_String_length(no_ctrl, argument);
1113 1113
1114 // Check for receiver count != argument count 1114 // Check for receiver count != argument count
1115 Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) ); 1115 Node* cmp = _gvn.transform( new(C) CmpINode(receiver_cnt, argument_cnt) );
1116 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::ne) ); 1116 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::ne) );
1117 Node* if_ne = generate_slow_guard(bol, NULL); 1117 Node* if_ne = generate_slow_guard(bol, NULL);
1118 if (if_ne != NULL) { 1118 if (if_ne != NULL) {
1119 phi->init_req(4, intcon(0)); 1119 phi->init_req(4, intcon(0));
1120 region->init_req(4, if_ne); 1120 region->init_req(4, if_ne);
1121 } 1121 }
1146 _sp += 2; 1146 _sp += 2;
1147 Node *argument2 = pop(); 1147 Node *argument2 = pop();
1148 Node *argument1 = pop(); 1148 Node *argument1 = pop();
1149 1149
1150 Node* equals = 1150 Node* equals =
1151 _gvn.transform(new (C, 4) AryEqNode(control(), memory(TypeAryPtr::CHARS), 1151 _gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS),
1152 argument1, argument2) ); 1152 argument1, argument2) );
1153 push(equals); 1153 push(equals);
1154 return true; 1154 return true;
1155 } 1155 }
1156 1156
1321 1321
1322 ciInstanceKlass* str_klass = env()->String_klass(); 1322 ciInstanceKlass* str_klass = env()->String_klass();
1323 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass); 1323 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);
1324 1324
1325 // Make the merge point 1325 // Make the merge point
1326 RegionNode* result_rgn = new (C, 4) RegionNode(4); 1326 RegionNode* result_rgn = new (C) RegionNode(4);
1327 Node* result_phi = new (C, 4) PhiNode(result_rgn, TypeInt::INT); 1327 Node* result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
1328 Node* no_ctrl = NULL; 1328 Node* no_ctrl = NULL;
1329 1329
1330 // Get start addr of source string 1330 // Get start addr of source string
1331 Node* source = load_String_value(no_ctrl, receiver); 1331 Node* source = load_String_value(no_ctrl, receiver);
1332 Node* source_offset = load_String_offset(no_ctrl, receiver); 1332 Node* source_offset = load_String_offset(no_ctrl, receiver);
1342 1342
1343 // Get length of source string 1343 // Get length of source string
1344 Node* substr_cnt = load_String_length(no_ctrl, argument); 1344 Node* substr_cnt = load_String_length(no_ctrl, argument);
1345 1345
1346 // Check for substr count > string count 1346 // Check for substr count > string count
1347 Node* cmp = _gvn.transform( new(C, 3) CmpINode(substr_cnt, source_cnt) ); 1347 Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) );
1348 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::gt) ); 1348 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) );
1349 Node* if_gt = generate_slow_guard(bol, NULL); 1349 Node* if_gt = generate_slow_guard(bol, NULL);
1350 if (if_gt != NULL) { 1350 if (if_gt != NULL) {
1351 result_phi->init_req(2, intcon(-1)); 1351 result_phi->init_req(2, intcon(-1));
1352 result_rgn->init_req(2, if_gt); 1352 result_rgn->init_req(2, if_gt);
1353 } 1353 }
1354 1354
1355 if (!stopped()) { 1355 if (!stopped()) {
1356 // Check for substr count == 0 1356 // Check for substr count == 0
1357 cmp = _gvn.transform( new(C, 3) CmpINode(substr_cnt, intcon(0)) ); 1357 cmp = _gvn.transform( new(C) CmpINode(substr_cnt, intcon(0)) );
1358 bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); 1358 bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
1359 Node* if_zero = generate_slow_guard(bol, NULL); 1359 Node* if_zero = generate_slow_guard(bol, NULL);
1360 if (if_zero != NULL) { 1360 if (if_zero != NULL) {
1361 result_phi->init_req(3, intcon(0)); 1361 result_phi->init_req(3, intcon(0));
1362 result_rgn->init_req(3, if_zero); 1362 result_rgn->init_req(3, if_zero);
1363 } 1363 }
1454 // Pop a double argument to a math function from the stack 1454 // Pop a double argument to a math function from the stack
1455 // rounding it if necessary. 1455 // rounding it if necessary.
1456 Node * LibraryCallKit::pop_math_arg() { 1456 Node * LibraryCallKit::pop_math_arg() {
1457 Node *arg = pop_pair(); 1457 Node *arg = pop_pair();
1458 if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 ) 1458 if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
1459 arg = _gvn.transform( new (C, 2) RoundDoubleNode(0, arg) ); 1459 arg = _gvn.transform( new (C) RoundDoubleNode(0, arg) );
1460 return arg; 1460 return arg;
1461 } 1461 }
1462 1462
1463 //------------------------------inline_trig---------------------------------- 1463 //------------------------------inline_trig----------------------------------
1464 // Inline sin/cos/tan instructions, if possible. If rounding is required, do 1464 // Inline sin/cos/tan instructions, if possible. If rounding is required, do
1468 Node* arg = pop_math_arg(); 1468 Node* arg = pop_math_arg();
1469 Node* trig = NULL; 1469 Node* trig = NULL;
1470 1470
1471 switch (id) { 1471 switch (id) {
1472 case vmIntrinsics::_dsin: 1472 case vmIntrinsics::_dsin:
1473 trig = _gvn.transform((Node*)new (C, 2) SinDNode(arg)); 1473 trig = _gvn.transform((Node*)new (C) SinDNode(arg));
1474 break; 1474 break;
1475 case vmIntrinsics::_dcos: 1475 case vmIntrinsics::_dcos:
1476 trig = _gvn.transform((Node*)new (C, 2) CosDNode(arg)); 1476 trig = _gvn.transform((Node*)new (C) CosDNode(arg));
1477 break; 1477 break;
1478 case vmIntrinsics::_dtan: 1478 case vmIntrinsics::_dtan:
1479 trig = _gvn.transform((Node*)new (C, 2) TanDNode(arg)); 1479 trig = _gvn.transform((Node*)new (C) TanDNode(arg));
1480 break; 1480 break;
1481 default: 1481 default:
1482 assert(false, "bad intrinsic was passed in"); 1482 assert(false, "bad intrinsic was passed in");
1483 return false; 1483 return false;
1484 } 1484 }
1518 // requires a special machine instruction to load it. Instead we'll try 1518 // requires a special machine instruction to load it. Instead we'll try
1519 // the 'easy' case. If we really need the extra range +/- PI/2 we'll 1519 // the 'easy' case. If we really need the extra range +/- PI/2 we'll
1520 // probably do the math inside the SIN encoding. 1520 // probably do the math inside the SIN encoding.
1521 1521
1522 // Make the merge point 1522 // Make the merge point
1523 RegionNode *r = new (C, 3) RegionNode(3); 1523 RegionNode *r = new (C) RegionNode(3);
1524 Node *phi = new (C, 3) PhiNode(r,Type::DOUBLE); 1524 Node *phi = new (C) PhiNode(r,Type::DOUBLE);
1525 1525
1526 // Flatten arg so we need only 1 test 1526 // Flatten arg so we need only 1 test
1527 Node *abs = _gvn.transform(new (C, 2) AbsDNode(arg)); 1527 Node *abs = _gvn.transform(new (C) AbsDNode(arg));
1528 // Node for PI/4 constant 1528 // Node for PI/4 constant
1529 Node *pi4 = makecon(TypeD::make(pi_4)); 1529 Node *pi4 = makecon(TypeD::make(pi_4));
1530 // Check PI/4 : abs(arg) 1530 // Check PI/4 : abs(arg)
1531 Node *cmp = _gvn.transform(new (C, 3) CmpDNode(pi4,abs)); 1531 Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs));
1532 // Check: If PI/4 < abs(arg) then go slow 1532 // Check: If PI/4 < abs(arg) then go slow
1533 Node *bol = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::lt ) ); 1533 Node *bol = _gvn.transform( new (C) BoolNode( cmp, BoolTest::lt ) );
1534 // Branch either way 1534 // Branch either way
1535 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 1535 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1536 set_control(opt_iff(r,iff)); 1536 set_control(opt_iff(r,iff));
1537 1537
1538 // Set fast path result 1538 // Set fast path result
1556 CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1556 CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1557 "Tan", NULL, arg, top()); 1557 "Tan", NULL, arg, top());
1558 break; 1558 break;
1559 } 1559 }
1560 assert(control()->in(0) == call, ""); 1560 assert(control()->in(0) == call, "");
1561 Node* slow_result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms)); 1561 Node* slow_result = _gvn.transform(new (C) ProjNode(call,TypeFunc::Parms));
1562 r->init_req(1,control()); 1562 r->init_req(1,control());
1563 phi->init_req(1,slow_result); 1563 phi->init_req(1,slow_result);
1564 1564
1565 // Post-merge 1565 // Post-merge
1566 set_control(_gvn.transform(r)); 1566 set_control(_gvn.transform(r));
1577 //------------------------------inline_sqrt------------------------------------- 1577 //------------------------------inline_sqrt-------------------------------------
1578 // Inline square root instruction, if possible. 1578 // Inline square root instruction, if possible.
1579 bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) { 1579 bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
1580 assert(id == vmIntrinsics::_dsqrt, "Not square root"); 1580 assert(id == vmIntrinsics::_dsqrt, "Not square root");
1581 _sp += arg_size(); // restore stack pointer 1581 _sp += arg_size(); // restore stack pointer
1582 push_pair(_gvn.transform(new (C, 2) SqrtDNode(0, pop_math_arg()))); 1582 push_pair(_gvn.transform(new (C) SqrtDNode(0, pop_math_arg())));
1583 return true; 1583 return true;
1584 } 1584 }
1585 1585
1586 //------------------------------inline_abs------------------------------------- 1586 //------------------------------inline_abs-------------------------------------
1587 // Inline absolute value instruction, if possible. 1587 // Inline absolute value instruction, if possible.
1588 bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) { 1588 bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
1589 assert(id == vmIntrinsics::_dabs, "Not absolute value"); 1589 assert(id == vmIntrinsics::_dabs, "Not absolute value");
1590 _sp += arg_size(); // restore stack pointer 1590 _sp += arg_size(); // restore stack pointer
1591 push_pair(_gvn.transform(new (C, 2) AbsDNode(pop_math_arg()))); 1591 push_pair(_gvn.transform(new (C) AbsDNode(pop_math_arg())));
1592 return true; 1592 return true;
1593 } 1593 }
1594 1594
1595 void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { 1595 void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
1596 //------------------- 1596 //-------------------
1597 //result=(result.isNaN())? funcAddr():result; 1597 //result=(result.isNaN())? funcAddr():result;
1598 // Check: If isNaN() by checking result!=result? then either trap 1598 // Check: If isNaN() by checking result!=result? then either trap
1599 // or go to runtime 1599 // or go to runtime
1600 Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); 1600 Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result,result));
1601 // Build the boolean node 1601 // Build the boolean node
1602 Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); 1602 Node* bolisnum = _gvn.transform( new (C) BoolNode(cmpisnan, BoolTest::eq) );
1603 1603
1604 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { 1604 if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1605 { 1605 {
1606 BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); 1606 BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1607 // End the current control-flow path 1607 // End the current control-flow path
1618 } else { 1618 } else {
1619 // If this inlining ever returned NaN in the past, we compile a call 1619 // If this inlining ever returned NaN in the past, we compile a call
1620 // to the runtime to properly handle corner cases 1620 // to the runtime to properly handle corner cases
1621 1621
1622 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 1622 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1623 Node* if_slow = _gvn.transform( new (C, 1) IfFalseNode(iff) ); 1623 Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) );
1624 Node* if_fast = _gvn.transform( new (C, 1) IfTrueNode(iff) ); 1624 Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) );
1625 1625
1626 if (!if_slow->is_top()) { 1626 if (!if_slow->is_top()) {
1627 RegionNode* result_region = new(C, 3) RegionNode(3); 1627 RegionNode* result_region = new(C) RegionNode(3);
1628 PhiNode* result_val = new (C, 3) PhiNode(result_region, Type::DOUBLE); 1628 PhiNode* result_val = new (C) PhiNode(result_region, Type::DOUBLE);
1629 1629
1630 result_region->init_req(1, if_fast); 1630 result_region->init_req(1, if_fast);
1631 result_val->init_req(1, result); 1631 result_val->init_req(1, result);
1632 1632
1633 set_control(if_slow); 1633 set_control(if_slow);
1634 1634
1635 const TypePtr* no_memory_effects = NULL; 1635 const TypePtr* no_memory_effects = NULL;
1636 Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, 1636 Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1637 no_memory_effects, 1637 no_memory_effects,
1638 x, top(), y, y ? top() : NULL); 1638 x, top(), y, y ? top() : NULL);
1639 Node* value = _gvn.transform(new (C, 1) ProjNode(rt, TypeFunc::Parms+0)); 1639 Node* value = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+0));
1640 #ifdef ASSERT 1640 #ifdef ASSERT
1641 Node* value_top = _gvn.transform(new (C, 1) ProjNode(rt, TypeFunc::Parms+1)); 1641 Node* value_top = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+1));
1642 assert(value_top == top(), "second value must be top"); 1642 assert(value_top == top(), "second value must be top");
1643 #endif 1643 #endif
1644 1644
1645 result_region->init_req(2, control()); 1645 result_region->init_req(2, control());
1646 result_val->init_req(2, value); 1646 result_val->init_req(2, value);
1657 bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) { 1657 bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
1658 assert(id == vmIntrinsics::_dexp, "Not exp"); 1658 assert(id == vmIntrinsics::_dexp, "Not exp");
1659 1659
1660 _sp += arg_size(); // restore stack pointer 1660 _sp += arg_size(); // restore stack pointer
1661 Node *x = pop_math_arg(); 1661 Node *x = pop_math_arg();
1662 Node *result = _gvn.transform(new (C, 2) ExpDNode(0,x)); 1662 Node *result = _gvn.transform(new (C) ExpDNode(0,x));
1663 1663
1664 finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); 1664 finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1665 1665
1666 C->set_has_split_ifs(true); // Has chance for split-if optimization 1666 C->set_has_split_ifs(true); // Has chance for split-if optimization
1667 1667
1696 1696
1697 Node* result = NULL; 1697 Node* result = NULL;
1698 1698
1699 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { 1699 if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1700 // Short form: skip the fancy tests and just check for NaN result. 1700 // Short form: skip the fancy tests and just check for NaN result.
1701 result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); 1701 result = _gvn.transform( new (C) PowDNode(0, x, y) );
1702 } else { 1702 } else {
1703 // If this inlining ever returned NaN in the past, include all 1703 // If this inlining ever returned NaN in the past, include all
1704 // checks + call to the runtime. 1704 // checks + call to the runtime.
1705 1705
1706 // Set the merge point for If node with condition of (x <= 0.0) 1706 // Set the merge point for If node with condition of (x <= 0.0)
1707 // There are four possible paths to region node and phi node 1707 // There are four possible paths to region node and phi node
1708 RegionNode *r = new (C, 4) RegionNode(4); 1708 RegionNode *r = new (C) RegionNode(4);
1709 Node *phi = new (C, 4) PhiNode(r, Type::DOUBLE); 1709 Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1710 1710
1711 // Build the first if node: if (x <= 0.0) 1711 // Build the first if node: if (x <= 0.0)
1712 // Node for 0 constant 1712 // Node for 0 constant
1713 Node *zeronode = makecon(TypeD::ZERO); 1713 Node *zeronode = makecon(TypeD::ZERO);
1714 // Check x:0 1714 // Check x:0
1715 Node *cmp = _gvn.transform(new (C, 3) CmpDNode(x, zeronode)); 1715 Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode));
1716 // Check: If (x<=0) then go complex path 1716 // Check: If (x<=0) then go complex path
1717 Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) ); 1717 Node *bol1 = _gvn.transform( new (C) BoolNode( cmp, BoolTest::le ) );
1718 // Branch either way 1718 // Branch either way
1719 IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); 1719 IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1720 // Fast path taken; set region slot 3 1720 // Fast path taken; set region slot 3
1721 Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(if1) ); 1721 Node *fast_taken = _gvn.transform( new (C) IfFalseNode(if1) );
1722 r->init_req(3,fast_taken); // Capture fast-control 1722 r->init_req(3,fast_taken); // Capture fast-control
1723 1723
1724 // Fast path not-taken, i.e. slow path 1724 // Fast path not-taken, i.e. slow path
1725 Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(if1) ); 1725 Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) );
1726 1726
1727 // Set fast path result 1727 // Set fast path result
1728 Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); 1728 Node *fast_result = _gvn.transform( new (C) PowDNode(0, x, y) );
1729 phi->init_req(3, fast_result); 1729 phi->init_req(3, fast_result);
1730 1730
1731 // Complex path 1731 // Complex path
1732 // Build the second if node (if y is long) 1732 // Build the second if node (if y is long)
1733 // Node for (long)y 1733 // Node for (long)y
1734 Node *longy = _gvn.transform( new (C, 2) ConvD2LNode(y)); 1734 Node *longy = _gvn.transform( new (C) ConvD2LNode(y));
1735 // Node for (double)((long) y) 1735 // Node for (double)((long) y)
1736 Node *doublelongy= _gvn.transform( new (C, 2) ConvL2DNode(longy)); 1736 Node *doublelongy= _gvn.transform( new (C) ConvL2DNode(longy));
1737 // Check (double)((long) y) : y 1737 // Check (double)((long) y) : y
1738 Node *cmplongy= _gvn.transform(new (C, 3) CmpDNode(doublelongy, y)); 1738 Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y));
1739 // Check if (y isn't long) then go to slow path 1739 // Check if (y isn't long) then go to slow path
1740 1740
1741 Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmplongy, BoolTest::ne ) ); 1741 Node *bol2 = _gvn.transform( new (C) BoolNode( cmplongy, BoolTest::ne ) );
1742 // Branch either way 1742 // Branch either way
1743 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); 1743 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1744 Node* ylong_path = _gvn.transform( new (C, 1) IfFalseNode(if2)); 1744 Node* ylong_path = _gvn.transform( new (C) IfFalseNode(if2));
1745 1745
1746 Node *slow_path = _gvn.transform( new (C, 1) IfTrueNode(if2) ); 1746 Node *slow_path = _gvn.transform( new (C) IfTrueNode(if2) );
1747 1747
1748 // Calculate DPow(abs(x), y)*(1 & (long)y) 1748 // Calculate DPow(abs(x), y)*(1 & (long)y)
1749 // Node for constant 1 1749 // Node for constant 1
1750 Node *conone = longcon(1); 1750 Node *conone = longcon(1);
1751 // 1& (long)y 1751 // 1& (long)y
1752 Node *signnode= _gvn.transform( new (C, 3) AndLNode(conone, longy) ); 1752 Node *signnode= _gvn.transform( new (C) AndLNode(conone, longy) );
1753 1753
1754 // A huge number is always even. Detect a huge number by checking 1754 // A huge number is always even. Detect a huge number by checking
1755 // if y + 1 == y and set integer to be tested for parity to 0. 1755 // if y + 1 == y and set integer to be tested for parity to 0.
1756 // Required for corner case: 1756 // Required for corner case:
1757 // (long)9.223372036854776E18 = max_jlong 1757 // (long)9.223372036854776E18 = max_jlong
1758 // (double)(long)9.223372036854776E18 = 9.223372036854776E18 1758 // (double)(long)9.223372036854776E18 = 9.223372036854776E18
1759 // max_jlong is odd but 9.223372036854776E18 is even 1759 // max_jlong is odd but 9.223372036854776E18 is even
1760 Node* yplus1 = _gvn.transform( new (C, 3) AddDNode(y, makecon(TypeD::make(1)))); 1760 Node* yplus1 = _gvn.transform( new (C) AddDNode(y, makecon(TypeD::make(1))));
1761 Node *cmpyplus1= _gvn.transform(new (C, 3) CmpDNode(yplus1, y)); 1761 Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y));
1762 Node *bolyplus1 = _gvn.transform( new (C, 2) BoolNode( cmpyplus1, BoolTest::eq ) ); 1762 Node *bolyplus1 = _gvn.transform( new (C) BoolNode( cmpyplus1, BoolTest::eq ) );
1763 Node* correctedsign = NULL; 1763 Node* correctedsign = NULL;
1764 if (ConditionalMoveLimit != 0) { 1764 if (ConditionalMoveLimit != 0) {
1765 correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG)); 1765 correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
1766 } else { 1766 } else {
1767 IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN); 1767 IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
1768 RegionNode *r = new (C, 3) RegionNode(3); 1768 RegionNode *r = new (C) RegionNode(3);
1769 Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG); 1769 Node *phi = new (C) PhiNode(r, TypeLong::LONG);
1770 r->init_req(1, _gvn.transform( new (C, 1) IfFalseNode(ifyplus1))); 1770 r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyplus1)));
1771 r->init_req(2, _gvn.transform( new (C, 1) IfTrueNode(ifyplus1))); 1771 r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyplus1)));
1772 phi->init_req(1, signnode); 1772 phi->init_req(1, signnode);
1773 phi->init_req(2, longcon(0)); 1773 phi->init_req(2, longcon(0));
1774 correctedsign = _gvn.transform(phi); 1774 correctedsign = _gvn.transform(phi);
1775 ylong_path = _gvn.transform(r); 1775 ylong_path = _gvn.transform(r);
1776 record_for_igvn(r); 1776 record_for_igvn(r);
1777 } 1777 }
1778 1778
1779 // zero node 1779 // zero node
1780 Node *conzero = longcon(0); 1780 Node *conzero = longcon(0);
1781 // Check (1&(long)y)==0? 1781 // Check (1&(long)y)==0?
1782 Node *cmpeq1 = _gvn.transform(new (C, 3) CmpLNode(correctedsign, conzero)); 1782 Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero));
1783 // Check if (1&(long)y)!=0?, if so the result is negative 1783 // Check if (1&(long)y)!=0?, if so the result is negative
1784 Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmpeq1, BoolTest::ne ) ); 1784 Node *bol3 = _gvn.transform( new (C) BoolNode( cmpeq1, BoolTest::ne ) );
1785 // abs(x) 1785 // abs(x)
1786 Node *absx=_gvn.transform( new (C, 2) AbsDNode(x)); 1786 Node *absx=_gvn.transform( new (C) AbsDNode(x));
1787 // abs(x)^y 1787 // abs(x)^y
1788 Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, absx, y) ); 1788 Node *absxpowy = _gvn.transform( new (C) PowDNode(0, absx, y) );
1789 // -abs(x)^y 1789 // -abs(x)^y
1790 Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy)); 1790 Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
1791 // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y) 1791 // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1792 Node *signresult = NULL; 1792 Node *signresult = NULL;
1793 if (ConditionalMoveLimit != 0) { 1793 if (ConditionalMoveLimit != 0) {
1794 signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE)); 1794 signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1795 } else { 1795 } else {
1796 IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN); 1796 IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
1797 RegionNode *r = new (C, 3) RegionNode(3); 1797 RegionNode *r = new (C) RegionNode(3);
1798 Node *phi = new (C, 3) PhiNode(r, Type::DOUBLE); 1798 Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1799 r->init_req(1, _gvn.transform( new (C, 1) IfFalseNode(ifyeven))); 1799 r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyeven)));
1800 r->init_req(2, _gvn.transform( new (C, 1) IfTrueNode(ifyeven))); 1800 r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyeven)));
1801 phi->init_req(1, absxpowy); 1801 phi->init_req(1, absxpowy);
1802 phi->init_req(2, negabsxpowy); 1802 phi->init_req(2, negabsxpowy);
1803 signresult = _gvn.transform(phi); 1803 signresult = _gvn.transform(phi);
1804 ylong_path = _gvn.transform(r); 1804 ylong_path = _gvn.transform(r);
1805 record_for_igvn(r); 1805 record_for_igvn(r);
1834 Node* arg = pop_math_arg(); 1834 Node* arg = pop_math_arg();
1835 Node* trans = NULL; 1835 Node* trans = NULL;
1836 1836
1837 switch (id) { 1837 switch (id) {
1838 case vmIntrinsics::_dlog: 1838 case vmIntrinsics::_dlog:
1839 trans = _gvn.transform((Node*)new (C, 2) LogDNode(arg)); 1839 trans = _gvn.transform((Node*)new (C) LogDNode(arg));
1840 break; 1840 break;
1841 case vmIntrinsics::_dlog10: 1841 case vmIntrinsics::_dlog10:
1842 trans = _gvn.transform((Node*)new (C, 2) Log10DNode(arg)); 1842 trans = _gvn.transform((Node*)new (C) Log10DNode(arg));
1843 break; 1843 break;
1844 default: 1844 default:
1845 assert(false, "bad intrinsic was passed in"); 1845 assert(false, "bad intrinsic was passed in");
1846 return false; 1846 return false;
1847 } 1847 }
1868 1868
1869 const TypePtr* no_memory_effects = NULL; 1869 const TypePtr* no_memory_effects = NULL;
1870 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, 1870 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1871 no_memory_effects, 1871 no_memory_effects,
1872 a, top(), b, b ? top() : NULL); 1872 a, top(), b, b ? top() : NULL);
1873 Node* value = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+0)); 1873 Node* value = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+0));
1874 #ifdef ASSERT 1874 #ifdef ASSERT
1875 Node* value_top = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+1)); 1875 Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1));
1876 assert(value_top == top(), "second value must be top"); 1876 assert(value_top == top(), "second value must be top");
1877 #endif 1877 #endif
1878 1878
1879 push_pair(value); 1879 push_pair(value);
1880 return true; 1880 return true;
1961 // and similar uses of System.arraycopy. 1961 // and similar uses of System.arraycopy.
1962 // First, compute the normalized version of CmpI(x, y). 1962 // First, compute the normalized version of CmpI(x, y).
1963 int cmp_op = Op_CmpI; 1963 int cmp_op = Op_CmpI;
1964 Node* xkey = xvalue; 1964 Node* xkey = xvalue;
1965 Node* ykey = yvalue; 1965 Node* ykey = yvalue;
1966 Node* ideal_cmpxy = _gvn.transform( new(C, 3) CmpINode(xkey, ykey) ); 1966 Node* ideal_cmpxy = _gvn.transform( new(C) CmpINode(xkey, ykey) );
1967 if (ideal_cmpxy->is_Cmp()) { 1967 if (ideal_cmpxy->is_Cmp()) {
1968 // E.g., if we have CmpI(length - offset, count), 1968 // E.g., if we have CmpI(length - offset, count),
1969 // it might idealize to CmpI(length, count + offset) 1969 // it might idealize to CmpI(length, count + offset)
1970 cmp_op = ideal_cmpxy->Opcode(); 1970 cmp_op = ideal_cmpxy->Opcode();
1971 xkey = ideal_cmpxy->in(1); 1971 xkey = ideal_cmpxy->in(1);
2054 Node* answer_if_false = NULL; 2054 Node* answer_if_false = NULL;
2055 switch (best_btest) { 2055 switch (best_btest) {
2056 default: 2056 default:
2057 if (cmpxy == NULL) 2057 if (cmpxy == NULL)
2058 cmpxy = ideal_cmpxy; 2058 cmpxy = ideal_cmpxy;
2059 best_bol = _gvn.transform( new(C, 2) BoolNode(cmpxy, BoolTest::lt) ); 2059 best_bol = _gvn.transform( new(C) BoolNode(cmpxy, BoolTest::lt) );
2060 // and fall through: 2060 // and fall through:
2061 case BoolTest::lt: // x < y 2061 case BoolTest::lt: // x < y
2062 case BoolTest::le: // x <= y 2062 case BoolTest::le: // x <= y
2063 answer_if_true = (want_max ? yvalue : xvalue); 2063 answer_if_true = (want_max ? yvalue : xvalue);
2064 answer_if_false = (want_max ? xvalue : yvalue); 2064 answer_if_false = (want_max ? xvalue : yvalue);
2114 if (base_type == NULL) { 2114 if (base_type == NULL) {
2115 // Unknown type. 2115 // Unknown type.
2116 return Type::AnyPtr; 2116 return Type::AnyPtr;
2117 } else if (base_type == TypePtr::NULL_PTR) { 2117 } else if (base_type == TypePtr::NULL_PTR) {
2118 // Since this is a NULL+long form, we have to switch to a rawptr. 2118 // Since this is a NULL+long form, we have to switch to a rawptr.
2119 base = _gvn.transform( new (C, 2) CastX2PNode(offset) ); 2119 base = _gvn.transform( new (C) CastX2PNode(offset) );
2120 offset = MakeConX(0); 2120 offset = MakeConX(0);
2121 return Type::RawPtr; 2121 return Type::RawPtr;
2122 } else if (base_type->base() == Type::RawPtr) { 2122 } else if (base_type->base() == Type::RawPtr) {
2123 return Type::RawPtr; 2123 return Type::RawPtr;
2124 } else if (base_type->isa_oopptr()) { 2124 } else if (base_type->isa_oopptr()) {
2159 if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false; 2159 if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
2160 if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false; 2160 if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
2161 _sp += arg_size(); // restore stack pointer 2161 _sp += arg_size(); // restore stack pointer
2162 switch (id) { 2162 switch (id) {
2163 case vmIntrinsics::_numberOfLeadingZeros_i: 2163 case vmIntrinsics::_numberOfLeadingZeros_i:
2164 push(_gvn.transform(new (C, 2) CountLeadingZerosINode(pop()))); 2164 push(_gvn.transform(new (C) CountLeadingZerosINode(pop())));
2165 break; 2165 break;
2166 case vmIntrinsics::_numberOfLeadingZeros_l: 2166 case vmIntrinsics::_numberOfLeadingZeros_l:
2167 push(_gvn.transform(new (C, 2) CountLeadingZerosLNode(pop_pair()))); 2167 push(_gvn.transform(new (C) CountLeadingZerosLNode(pop_pair())));
2168 break; 2168 break;
2169 default: 2169 default:
2170 ShouldNotReachHere(); 2170 ShouldNotReachHere();
2171 } 2171 }
2172 return true; 2172 return true;
2180 if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false; 2180 if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
2181 if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false; 2181 if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
2182 _sp += arg_size(); // restore stack pointer 2182 _sp += arg_size(); // restore stack pointer
2183 switch (id) { 2183 switch (id) {
2184 case vmIntrinsics::_numberOfTrailingZeros_i: 2184 case vmIntrinsics::_numberOfTrailingZeros_i:
2185 push(_gvn.transform(new (C, 2) CountTrailingZerosINode(pop()))); 2185 push(_gvn.transform(new (C) CountTrailingZerosINode(pop())));
2186 break; 2186 break;
2187 case vmIntrinsics::_numberOfTrailingZeros_l: 2187 case vmIntrinsics::_numberOfTrailingZeros_l:
2188 push(_gvn.transform(new (C, 2) CountTrailingZerosLNode(pop_pair()))); 2188 push(_gvn.transform(new (C) CountTrailingZerosLNode(pop_pair())));
2189 break; 2189 break;
2190 default: 2190 default:
2191 ShouldNotReachHere(); 2191 ShouldNotReachHere();
2192 } 2192 }
2193 return true; 2193 return true;
2201 if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false; 2201 if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false;
2202 if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false; 2202 if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false;
2203 _sp += arg_size(); // restore stack pointer 2203 _sp += arg_size(); // restore stack pointer
2204 switch (id) { 2204 switch (id) {
2205 case vmIntrinsics::_bitCount_i: 2205 case vmIntrinsics::_bitCount_i:
2206 push(_gvn.transform(new (C, 2) PopCountINode(pop()))); 2206 push(_gvn.transform(new (C) PopCountINode(pop())));
2207 break; 2207 break;
2208 case vmIntrinsics::_bitCount_l: 2208 case vmIntrinsics::_bitCount_l:
2209 push(_gvn.transform(new (C, 2) PopCountLNode(pop_pair()))); 2209 push(_gvn.transform(new (C) PopCountLNode(pop_pair())));
2210 break; 2210 break;
2211 default: 2211 default:
2212 ShouldNotReachHere(); 2212 ShouldNotReachHere();
2213 } 2213 }
2214 return true; 2214 return true;
2228 if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false; 2228 if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false;
2229 if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false; 2229 if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false;
2230 _sp += arg_size(); // restore stack pointer 2230 _sp += arg_size(); // restore stack pointer
2231 switch (id) { 2231 switch (id) {
2232 case vmIntrinsics::_reverseBytes_i: 2232 case vmIntrinsics::_reverseBytes_i:
2233 push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop()))); 2233 push(_gvn.transform(new (C) ReverseBytesINode(0, pop())));
2234 break; 2234 break;
2235 case vmIntrinsics::_reverseBytes_l: 2235 case vmIntrinsics::_reverseBytes_l:
2236 push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair()))); 2236 push_pair(_gvn.transform(new (C) ReverseBytesLNode(0, pop_pair())));
2237 break; 2237 break;
2238 case vmIntrinsics::_reverseBytes_c: 2238 case vmIntrinsics::_reverseBytes_c:
2239 push(_gvn.transform(new (C, 2) ReverseBytesUSNode(0, pop()))); 2239 push(_gvn.transform(new (C) ReverseBytesUSNode(0, pop())));
2240 break; 2240 break;
2241 case vmIntrinsics::_reverseBytes_s: 2241 case vmIntrinsics::_reverseBytes_s:
2242 push(_gvn.transform(new (C, 2) ReverseBytesSNode(0, pop()))); 2242 push(_gvn.transform(new (C) ReverseBytesSNode(0, pop())));
2243 break; 2243 break;
2244 default: 2244 default:
2245 ; 2245 ;
2246 } 2246 }
2247 return true; 2247 return true;
2582 } 2582 }
2583 push(p); 2583 push(p);
2584 break; 2584 break;
2585 case T_ADDRESS: 2585 case T_ADDRESS:
2586 // Cast to an int type. 2586 // Cast to an int type.
2587 p = _gvn.transform( new (C, 2) CastP2XNode(NULL,p) ); 2587 p = _gvn.transform( new (C) CastP2XNode(NULL,p) );
2588 p = ConvX2L(p); 2588 p = ConvX2L(p);
2589 push_pair(p); 2589 push_pair(p);
2590 break; 2590 break;
2591 case T_DOUBLE: 2591 case T_DOUBLE:
2592 case T_LONG: 2592 case T_LONG:
2601 val = dstore_rounding(val); 2601 val = dstore_rounding(val);
2602 break; 2602 break;
2603 case T_ADDRESS: 2603 case T_ADDRESS:
2604 // Repackage the long as a pointer. 2604 // Repackage the long as a pointer.
2605 val = ConvL2X(val); 2605 val = ConvL2X(val);
2606 val = _gvn.transform( new (C, 2) CastX2PNode(val) ); 2606 val = _gvn.transform( new (C) CastX2PNode(val) );
2607 break; 2607 break;
2608 } 2608 }
2609 2609
2610 if (type != T_OBJECT ) { 2610 if (type != T_OBJECT ) {
2611 (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile); 2611 (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
2723 } 2723 }
2724 2724
2725 // Generate the read or write prefetch 2725 // Generate the read or write prefetch
2726 Node *prefetch; 2726 Node *prefetch;
2727 if (is_store) { 2727 if (is_store) {
2728 prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr); 2728 prefetch = new (C) PrefetchWriteNode(i_o(), adr);
2729 } else { 2729 } else {
2730 prefetch = new (C, 3) PrefetchReadNode(i_o(), adr); 2730 prefetch = new (C) PrefetchReadNode(i_o(), adr);
2731 } 2731 }
2732 prefetch->init_req(0, control()); 2732 prefetch->init_req(0, control());
2733 set_i_o(_gvn.transform(prefetch)); 2733 set_i_o(_gvn.transform(prefetch));
2734 2734
2735 return true; 2735 return true;
2844 // longs, and Object. Adding others should be straightforward. 2844 // longs, and Object. Adding others should be straightforward.
2845 Node* load_store; 2845 Node* load_store;
2846 switch(type) { 2846 switch(type) {
2847 case T_INT: 2847 case T_INT:
2848 if (kind == LS_xadd) { 2848 if (kind == LS_xadd) {
2849 load_store = _gvn.transform(new (C, 4) GetAndAddINode(control(), mem, adr, newval, adr_type)); 2849 load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
2850 } else if (kind == LS_xchg) { 2850 } else if (kind == LS_xchg) {
2851 load_store = _gvn.transform(new (C, 4) GetAndSetINode(control(), mem, adr, newval, adr_type)); 2851 load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
2852 } else if (kind == LS_cmpxchg) { 2852 } else if (kind == LS_cmpxchg) {
2853 load_store = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval)); 2853 load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2854 } else { 2854 } else {
2855 ShouldNotReachHere(); 2855 ShouldNotReachHere();
2856 } 2856 }
2857 break; 2857 break;
2858 case T_LONG: 2858 case T_LONG:
2859 if (kind == LS_xadd) { 2859 if (kind == LS_xadd) {
2860 load_store = _gvn.transform(new (C, 4) GetAndAddLNode(control(), mem, adr, newval, adr_type)); 2860 load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2861 } else if (kind == LS_xchg) { 2861 } else if (kind == LS_xchg) {
2862 load_store = _gvn.transform(new (C, 4) GetAndSetLNode(control(), mem, adr, newval, adr_type)); 2862 load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2863 } else if (kind == LS_cmpxchg) { 2863 } else if (kind == LS_cmpxchg) {
2864 load_store = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval)); 2864 load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2865 } else { 2865 } else {
2866 ShouldNotReachHere(); 2866 ShouldNotReachHere();
2867 } 2867 }
2868 break; 2868 break;
2869 case T_OBJECT: 2869 case T_OBJECT:
2878 control(), base, adr, alias_idx, newval, value_type->make_oopptr(), 2878 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2879 NULL /* pre_val*/, 2879 NULL /* pre_val*/,
2880 T_OBJECT); 2880 T_OBJECT);
2881 #ifdef _LP64 2881 #ifdef _LP64
2882 if (adr->bottom_type()->is_ptr_to_narrowoop()) { 2882 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2883 Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); 2883 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2884 if (kind == LS_xchg) { 2884 if (kind == LS_xchg) {
2885 load_store = _gvn.transform(new (C, 4) GetAndSetNNode(control(), mem, adr, 2885 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2886 newval_enc, adr_type, value_type->make_narrowoop())); 2886 newval_enc, adr_type, value_type->make_narrowoop()));
2887 } else { 2887 } else {
2888 assert(kind == LS_cmpxchg, "wrong LoadStore operation"); 2888 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2889 Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop())); 2889 Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2890 load_store = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr, 2890 load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2891 newval_enc, oldval_enc)); 2891 newval_enc, oldval_enc));
2892 } 2892 }
2893 } else 2893 } else
2894 #endif 2894 #endif
2895 { 2895 {
2896 if (kind == LS_xchg) { 2896 if (kind == LS_xchg) {
2897 load_store = _gvn.transform(new (C, 4) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr())); 2897 load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2898 } else { 2898 } else {
2899 assert(kind == LS_cmpxchg, "wrong LoadStore operation"); 2899 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2900 load_store = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval)); 2900 load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2901 } 2901 }
2902 } 2902 }
2903 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true); 2903 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2904 break; 2904 break;
2905 default: 2905 default:
2908 } 2908 }
2909 2909
2910 // SCMemProjNodes represent the memory state of a LoadStore. Their 2910 // SCMemProjNodes represent the memory state of a LoadStore. Their
2911 // main role is to prevent LoadStore nodes from being optimized away 2911 // main role is to prevent LoadStore nodes from being optimized away
2912 // when their results aren't used. 2912 // when their results aren't used.
2913 Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(load_store)); 2913 Node* proj = _gvn.transform( new (C) SCMemProjNode(load_store));
2914 set_memory(proj, alias_idx); 2914 set_memory(proj, alias_idx);
2915 2915
2916 // Add the trailing membar surrounding the access 2916 // Add the trailing membar surrounding the access
2917 insert_mem_bar(Op_MemBarCPUOrder); 2917 insert_mem_bar(Op_MemBarCPUOrder);
2918 insert_mem_bar(Op_MemBarAcquire); 2918 insert_mem_bar(Op_MemBarAcquire);
2919 2919
2920 #ifdef _LP64 2920 #ifdef _LP64
2921 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) { 2921 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
2922 load_store = _gvn.transform(new (C, 2) DecodeNNode(load_store, load_store->bottom_type()->make_ptr())); 2922 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->bottom_type()->make_ptr()));
2923 } 2923 }
2924 #endif 2924 #endif
2925 2925
2926 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); 2926 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2927 push_node(load_store->bottom_type()->basic_type(), load_store); 2927 push_node(load_store->bottom_type()->basic_type(), load_store);
3020 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); 3020 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3021 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler 3021 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3022 // can generate code to load it as unsigned byte. 3022 // can generate code to load it as unsigned byte.
3023 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); 3023 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
3024 Node* bits = intcon(InstanceKlass::fully_initialized); 3024 Node* bits = intcon(InstanceKlass::fully_initialized);
3025 Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) ); 3025 Node* test = _gvn.transform( new (C) SubINode(inst, bits) );
3026 // The 'test' is non-zero if we need to take a slow path. 3026 // The 'test' is non-zero if we need to take a slow path.
3027 3027
3028 Node* obj = new_instance(kls, test); 3028 Node* obj = new_instance(kls, test);
3029 push(obj); 3029 push(obj);
3030 3030
3049 _sp -= nargs; 3049 _sp -= nargs;
3050 ByteSize offset = TRACE_ID_OFFSET; 3050 ByteSize offset = TRACE_ID_OFFSET;
3051 Node* insp = basic_plus_adr(kls, in_bytes(offset)); 3051 Node* insp = basic_plus_adr(kls, in_bytes(offset));
3052 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG); 3052 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
3053 Node* bits = longcon(~0x03l); // ignore bit 0 & 1 3053 Node* bits = longcon(~0x03l); // ignore bit 0 & 1
3054 Node* andl = _gvn.transform(new (C, 3) AndLNode(tvalue, bits)); 3054 Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
3055 Node* clsused = longcon(0x01l); // set the class bit 3055 Node* clsused = longcon(0x01l); // set the class bit
3056 Node* orl = _gvn.transform(new (C, 3) OrLNode(tvalue, clsused)); 3056 Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
3057 3057
3058 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr(); 3058 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
3059 store_to_memory(control(), insp, orl, T_LONG, adr_type); 3059 store_to_memory(control(), insp, orl, T_LONG, adr_type);
3060 push_pair(andl); 3060 push_pair(andl);
3061 return true; 3061 return true;
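The trace-id hunk returns the klass's trace word with its two low bits cleared and writes the word back with the "class used" bit set. A tiny worked example of the same bit arithmetic (the sample value is arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Return the trace id with bits 0 and 1 masked off; store it back with bit 0 set.
      uint64_t tvalue   = 0x1234567E;                 // hypothetical raw trace word
      uint64_t returned = tvalue & ~UINT64_C(0x03);   // AndLNode(tvalue, ~0x03)
      uint64_t stored   = tvalue |  UINT64_C(0x01);   // OrLNode(tvalue, 0x01)
      assert(returned == 0x1234567C);
      assert(stored   == 0x1234567F);
      return 0;
    }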
3088 // these have the same type and signature 3088 // these have the same type and signature
3089 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) { 3089 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3090 const TypeFunc *tf = OptoRuntime::void_long_Type(); 3090 const TypeFunc *tf = OptoRuntime::void_long_Type();
3091 const TypePtr* no_memory_effects = NULL; 3091 const TypePtr* no_memory_effects = NULL;
3092 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); 3092 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3093 Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0)); 3093 Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
3094 #ifdef ASSERT 3094 #ifdef ASSERT
3095 Node* value_top = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms + 1)); 3095 Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms + 1));
3096 assert(value_top == top(), "second value must be top"); 3096 assert(value_top == top(), "second value must be top");
3097 #endif 3097 #endif
3098 push_pair(value); 3098 push_pair(value);
3099 return true; 3099 return true;
3100 } 3100 }
3119 // However, if the receiver is not currentThread, we must call the VM, 3119 // However, if the receiver is not currentThread, we must call the VM,
3120 // because there must be some locking done around the operation. 3120 // because there must be some locking done around the operation.
3121 3121
3122 // We only go to the fast case code if we pass two guards. 3122 // We only go to the fast case code if we pass two guards.
3123 // Paths which do not pass are accumulated in the slow_region. 3123 // Paths which do not pass are accumulated in the slow_region.
3124 RegionNode* slow_region = new (C, 1) RegionNode(1); 3124 RegionNode* slow_region = new (C) RegionNode(1);
3125 record_for_igvn(slow_region); 3125 record_for_igvn(slow_region);
3126 RegionNode* result_rgn = new (C, 4) RegionNode(1+3); // fast1, fast2, slow 3126 RegionNode* result_rgn = new (C) RegionNode(1+3); // fast1, fast2, slow
3127 PhiNode* result_val = new (C, 4) PhiNode(result_rgn, TypeInt::BOOL); 3127 PhiNode* result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
3128 enum { no_int_result_path = 1, 3128 enum { no_int_result_path = 1,
3129 no_clear_result_path = 2, 3129 no_clear_result_path = 2,
3130 slow_result_path = 3 3130 slow_result_path = 3
3131 }; 3131 };
3132 3132
3133 // (a) Receiving thread must be the current thread. 3133 // (a) Receiving thread must be the current thread.
3134 Node* rec_thr = argument(0); 3134 Node* rec_thr = argument(0);
3135 Node* tls_ptr = NULL; 3135 Node* tls_ptr = NULL;
3136 Node* cur_thr = generate_current_thread(tls_ptr); 3136 Node* cur_thr = generate_current_thread(tls_ptr);
3137 Node* cmp_thr = _gvn.transform( new (C, 3) CmpPNode(cur_thr, rec_thr) ); 3137 Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) );
3138 Node* bol_thr = _gvn.transform( new (C, 2) BoolNode(cmp_thr, BoolTest::ne) ); 3138 Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) );
3139 3139
3140 bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO); 3140 bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
3141 if (!known_current_thread) 3141 if (!known_current_thread)
3142 generate_slow_guard(bol_thr, slow_region); 3142 generate_slow_guard(bol_thr, slow_region);
3143 3143
3145 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset())); 3145 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3146 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS); 3146 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
3147 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset())); 3147 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3148 // Set the control input on the field _interrupted read to prevent it floating up. 3148 // Set the control input on the field _interrupted read to prevent it floating up.
3149 Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT); 3149 Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
3150 Node* cmp_bit = _gvn.transform( new (C, 3) CmpINode(int_bit, intcon(0)) ); 3150 Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) );
3151 Node* bol_bit = _gvn.transform( new (C, 2) BoolNode(cmp_bit, BoolTest::ne) ); 3151 Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) );
3152 3152
3153 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); 3153 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3154 3154
3155 // First fast path: if (!TLS._interrupted) return false; 3155 // First fast path: if (!TLS._interrupted) return false;
3156 Node* false_bit = _gvn.transform( new (C, 1) IfFalseNode(iff_bit) ); 3156 Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) );
3157 result_rgn->init_req(no_int_result_path, false_bit); 3157 result_rgn->init_req(no_int_result_path, false_bit);
3158 result_val->init_req(no_int_result_path, intcon(0)); 3158 result_val->init_req(no_int_result_path, intcon(0));
3159 3159
3160 // drop through to next case 3160 // drop through to next case
3161 set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_bit)) ); 3161 set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) );
3162 3162
3163 // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path. 3163 // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3164 Node* clr_arg = argument(1); 3164 Node* clr_arg = argument(1);
3165 Node* cmp_arg = _gvn.transform( new (C, 3) CmpINode(clr_arg, intcon(0)) ); 3165 Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) );
3166 Node* bol_arg = _gvn.transform( new (C, 2) BoolNode(cmp_arg, BoolTest::ne) ); 3166 Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) );
3167 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN); 3167 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3168 3168
3169 // Second fast path: ... else if (!clear_int) return true; 3169 // Second fast path: ... else if (!clear_int) return true;
3170 Node* false_arg = _gvn.transform( new (C, 1) IfFalseNode(iff_arg) ); 3170 Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) );
3171 result_rgn->init_req(no_clear_result_path, false_arg); 3171 result_rgn->init_req(no_clear_result_path, false_arg);
3172 result_val->init_req(no_clear_result_path, intcon(1)); 3172 result_val->init_req(no_clear_result_path, intcon(1));
3173 3173
3174 // drop through to next case 3174 // drop through to next case
3175 set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_arg)) ); 3175 set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) );
3176 3176
3177 // (d) Otherwise, go to the slow path. 3177 // (d) Otherwise, go to the slow path.
3178 slow_region->add_req(control()); 3178 slow_region->add_req(control());
3179 set_control( _gvn.transform(slow_region) ); 3179 set_control( _gvn.transform(slow_region) );
3180 3180
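The isInterrupted intrinsic above reduces to three decisions: bail to the VM unless the receiver is the current thread, return false when the interrupt bit is clear, return true when the bit is set and clearing was not requested, and otherwise fall into the slow call. A straight-line model of that control flow, with a placeholder standing in for the VM call:

    #include <cassert>

    // Assumed stub standing in for the runtime call taken on the slow path.
    static bool slow_path_interrupted(bool /*clear*/) { return true; }

    static bool is_interrupted(bool receiver_is_current, bool interrupted_bit, bool clear) {
      if (!receiver_is_current) return slow_path_interrupted(clear); // guard (a)
      if (!interrupted_bit)     return false;                        // fast path (b)
      if (!clear)               return true;                         // fast path (c)
      return slow_path_interrupted(clear);                           // slow path (d)
    }

    int main() {
      assert(is_interrupted(true, false, true)  == false);
      assert(is_interrupted(true, true,  false) == true);
      return 0;
    }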
3258 // Like generate_guard, adds a new path onto the region. 3258 // Like generate_guard, adds a new path onto the region.
3259 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); 3259 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3260 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT); 3260 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
3261 Node* mask = intcon(modifier_mask); 3261 Node* mask = intcon(modifier_mask);
3262 Node* bits = intcon(modifier_bits); 3262 Node* bits = intcon(modifier_bits);
3263 Node* mbit = _gvn.transform( new (C, 3) AndINode(mods, mask) ); 3263 Node* mbit = _gvn.transform( new (C) AndINode(mods, mask) );
3264 Node* cmp = _gvn.transform( new (C, 3) CmpINode(mbit, bits) ); 3264 Node* cmp = _gvn.transform( new (C) CmpINode(mbit, bits) );
3265 Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) ); 3265 Node* bol = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
3266 return generate_fair_guard(bol, region); 3266 return generate_fair_guard(bol, region);
3267 } 3267 }
3268 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) { 3268 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3269 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); 3269 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3270 } 3270 }
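generate_access_flags_guard branches when (access_flags & mask) != bits, so generate_interface_guard (mask = JVM_ACC_INTERFACE, bits = 0) takes the guarded path exactly when the klass is an interface. A scalar model of the predicate:

    #include <cassert>

    // The guard fires when (access_flags & modifier_mask) != modifier_bits.
    static bool guard_taken(int access_flags, int modifier_mask, int modifier_bits) {
      return (access_flags & modifier_mask) != modifier_bits;
    }

    int main() {
      const int JVM_ACC_INTERFACE = 0x0200;   // value from the JVM specification
      // With bits == 0, the guard is taken when the klass IS an interface.
      assert(guard_taken(0x0200 /*interface*/,    JVM_ACC_INTERFACE, 0) == true);
      assert(guard_taken(0x0001 /*public class*/, JVM_ACC_INTERFACE, 0) == false);
      return 0;
    }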
3333 } 3333 }
3334 } 3334 }
3335 #endif 3335 #endif
3336 3336
3337 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive). 3337 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3338 RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT); 3338 RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3339 record_for_igvn(region); 3339 record_for_igvn(region);
3340 PhiNode* phi = new (C, PATH_LIMIT) PhiNode(region, return_type); 3340 PhiNode* phi = new (C) PhiNode(region, return_type);
3341 3341
3342 // The mirror will never be null for Reflection.getClassAccessFlags; however, 3342 // The mirror will never be null for Reflection.getClassAccessFlags; however,
3343 // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE 3343 // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
3344 // if it is. See bug 4774291. 3344 // if it is. See bug 4774291.
3345 3345
3483 _ref_subtype_path, // {N,N} & subtype check wins => true 3483 _ref_subtype_path, // {N,N} & subtype check wins => true
3484 _both_ref_path, // {N,N} & subtype check loses => false 3484 _both_ref_path, // {N,N} & subtype check loses => false
3485 PATH_LIMIT 3485 PATH_LIMIT
3486 }; 3486 };
3487 3487
3488 RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT); 3488 RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3489 Node* phi = new (C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL); 3489 Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
3490 record_for_igvn(region); 3490 record_for_igvn(region);
3491 3491
3492 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads 3492 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
3493 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; 3493 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3494 int class_klass_offset = java_lang_Class::klass_offset_in_bytes(); 3494 int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3535 // we must return true when they are identical primitives. 3535 // we must return true when they are identical primitives.
3536 // It is convenient to test this after the first null klass check. 3536 // It is convenient to test this after the first null klass check.
3537 set_control(region->in(_prim_0_path)); // go back to first null check 3537 set_control(region->in(_prim_0_path)); // go back to first null check
3538 if (!stopped()) { 3538 if (!stopped()) {
3539 // Since superc is primitive, make a guard for the superc==subc case. 3539 // Since superc is primitive, make a guard for the superc==subc case.
3540 Node* cmp_eq = _gvn.transform( new (C, 3) CmpPNode(args[0], args[1]) ); 3540 Node* cmp_eq = _gvn.transform( new (C) CmpPNode(args[0], args[1]) );
3541 Node* bol_eq = _gvn.transform( new (C, 2) BoolNode(cmp_eq, BoolTest::eq) ); 3541 Node* bol_eq = _gvn.transform( new (C) BoolNode(cmp_eq, BoolTest::eq) );
3542 generate_guard(bol_eq, region, PROB_FAIR); 3542 generate_guard(bol_eq, region, PROB_FAIR);
3543 if (region->req() == PATH_LIMIT+1) { 3543 if (region->req() == PATH_LIMIT+1) {
3544 // A guard was added. If the added guard is taken, superc==subc. 3544 // A guard was added. If the added guard is taken, superc==subc.
3545 region->swap_edges(PATH_LIMIT, _prim_same_path); 3545 region->swap_edges(PATH_LIMIT, _prim_same_path);
3546 region->del_req(PATH_LIMIT); 3546 region->del_req(PATH_LIMIT);
3602 // Now test the correct condition. 3602 // Now test the correct condition.
3603 jint nval = (obj_array 3603 jint nval = (obj_array
3604 ? ((jint)Klass::_lh_array_tag_type_value 3604 ? ((jint)Klass::_lh_array_tag_type_value
3605 << Klass::_lh_array_tag_shift) 3605 << Klass::_lh_array_tag_shift)
3606 : Klass::_lh_neutral_value); 3606 : Klass::_lh_neutral_value);
3607 Node* cmp = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(nval)) ); 3607 Node* cmp = _gvn.transform( new(C) CmpINode(layout_val, intcon(nval)) );
3608 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array 3608 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
3609 // invert the test if we are looking for a non-array 3609 // invert the test if we are looking for a non-array
3610 if (not_array) btest = BoolTest(btest).negate(); 3610 if (not_array) btest = BoolTest(btest).negate();
3611 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, btest) ); 3611 Node* bol = _gvn.transform( new(C) BoolNode(cmp, btest) );
3612 return generate_fair_guard(bol, region); 3612 return generate_fair_guard(bol, region);
3613 } 3613 }
3614 3614
3615 3615
3616 //-----------------------inline_native_newArray-------------------------- 3616 //-----------------------inline_native_newArray--------------------------
3624 _sp -= nargs; 3624 _sp -= nargs;
3625 // If mirror or obj is dead, only null-path is taken. 3625 // If mirror or obj is dead, only null-path is taken.
3626 if (stopped()) return true; 3626 if (stopped()) return true;
3627 3627
3628 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; 3628 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3629 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); 3629 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3630 PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, 3630 PhiNode* result_val = new(C) PhiNode(result_reg,
3631 TypeInstPtr::NOTNULL); 3631 TypeInstPtr::NOTNULL);
3632 PhiNode* result_io = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); 3632 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO);
3633 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, 3633 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
3634 TypePtr::BOTTOM); 3634 TypePtr::BOTTOM);
3635 3635
3636 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); 3636 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3637 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null, 3637 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3638 nargs, 3638 nargs,
3639 result_reg, _slow_path); 3639 result_reg, _slow_path);
3744 3744
3745 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0, 3745 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0,
3746 NULL, 0); 3746 NULL, 0);
3747 klass_node = do_null_check(klass_node, T_OBJECT); 3747 klass_node = do_null_check(klass_node, T_OBJECT);
3748 3748
3749 RegionNode* bailout = new (C, 1) RegionNode(1); 3749 RegionNode* bailout = new (C) RegionNode(1);
3750 record_for_igvn(bailout); 3750 record_for_igvn(bailout);
3751 3751
3752 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. 3752 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3753 // Bail out if that is so. 3753 // Bail out if that is so.
3754 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); 3754 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3755 if (not_objArray != NULL) { 3755 if (not_objArray != NULL) {
3756 // Improve the klass node's type from the new optimistic assumption: 3756 // Improve the klass node's type from the new optimistic assumption:
3757 ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); 3757 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3758 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); 3758 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3759 Node* cast = new (C, 2) CastPPNode(klass_node, akls); 3759 Node* cast = new (C) CastPPNode(klass_node, akls);
3760 cast->init_req(0, control()); 3760 cast->init_req(0, control());
3761 klass_node = _gvn.transform(cast); 3761 klass_node = _gvn.transform(cast);
3762 } 3762 }
3763 3763
3764 // Bail out if either start or end is negative. 3764 // Bail out if either start or end is negative.
3765 generate_negative_guard(start, bailout, &start); 3765 generate_negative_guard(start, bailout, &start);
3766 generate_negative_guard(end, bailout, &end); 3766 generate_negative_guard(end, bailout, &end);
3767 3767
3768 Node* length = end; 3768 Node* length = end;
3769 if (_gvn.type(start) != TypeInt::ZERO) { 3769 if (_gvn.type(start) != TypeInt::ZERO) {
3770 length = _gvn.transform( new (C, 3) SubINode(end, start) ); 3770 length = _gvn.transform( new (C) SubINode(end, start) );
3771 } 3771 }
3772 3772
3773 // Bail out if length is negative. 3773 // Bail out if length is negative.
3774 // Without this the new_array would throw 3774 // Without this the new_array would throw
3775 // NegativeArraySizeException but IllegalArgumentException is what 3775 // NegativeArraySizeException but IllegalArgumentException is what
3785 3785
3786 if (!stopped()) { 3786 if (!stopped()) {
3787 3787
3788 // How many elements will we copy from the original? 3788 // How many elements will we copy from the original?
3789 // The answer is MinI(orig_length - start, length). 3789 // The answer is MinI(orig_length - start, length).
3790 Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); 3790 Node* orig_tail = _gvn.transform( new(C) SubINode(orig_length, start) );
3791 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); 3791 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3792 3792
3793 newcopy = new_array(klass_node, length, 0); 3793 newcopy = new_array(klass_node, length, 0);
3794 3794
3795 // Generate a direct call to the right arraycopy function(s). 3795 // Generate a direct call to the right arraycopy function(s).
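The copyOfRange math above allocates end - start elements and copies min(orig_length - start, length) of them; anything past the source's tail stays zero from the fresh allocation. A scalar model of that length computation:

    #include <algorithm>
    #include <cassert>

    // How many elements the intrinsic actually copies from the original array.
    static int copied_elements(int orig_length, int start, int end) {
      int length    = end - start;              // SubINode(end, start)
      int orig_tail = orig_length - start;      // SubINode(orig_length, start)
      return std::min(orig_tail, length);       // generate_min_max(_min, ...)
    }

    int main() {
      assert(copied_elements(10, 2, 6)  == 4);  // range fully inside the source
      assert(copied_elements(10, 8, 15) == 2);  // remaining 5 slots stay zeroed
      return 0;
    }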
3832 3832
3833 // Compare the target method with the expected method (e.g., Object.hashCode). 3833 // Compare the target method with the expected method (e.g., Object.hashCode).
3834 const TypePtr* native_call_addr = TypeMetadataPtr::make(method); 3834 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3835 3835
3836 Node* native_call = makecon(native_call_addr); 3836 Node* native_call = makecon(native_call_addr);
3837 Node* chk_native = _gvn.transform( new(C, 3) CmpPNode(target_call, native_call) ); 3837 Node* chk_native = _gvn.transform( new(C) CmpPNode(target_call, native_call) );
3838 Node* test_native = _gvn.transform( new(C, 2) BoolNode(chk_native, BoolTest::ne) ); 3838 Node* test_native = _gvn.transform( new(C) BoolNode(chk_native, BoolTest::ne) );
3839 3839
3840 return generate_slow_guard(test_native, slow_region); 3840 return generate_slow_guard(test_native, slow_region);
3841 } 3841 }
3842 3842
3843 //-----------------------generate_method_call---------------------------- 3843 //-----------------------generate_method_call----------------------------
3855 ciMethod* method = callee(); 3855 ciMethod* method = callee();
3856 // ensure the JVMS we have will be correct for this call 3856 // ensure the JVMS we have will be correct for this call
3857 guarantee(method_id == method->intrinsic_id(), "must match"); 3857 guarantee(method_id == method->intrinsic_id(), "must match");
3858 3858
3859 const TypeFunc* tf = TypeFunc::make(method); 3859 const TypeFunc* tf = TypeFunc::make(method);
3860 int tfdc = tf->domain()->cnt();
3861 CallJavaNode* slow_call; 3860 CallJavaNode* slow_call;
3862 if (is_static) { 3861 if (is_static) {
3863 assert(!is_virtual, ""); 3862 assert(!is_virtual, "");
3864 slow_call = new(C, tfdc) CallStaticJavaNode(tf, 3863 slow_call = new(C) CallStaticJavaNode(tf,
3865 SharedRuntime::get_resolve_static_call_stub(), 3864 SharedRuntime::get_resolve_static_call_stub(),
3866 method, bci()); 3865 method, bci());
3867 } else if (is_virtual) { 3866 } else if (is_virtual) {
3868 null_check_receiver(method); 3867 null_check_receiver(method);
3869 int vtable_index = Method::invalid_vtable_index; 3868 int vtable_index = Method::invalid_vtable_index;
3870 if (UseInlineCaches) { 3869 if (UseInlineCaches) {
3871 // Suppress the vtable call 3870 // Suppress the vtable call
3873 // hashCode and clone are not miranda methods, 3872 // hashCode and clone are not miranda methods,
3874 // so the vtable index is fixed. 3873 // so the vtable index is fixed.
3875 // No need to use the linkResolver to get it. 3874 // No need to use the linkResolver to get it.
3876 vtable_index = method->vtable_index(); 3875 vtable_index = method->vtable_index();
3877 } 3876 }
3878 slow_call = new(C, tfdc) CallDynamicJavaNode(tf, 3877 slow_call = new(C) CallDynamicJavaNode(tf,
3879 SharedRuntime::get_resolve_virtual_call_stub(), 3878 SharedRuntime::get_resolve_virtual_call_stub(),
3880 method, vtable_index, bci()); 3879 method, vtable_index, bci());
3881 } else { // neither virtual nor static: opt_virtual 3880 } else { // neither virtual nor static: opt_virtual
3882 null_check_receiver(method); 3881 null_check_receiver(method);
3883 slow_call = new(C, tfdc) CallStaticJavaNode(tf, 3882 slow_call = new(C) CallStaticJavaNode(tf,
3884 SharedRuntime::get_resolve_opt_virtual_call_stub(), 3883 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3885 method, bci()); 3884 method, bci());
3886 slow_call->set_optimized_virtual(true); 3885 slow_call->set_optimized_virtual(true);
3887 } 3886 }
3888 set_arguments_for_java_call(slow_call); 3887 set_arguments_for_java_call(slow_call);
3897 assert(is_static == callee()->is_static(), "correct intrinsic selection"); 3896 assert(is_static == callee()->is_static(), "correct intrinsic selection");
3898 assert(!(is_virtual && is_static), "either virtual, special, or static"); 3897 assert(!(is_virtual && is_static), "either virtual, special, or static");
3899 3898
3900 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT }; 3899 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3901 3900
3902 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); 3901 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3903 PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, 3902 PhiNode* result_val = new(C) PhiNode(result_reg,
3904 TypeInt::INT); 3903 TypeInt::INT);
3905 PhiNode* result_io = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); 3904 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO);
3906 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, 3905 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
3907 TypePtr::BOTTOM); 3906 TypePtr::BOTTOM);
3908 Node* obj = NULL; 3907 Node* obj = NULL;
3909 if (!is_static) { 3908 if (!is_static) {
3910 // Check for hashing null object 3909 // Check for hashing null object
3911 obj = null_check_receiver(callee()); 3910 obj = null_check_receiver(callee());
3912 if (stopped()) return true; // unconditionally null 3911 if (stopped()) return true; // unconditionally null
3936 // This call may be virtual (invokevirtual) or bound (invokespecial). 3935 // This call may be virtual (invokevirtual) or bound (invokespecial).
3937 // For each case we generate slightly different code. 3936 // For each case we generate slightly different code.
3938 3937
3939 // We only go to the fast case code if we pass a number of guards. The 3938 // We only go to the fast case code if we pass a number of guards. The
3940 // paths which do not pass are accumulated in the slow_region. 3939 // paths which do not pass are accumulated in the slow_region.
3941 RegionNode* slow_region = new (C, 1) RegionNode(1); 3940 RegionNode* slow_region = new (C) RegionNode(1);
3942 record_for_igvn(slow_region); 3941 record_for_igvn(slow_region);
3943 3942
3944 // If this is a virtual call, we generate a funny guard. We pull out 3943 // If this is a virtual call, we generate a funny guard. We pull out
3945 // the vtable entry corresponding to hashCode() from the target object. 3944 // the vtable entry corresponding to hashCode() from the target object.
3946 // If the target method which we are calling happens to be the native 3945 // If the target method which we are calling happens to be the native
3955 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); 3954 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3956 Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type()); 3955 Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
3957 3956
3958 // Test the header to see if it is unlocked. 3957 // Test the header to see if it is unlocked.
3959 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); 3958 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
3960 Node *lmasked_header = _gvn.transform( new (C, 3) AndXNode(header, lock_mask) ); 3959 Node *lmasked_header = _gvn.transform( new (C) AndXNode(header, lock_mask) );
3961 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); 3960 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
3962 Node *chk_unlocked = _gvn.transform( new (C, 3) CmpXNode( lmasked_header, unlocked_val)); 3961 Node *chk_unlocked = _gvn.transform( new (C) CmpXNode( lmasked_header, unlocked_val));
3963 Node *test_unlocked = _gvn.transform( new (C, 2) BoolNode( chk_unlocked, BoolTest::ne) ); 3962 Node *test_unlocked = _gvn.transform( new (C) BoolNode( chk_unlocked, BoolTest::ne) );
3964 3963
3965 generate_slow_guard(test_unlocked, slow_region); 3964 generate_slow_guard(test_unlocked, slow_region);
3966 3965
3967 // Get the hash value and check to see that it has been properly assigned. 3966 // Get the hash value and check to see that it has been properly assigned.
3968 // We depend on hash_mask being at most 32 bits and avoid the use of 3967 // We depend on hash_mask being at most 32 bits and avoid the use of
3969 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit 3968 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3970 // vm: see markOop.hpp. 3969 // vm: see markOop.hpp.
3971 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); 3970 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
3972 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); 3971 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
3973 Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) ); 3972 Node *hshifted_header= _gvn.transform( new (C) URShiftXNode(header, hash_shift) );
3974 // This hack lets the hash bits live anywhere in the mark object now, as long 3973 // This hack lets the hash bits live anywhere in the mark object now, as long
3975 // as the shift drops the relevant bits into the low 32 bits. Note that 3974 // as the shift drops the relevant bits into the low 32 bits. Note that
3976 // Java spec says that HashCode is an int so there's no point in capturing 3975 // Java spec says that HashCode is an int so there's no point in capturing
3977 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). 3976 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3978 hshifted_header = ConvX2I(hshifted_header); 3977 hshifted_header = ConvX2I(hshifted_header);
3979 Node *hash_val = _gvn.transform( new (C, 3) AndINode(hshifted_header, hash_mask) ); 3978 Node *hash_val = _gvn.transform( new (C) AndINode(hshifted_header, hash_mask) );
3980 3979
3981 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); 3980 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
3982 Node *chk_assigned = _gvn.transform( new (C, 3) CmpINode( hash_val, no_hash_val)); 3981 Node *chk_assigned = _gvn.transform( new (C) CmpINode( hash_val, no_hash_val));
3983 Node *test_assigned = _gvn.transform( new (C, 2) BoolNode( chk_assigned, BoolTest::eq) ); 3982 Node *test_assigned = _gvn.transform( new (C) BoolNode( chk_assigned, BoolTest::eq) );
3984 3983
3985 generate_slow_guard(test_assigned, slow_region); 3984 generate_slow_guard(test_assigned, slow_region);
3986 3985
3987 Node* init_mem = reset_memory(); 3986 Node* init_mem = reset_memory();
3988 // fill in the rest of the null path: 3987 // fill in the rest of the null path:
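The identity-hashCode fast path reads the mark word, insists the object is unlocked, shifts and masks out the hash bits, and falls back to the runtime when no hash has been assigned yet. A sketch of that probe; the bit-layout constants below are illustrative stand-ins for markOopDesc's values, not guaranteed to match them:

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kLockMaskInPlace = 0x7;        // low lock/biased bits (assumed)
    static const uintptr_t kUnlockedValue   = 0x1;        // assumed
    static const int       kHashShift       = 8;          // assumed
    static const uintptr_t kHashMask        = 0x7FFFFFFF; // assumed, <= 32 bits
    static const uintptr_t kNoHash          = 0;          // assumed

    // Returns -1 when the slow path (runtime call) would be required.
    static long fast_hash(uintptr_t header) {
      if ((header & kLockMaskInPlace) != kUnlockedValue) return -1; // locked or biased
      uintptr_t hash = (header >> kHashShift) & kHashMask;          // URShiftX + AndI
      if (hash == kNoHash) return -1;                               // not assigned yet
      return static_cast<long>(hash);
    }

    int main() {
      uintptr_t header = (uintptr_t(0x1234) << kHashShift) | kUnlockedValue;
      assert(fast_hash(header) == 0x1234);
      assert(fast_hash(kUnlockedValue) == -1);   // hash not yet assigned
      return 0;
    }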
4182 // restore the arguments 4181 // restore the arguments
4183 _sp += arg_size(); 4182 _sp += arg_size();
4184 4183
4185 switch (id) { 4184 switch (id) {
4186 case vmIntrinsics::_floatToRawIntBits: 4185 case vmIntrinsics::_floatToRawIntBits:
4187 push(_gvn.transform( new (C, 2) MoveF2INode(pop()))); 4186 push(_gvn.transform( new (C) MoveF2INode(pop())));
4188 break; 4187 break;
4189 4188
4190 case vmIntrinsics::_intBitsToFloat: 4189 case vmIntrinsics::_intBitsToFloat:
4191 push(_gvn.transform( new (C, 2) MoveI2FNode(pop()))); 4190 push(_gvn.transform( new (C) MoveI2FNode(pop())));
4192 break; 4191 break;
4193 4192
4194 case vmIntrinsics::_doubleToRawLongBits: 4193 case vmIntrinsics::_doubleToRawLongBits:
4195 push_pair(_gvn.transform( new (C, 2) MoveD2LNode(pop_pair()))); 4194 push_pair(_gvn.transform( new (C) MoveD2LNode(pop_pair())));
4196 break; 4195 break;
4197 4196
4198 case vmIntrinsics::_longBitsToDouble: 4197 case vmIntrinsics::_longBitsToDouble:
4199 push_pair(_gvn.transform( new (C, 2) MoveL2DNode(pop_pair()))); 4198 push_pair(_gvn.transform( new (C) MoveL2DNode(pop_pair())));
4200 break; 4199 break;
4201 4200
4202 case vmIntrinsics::_doubleToLongBits: { 4201 case vmIntrinsics::_doubleToLongBits: {
4203 Node* value = pop_pair(); 4202 Node* value = pop_pair();
4204 4203
4205 // two paths (plus control) merge in a wood 4204 // two paths (plus control) merge in a wood
4206 RegionNode *r = new (C, 3) RegionNode(3); 4205 RegionNode *r = new (C) RegionNode(3);
4207 Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG); 4206 Node *phi = new (C) PhiNode(r, TypeLong::LONG);
4208 4207
4209 Node *cmpisnan = _gvn.transform( new (C, 3) CmpDNode(value, value)); 4208 Node *cmpisnan = _gvn.transform( new (C) CmpDNode(value, value));
4210 // Build the boolean node 4209 // Build the boolean node
4211 Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) ); 4210 Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
4212 4211
4213 // Branch either way. 4212 // Branch either way.
4214 // NaN case is less traveled, which makes all the difference. 4213 // NaN case is less traveled, which makes all the difference.
4215 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 4214 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4216 Node *opt_isnan = _gvn.transform(ifisnan); 4215 Node *opt_isnan = _gvn.transform(ifisnan);
4217 assert( opt_isnan->is_If(), "Expect an IfNode"); 4216 assert( opt_isnan->is_If(), "Expect an IfNode");
4218 IfNode *opt_ifisnan = (IfNode*)opt_isnan; 4217 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4219 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) ); 4218 Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) );
4220 4219
4221 set_control(iftrue); 4220 set_control(iftrue);
4222 4221
4223 static const jlong nan_bits = CONST64(0x7ff8000000000000); 4222 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4224 Node *slow_result = longcon(nan_bits); // return NaN 4223 Node *slow_result = longcon(nan_bits); // return NaN
4225 phi->init_req(1, _gvn.transform( slow_result )); 4224 phi->init_req(1, _gvn.transform( slow_result ));
4226 r->init_req(1, iftrue); 4225 r->init_req(1, iftrue);
4227 4226
4228 // Else fall through 4227 // Else fall through
4229 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) ); 4228 Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
4230 set_control(iffalse); 4229 set_control(iffalse);
4231 4230
4232 phi->init_req(2, _gvn.transform( new (C, 2) MoveD2LNode(value))); 4231 phi->init_req(2, _gvn.transform( new (C) MoveD2LNode(value)));
4233 r->init_req(2, iffalse); 4232 r->init_req(2, iffalse);
4234 4233
4235 // Post merge 4234 // Post merge
4236 set_control(_gvn.transform(r)); 4235 set_control(_gvn.transform(r));
4237 record_for_igvn(r); 4236 record_for_igvn(r);
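doubleToLongBits (and, analogously, floatToIntBits with the 0x7fc00000 pattern below) splits on the value-compared-to-itself NaN test: NaNs collapse to one canonical bit pattern, everything else is a raw bit move. A scalar equivalent:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Scalar equivalent of the doubleToLongBits intrinsic.
    static int64_t double_to_long_bits(double value) {
      if (value != value) {                       // the CmpD/Bool(ne) NaN test
        return INT64_C(0x7ff8000000000000);       // canonical NaN bits
      }
      int64_t raw;
      std::memcpy(&raw, &value, sizeof raw);      // MoveD2LNode
      return raw;
    }

    int main() {
      assert(double_to_long_bits(std::nan("")) == INT64_C(0x7ff8000000000000));
      assert(double_to_long_bits(1.0)          == INT64_C(0x3ff0000000000000));
      return 0;
    }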
4247 4246
4248 case vmIntrinsics::_floatToIntBits: { 4247 case vmIntrinsics::_floatToIntBits: {
4249 Node* value = pop(); 4248 Node* value = pop();
4250 4249
4251 // two paths (plus control) merge in a wood 4250 // two paths (plus control) merge in a wood
4252 RegionNode *r = new (C, 3) RegionNode(3); 4251 RegionNode *r = new (C) RegionNode(3);
4253 Node *phi = new (C, 3) PhiNode(r, TypeInt::INT); 4252 Node *phi = new (C) PhiNode(r, TypeInt::INT);
4254 4253
4255 Node *cmpisnan = _gvn.transform( new (C, 3) CmpFNode(value, value)); 4254 Node *cmpisnan = _gvn.transform( new (C) CmpFNode(value, value));
4256 // Build the boolean node 4255 // Build the boolean node
4257 Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) ); 4256 Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
4258 4257
4259 // Branch either way. 4258 // Branch either way.
4260 // NaN case is less traveled, which makes all the difference. 4259 // NaN case is less traveled, which makes all the difference.
4261 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 4260 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4262 Node *opt_isnan = _gvn.transform(ifisnan); 4261 Node *opt_isnan = _gvn.transform(ifisnan);
4263 assert( opt_isnan->is_If(), "Expect an IfNode"); 4262 assert( opt_isnan->is_If(), "Expect an IfNode");
4264 IfNode *opt_ifisnan = (IfNode*)opt_isnan; 4263 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4265 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) ); 4264 Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) );
4266 4265
4267 set_control(iftrue); 4266 set_control(iftrue);
4268 4267
4269 static const jint nan_bits = 0x7fc00000; 4268 static const jint nan_bits = 0x7fc00000;
4270 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN 4269 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4271 phi->init_req(1, _gvn.transform( slow_result )); 4270 phi->init_req(1, _gvn.transform( slow_result ));
4272 r->init_req(1, iftrue); 4271 r->init_req(1, iftrue);
4273 4272
4274 // Else fall through 4273 // Else fall through
4275 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) ); 4274 Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
4276 set_control(iffalse); 4275 set_control(iffalse);
4277 4276
4278 phi->init_req(2, _gvn.transform( new (C, 2) MoveF2INode(value))); 4277 phi->init_req(2, _gvn.transform( new (C) MoveF2INode(value)));
4279 r->init_req(2, iffalse); 4278 r->init_req(2, iffalse);
4280 4279
4281 // Post merge 4280 // Post merge
4282 set_control(_gvn.transform(r)); 4281 set_control(_gvn.transform(r));
4283 record_for_igvn(r); 4282 record_for_igvn(r);
4395 src = basic_plus_adr(src, base_off); 4394 src = basic_plus_adr(src, base_off);
4396 dest = basic_plus_adr(dest, base_off); 4395 dest = basic_plus_adr(dest, base_off);
4397 4396
4398 // Compute the length also, if needed: 4397 // Compute the length also, if needed:
4399 Node* countx = size; 4398 Node* countx = size;
4400 countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) ); 4399 countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(base_off)) );
4401 countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) )); 4400 countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4402 4401
4403 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; 4402 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4404 bool disjoint_bases = true; 4403 bool disjoint_bases = true;
4405 generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases, 4404 generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4406 src, NULL, dest, NULL, countx, 4405 src, NULL, dest, NULL, countx,
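The raw clone above copies the instance payload in 64-bit words: it subtracts the header/base offset from the instance size and shifts right by LogBytesPerLong. A trivial worked example of that count (the sizes are made up):

    #include <cassert>

    // word_count = (instance_size_in_bytes - base_off) >> LogBytesPerLong
    static long words_to_copy(long size_in_bytes, long base_off) {
      const int LogBytesPerLong = 3;              // 8-byte words
      return (size_in_bytes - base_off) >> LogBytesPerLong;
    }

    int main() {
      // e.g. a 32-byte instance with a 16-byte base offset -> two 64-bit words
      assert(words_to_copy(32, 16) == 2);
      return 0;
    }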
4487 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy 4486 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4488 _array_path, // plain array allocation, plus arrayof_long_arraycopy 4487 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4489 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy 4488 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4490 PATH_LIMIT 4489 PATH_LIMIT
4491 }; 4490 };
4492 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); 4491 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
4493 result_val = new(C, PATH_LIMIT) PhiNode(result_reg, 4492 result_val = new(C) PhiNode(result_reg,
4494 TypeInstPtr::NOTNULL); 4493 TypeInstPtr::NOTNULL);
4495 PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); 4494 PhiNode* result_i_o = new(C) PhiNode(result_reg, Type::ABIO);
4496 PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, 4495 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
4497 TypePtr::BOTTOM); 4496 TypePtr::BOTTOM);
4498 record_for_igvn(result_reg); 4497 record_for_igvn(result_reg);
4499 4498
4500 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; 4499 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4501 int raw_adr_idx = Compile::AliasIdxRaw; 4500 int raw_adr_idx = Compile::AliasIdxRaw;
4502 4501
4548 } 4547 }
4549 } 4548 }
4550 4549
4551 // We only go to the instance fast case code if we pass a number of guards. 4550 // We only go to the instance fast case code if we pass a number of guards.
4552 // The paths which do not pass are accumulated in the slow_region. 4551 // The paths which do not pass are accumulated in the slow_region.
4553 RegionNode* slow_region = new (C, 1) RegionNode(1); 4552 RegionNode* slow_region = new (C) RegionNode(1);
4554 record_for_igvn(slow_region); 4553 record_for_igvn(slow_region);
4555 if (!stopped()) { 4554 if (!stopped()) {
4556 // It's an instance (we did array above). Make the slow-path tests. 4555 // It's an instance (we did array above). Make the slow-path tests.
4557 // If this is a virtual call, we generate a funny guard. We grab 4556 // If this is a virtual call, we generate a funny guard. We grab
4558 // the vtable entry corresponding to clone() from the target object. 4557 // the vtable entry corresponding to clone() from the target object.
4718 // (6) length must not be negative. 4717 // (6) length must not be negative.
4719 // (7) src_offset + length must not exceed length of src. 4718 // (7) src_offset + length must not exceed length of src.
4720 // (8) dest_offset + length must not exceed length of dest. 4719 // (8) dest_offset + length must not exceed length of dest.
4721 // (9) each element of an oop array must be assignable 4720 // (9) each element of an oop array must be assignable
4722 4721
4723 RegionNode* slow_region = new (C, 1) RegionNode(1); 4722 RegionNode* slow_region = new (C) RegionNode(1);
4724 record_for_igvn(slow_region); 4723 record_for_igvn(slow_region);
4725 4724
4726 // (3) operands must not be null 4725 // (3) operands must not be null
4727 // We currently perform our null checks with the do_null_check routine. 4726 // We currently perform our null checks with the do_null_check routine.
4728 // This means that the null exceptions will be reported in the caller 4727 // This means that the null exceptions will be reported in the caller
4808 bool disjoint_bases, 4807 bool disjoint_bases,
4809 bool length_never_negative, 4808 bool length_never_negative,
4810 RegionNode* slow_region) { 4809 RegionNode* slow_region) {
4811 4810
4812 if (slow_region == NULL) { 4811 if (slow_region == NULL) {
4813 slow_region = new(C,1) RegionNode(1); 4812 slow_region = new(C) RegionNode(1);
4814 record_for_igvn(slow_region); 4813 record_for_igvn(slow_region);
4815 } 4814 }
4816 4815
4817 Node* original_dest = dest; 4816 Node* original_dest = dest;
4818 AllocateArrayNode* alloc = NULL; // used for zeroing, if needed 4817 AllocateArrayNode* alloc = NULL; // used for zeroing, if needed
4856 slow_call_path = 3, // something went wrong; call the VM 4855 slow_call_path = 3, // something went wrong; call the VM
4857 zero_path = 4, // bypass when length of copy is zero 4856 zero_path = 4, // bypass when length of copy is zero
4858 bcopy_path = 5, // copy primitive array by 64-bit blocks 4857 bcopy_path = 5, // copy primitive array by 64-bit blocks
4859 PATH_LIMIT = 6 4858 PATH_LIMIT = 6
4860 }; 4859 };
4861 RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); 4860 RegionNode* result_region = new(C) RegionNode(PATH_LIMIT);
4862 PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO); 4861 PhiNode* result_i_o = new(C) PhiNode(result_region, Type::ABIO);
4863 PhiNode* result_memory = new(C, PATH_LIMIT) PhiNode(result_region, Type::MEMORY, adr_type); 4862 PhiNode* result_memory = new(C) PhiNode(result_region, Type::MEMORY, adr_type);
4864 record_for_igvn(result_region); 4863 record_for_igvn(result_region);
4865 _gvn.set_type_bottom(result_i_o); 4864 _gvn.set_type_bottom(result_i_o);
4866 _gvn.set_type_bottom(result_memory); 4865 _gvn.set_type_bottom(result_memory);
4867 assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice"); 4866 assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
4868 4867
4932 // We have to initialize the *uncopied* part of the array to zero. 4931 // We have to initialize the *uncopied* part of the array to zero.
4933 // The copy destination is the slice dest[off..off+len]. The other slices 4932 // The copy destination is the slice dest[off..off+len]. The other slices
4934 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. 4933 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
4935 Node* dest_size = alloc->in(AllocateNode::AllocSize); 4934 Node* dest_size = alloc->in(AllocateNode::AllocSize);
4936 Node* dest_length = alloc->in(AllocateNode::ALength); 4935 Node* dest_length = alloc->in(AllocateNode::ALength);
4937 Node* dest_tail = _gvn.transform( new(C,3) AddINode(dest_offset, 4936 Node* dest_tail = _gvn.transform( new(C) AddINode(dest_offset,
4938 copy_length) ); 4937 copy_length) );
4939 4938
4940 // If there is a head section that needs zeroing, do it now. 4939 // If there is a head section that needs zeroing, do it now.
4941 if (find_int_con(dest_offset, -1) != 0) { 4940 if (find_int_con(dest_offset, -1) != 0) {
4942 generate_clear_array(adr_type, dest, basic_elem_type, 4941 generate_clear_array(adr_type, dest, basic_elem_type,
4949 // There are two wins: Avoid generating the ClearArray 4948 // There are two wins: Avoid generating the ClearArray
4950 // with its attendant messy index arithmetic, and upgrade 4949 // with its attendant messy index arithmetic, and upgrade
4951 // the copy to a more hardware-friendly word size of 64 bits. 4950 // the copy to a more hardware-friendly word size of 64 bits.
4952 Node* tail_ctl = NULL; 4951 Node* tail_ctl = NULL;
4953 if (!stopped() && !dest_tail->eqv_uncast(dest_length)) { 4952 if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
4954 Node* cmp_lt = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) ); 4953 Node* cmp_lt = _gvn.transform( new(C) CmpINode(dest_tail, dest_length) );
4955 Node* bol_lt = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) ); 4954 Node* bol_lt = _gvn.transform( new(C) BoolNode(cmp_lt, BoolTest::lt) );
4956 tail_ctl = generate_slow_guard(bol_lt, NULL); 4955 tail_ctl = generate_slow_guard(bol_lt, NULL);
4957 assert(tail_ctl != NULL || !stopped(), "must be an outcome"); 4956 assert(tail_ctl != NULL || !stopped(), "must be an outcome");
4958 } 4957 }
4959 4958
4960 // At this point, let's assume there is no tail. 4959 // At this point, let's assume there is no tail.
4984 generate_clear_array(adr_type, dest, basic_elem_type, 4983 generate_clear_array(adr_type, dest, basic_elem_type,
4985 dest_tail, NULL, 4984 dest_tail, NULL,
4986 dest_size); 4985 dest_size);
4987 } else { 4986 } else {
4988 // Make a local merge. 4987 // Make a local merge.
4989 Node* done_ctl = new(C,3) RegionNode(3); 4988 Node* done_ctl = new(C) RegionNode(3);
4990 Node* done_mem = new(C,3) PhiNode(done_ctl, Type::MEMORY, adr_type); 4989 Node* done_mem = new(C) PhiNode(done_ctl, Type::MEMORY, adr_type);
4991 done_ctl->init_req(1, notail_ctl); 4990 done_ctl->init_req(1, notail_ctl);
4992 done_mem->init_req(1, memory(adr_type)); 4991 done_mem->init_req(1, memory(adr_type));
4993 generate_clear_array(adr_type, dest, basic_elem_type, 4992 generate_clear_array(adr_type, dest, basic_elem_type,
4994 dest_tail, NULL, 4993 dest_tail, NULL,
4995 dest_size); 4994 dest_size);
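Because the destination of this copy is freshly allocated, only the slices the copy does not overwrite need clearing: the head before dest_offset and, when dest_offset + copy_length falls short of the array length, the tail after it. A scalar model of that decision:

    #include <cassert>
    #include <utility>

    // Which of the two uncopied slices of a fresh destination must be zeroed.
    static std::pair<bool, bool> slices_to_zero(int dest_offset, int copy_length,
                                                int dest_length) {
      int dest_tail  = dest_offset + copy_length;     // AddINode(dest_offset, copy_length)
      bool zero_head = (dest_offset != 0);
      bool zero_tail = (dest_tail   != dest_length);  // the CmpI/Bool(lt) tail guard
      return std::make_pair(zero_head, zero_tail);
    }

    int main() {
      assert(slices_to_zero(0, 10, 10) == std::make_pair(false, false)); // full overwrite
      assert(slices_to_zero(2,  5, 10) == std::make_pair(true,  true));  // head and tail
      return 0;
    }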
5080 set_control(checked_control); 5079 set_control(checked_control);
5081 if (!stopped()) { 5080 if (!stopped()) {
5082 // Clean up after the checked call. 5081 // Clean up after the checked call.
5083 // The returned value is either 0 or -1^K, 5082 // The returned value is either 0 or -1^K,
5084 // where K = number of partially transferred array elements. 5083 // where K = number of partially transferred array elements.
5085 Node* cmp = _gvn.transform( new(C, 3) CmpINode(checked_value, intcon(0)) ); 5084 Node* cmp = _gvn.transform( new(C) CmpINode(checked_value, intcon(0)) );
5086 Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); 5085 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
5087 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); 5086 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
5088 5087
5089 // If it is 0, we are done, so transfer to the end. 5088 // If it is 0, we are done, so transfer to the end.
5090 Node* checks_done = _gvn.transform( new(C, 1) IfTrueNode(iff) ); 5089 Node* checks_done = _gvn.transform( new(C) IfTrueNode(iff) );
5091 result_region->init_req(checked_path, checks_done); 5090 result_region->init_req(checked_path, checks_done);
5092 result_i_o ->init_req(checked_path, checked_i_o); 5091 result_i_o ->init_req(checked_path, checked_i_o);
5093 result_memory->init_req(checked_path, checked_mem); 5092 result_memory->init_req(checked_path, checked_mem);
5094 5093
5095 // If it is not zero, merge into the slow call. 5094 // If it is not zero, merge into the slow call.
5096 set_control( _gvn.transform( new(C, 1) IfFalseNode(iff) )); 5095 set_control( _gvn.transform( new(C) IfFalseNode(iff) ));
5097 RegionNode* slow_reg2 = new(C, 3) RegionNode(3); 5096 RegionNode* slow_reg2 = new(C) RegionNode(3);
5098 PhiNode* slow_i_o2 = new(C, 3) PhiNode(slow_reg2, Type::ABIO); 5097 PhiNode* slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
5099 PhiNode* slow_mem2 = new(C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type); 5098 PhiNode* slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
5100 record_for_igvn(slow_reg2); 5099 record_for_igvn(slow_reg2);
5101 slow_reg2 ->init_req(1, slow_control); 5100 slow_reg2 ->init_req(1, slow_control);
5102 slow_i_o2 ->init_req(1, slow_i_o); 5101 slow_i_o2 ->init_req(1, slow_i_o);
5103 slow_mem2 ->init_req(1, slow_mem); 5102 slow_mem2 ->init_req(1, slow_mem);
5104 slow_reg2 ->init_req(2, control()); 5103 slow_reg2 ->init_req(2, control());
5114 // This can cause double writes, but that's OK since dest is brand new. 5113 // This can cause double writes, but that's OK since dest is brand new.
5115 // So we ignore the low 31 bits of the value returned from the stub. 5114 // So we ignore the low 31 bits of the value returned from the stub.
5116 } else { 5115 } else {
5117 // We must continue the copy exactly where it failed, or else 5116 // We must continue the copy exactly where it failed, or else
5118 // another thread might see the wrong number of writes to dest. 5117 // another thread might see the wrong number of writes to dest.
5119 Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) ); 5118 Node* checked_offset = _gvn.transform( new(C) XorINode(checked_value, intcon(-1)) );
5120 Node* slow_offset = new(C, 3) PhiNode(slow_reg2, TypeInt::INT); 5119 Node* slow_offset = new(C) PhiNode(slow_reg2, TypeInt::INT);
5121 slow_offset->init_req(1, intcon(0)); 5120 slow_offset->init_req(1, intcon(0));
5122 slow_offset->init_req(2, checked_offset); 5121 slow_offset->init_req(2, checked_offset);
5123 slow_offset = _gvn.transform(slow_offset); 5122 slow_offset = _gvn.transform(slow_offset);
5124 5123
5125 // Adjust the arguments by the conditionally incoming offset. 5124 // Adjust the arguments by the conditionally incoming offset.
5126 Node* src_off_plus = _gvn.transform( new(C, 3) AddINode(src_offset, slow_offset) ); 5125 Node* src_off_plus = _gvn.transform( new(C) AddINode(src_offset, slow_offset) );
5127 Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) ); 5126 Node* dest_off_plus = _gvn.transform( new(C) AddINode(dest_offset, slow_offset) );
5128 Node* length_minus = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) ); 5127 Node* length_minus = _gvn.transform( new(C) SubINode(copy_length, slow_offset) );
5129 5128
5130 // Tweak the node variables to adjust the code produced below: 5129 // Tweak the node variables to adjust the code produced below:
5131 src_offset = src_off_plus; 5130 src_offset = src_off_plus;
5132 dest_offset = dest_off_plus; 5131 dest_offset = dest_off_plus;
5133 copy_length = length_minus; 5132 copy_length = length_minus;
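The checkcast stub's return convention drives the cleanup above: 0 means everything was copied, while ~K (that is, -1 ^ K) means only the first K elements were transferred before an element failed its type check; the intrinsic recovers K with another XOR and restarts the copy K elements further along. A worked example of that arithmetic:

    #include <cassert>

    struct Resume { int src_off; int dest_off; int length; };

    // Recover K from the stub's ~K return value and shift the copy arguments by K.
    static Resume resume_after_partial_copy(int checked_value,
                                            int src_offset, int dest_offset,
                                            int copy_length) {
      int k = checked_value ^ -1;                 // XorINode(checked_value, intcon(-1))
      return Resume{ src_offset + k, dest_offset + k, copy_length - k };
    }

    int main() {
      // Stub reports that 3 of 10 elements were copied before the check failed.
      Resume r = resume_after_partial_copy(~3, 2, 5, 10);
      assert(r.src_off == 5 && r.dest_off == 8 && r.length == 7);
      return 0;
    }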
5344 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8) 5343 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
5345 intptr_t end_base = abase + (slice_idx_con << scale); 5344 intptr_t end_base = abase + (slice_idx_con << scale);
5346 int end_round = (-1 << scale) & (BytesPerLong - 1); 5345 int end_round = (-1 << scale) & (BytesPerLong - 1);
5347 Node* end = ConvI2X(slice_len); 5346 Node* end = ConvI2X(slice_len);
5348 if (scale != 0) 5347 if (scale != 0)
5349 end = _gvn.transform( new(C,3) LShiftXNode(end, intcon(scale) )); 5348 end = _gvn.transform( new(C) LShiftXNode(end, intcon(scale) ));
5350 end_base += end_round; 5349 end_base += end_round;
5351 end = _gvn.transform( new(C,3) AddXNode(end, MakeConX(end_base)) ); 5350 end = _gvn.transform( new(C) AddXNode(end, MakeConX(end_base)) );
5352 end = _gvn.transform( new(C,3) AndXNode(end, MakeConX(~end_round)) ); 5351 end = _gvn.transform( new(C) AndXNode(end, MakeConX(~end_round)) );
5353 mem = ClearArrayNode::clear_memory(control(), mem, dest, 5352 mem = ClearArrayNode::clear_memory(control(), mem, dest,
5354 start_con, end, &_gvn); 5353 start_con, end, &_gvn);
5355 } else if (start_con < 0 && dest_size != top()) { 5354 } else if (start_con < 0 && dest_size != top()) {
5356 // Non-constant start, pre-rounded end after the tail of the array. 5355 // Non-constant start, pre-rounded end after the tail of the array.
5357 // This is almost certainly a "round-to-end" operation. 5356 // This is almost certainly a "round-to-end" operation.
5358 Node* start = slice_idx; 5357 Node* start = slice_idx;
5359 start = ConvI2X(start); 5358 start = ConvI2X(start);
5360 if (scale != 0) 5359 if (scale != 0)
5361 start = _gvn.transform( new(C,3) LShiftXNode( start, intcon(scale) )); 5360 start = _gvn.transform( new(C) LShiftXNode( start, intcon(scale) ));
5362 start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(abase)) ); 5361 start = _gvn.transform( new(C) AddXNode(start, MakeConX(abase)) );
5363 if ((bump_bit | clear_low) != 0) { 5362 if ((bump_bit | clear_low) != 0) {
5364 int to_clear = (bump_bit | clear_low); 5363 int to_clear = (bump_bit | clear_low);
5365 // Align up mod 8, then store a jint zero unconditionally 5364 // Align up mod 8, then store a jint zero unconditionally
5366 // just before the mod-8 boundary. 5365 // just before the mod-8 boundary.
5367 if (((abase + bump_bit) & ~to_clear) - bump_bit 5366 if (((abase + bump_bit) & ~to_clear) - bump_bit
5368 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) { 5367 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
5369 bump_bit = 0; 5368 bump_bit = 0;
5370 assert((abase & to_clear) == 0, "array base must be long-aligned"); 5369 assert((abase & to_clear) == 0, "array base must be long-aligned");
5371 } else { 5370 } else {
5372 // Bump 'start' up to (or past) the next jint boundary: 5371 // Bump 'start' up to (or past) the next jint boundary:
5373 start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) ); 5372 start = _gvn.transform( new(C) AddXNode(start, MakeConX(bump_bit)) );
5374 assert((abase & clear_low) == 0, "array base must be int-aligned"); 5373 assert((abase & clear_low) == 0, "array base must be int-aligned");
5375 } 5374 }
5376 // Round bumped 'start' down to jlong boundary in body of array. 5375 // Round bumped 'start' down to jlong boundary in body of array.
5377 start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) ); 5376 start = _gvn.transform( new(C) AndXNode(start, MakeConX(~to_clear)) );
5378 if (bump_bit != 0) { 5377 if (bump_bit != 0) {
5379 // Store a zero to the immediately preceding jint: 5378 // Store a zero to the immediately preceding jint:
5380 Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) ); 5379 Node* x1 = _gvn.transform( new(C) AddXNode(start, MakeConX(-bump_bit)) );
5381 Node* p1 = basic_plus_adr(dest, x1); 5380 Node* p1 = basic_plus_adr(dest, x1);
5382 mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT); 5381 mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
5383 mem = _gvn.transform(mem); 5382 mem = _gvn.transform(mem);
5384 } 5383 }
5385 } 5384 }
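The clearing code rounds the byte range onto 8-byte words: the end offset is rounded up via end_round, and a non-constant start is bumped to the next jint boundary (with an explicit zero store just below it) before being rounded down to a jlong boundary. A worked example for a jint array; abase, bump_bit and to_clear are assumed values for this sketch only:

    #include <cassert>

    int main() {
      const int  BytesPerLong = 8;
      const int  scale        = 2;                    // log2(sizeof(jint))
      const long abase        = 16;                   // assumed array base offset

      // End offset = round_up(abase + ((slice_idx + slice_len) << scale), 8).
      long slice_idx = 0, slice_len = 5;
      int  end_round = (-1 << scale) & (BytesPerLong - 1);          // == 4 for jint
      long end = (abase + ((slice_idx + slice_len) << scale) + end_round)
                 & ~(long)end_round;
      assert(end_round == 4);
      assert(end == 40);                              // 36 rounded up to the next 8

      // Non-constant start: bump to the next jint boundary, then round down to a
      // jlong boundary; the skipped jint gets an explicit zero store.
      long start  = abase + (3 << scale);             // element 3 -> byte offset 28
      long bumped = (start + 4) & ~(long)7;           // AddXNode(bump_bit) + AndXNode(~to_clear)
      assert(bumped == 32);                           // zero the jint at bumped - 4 == 28
      return 0;
    }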
5442 5441
5443 // Do this copy by giant steps. 5442 // Do this copy by giant steps.
5444 Node* sptr = basic_plus_adr(src, src_off); 5443 Node* sptr = basic_plus_adr(src, src_off);
5445 Node* dptr = basic_plus_adr(dest, dest_off); 5444 Node* dptr = basic_plus_adr(dest, dest_off);
5446 Node* countx = dest_size; 5445 Node* countx = dest_size;
5447 countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) ); 5446 countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(dest_off)) );
5448 countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) ); 5447 countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong)) );
5449 5448
5450 bool disjoint_bases = true; // since alloc != NULL 5449 bool disjoint_bases = true; // since alloc != NULL
5451 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, 5450 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5452 sptr, NULL, dptr, NULL, countx, dest_uninitialized); 5451 sptr, NULL, dptr, NULL, countx, dest_uninitialized);
5453 5452
5493 // for the target array. This is an optimistic check. It will 5492 // for the target array. This is an optimistic check. It will
5494 // look in each non-null element's class, at the desired klass's 5493 // look in each non-null element's class, at the desired klass's
5495 // super_check_offset, for the desired klass. 5494 // super_check_offset, for the desired klass.
5496 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 5495 int sco_offset = in_bytes(Klass::super_check_offset_offset());
5497 Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset); 5496 Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
5498 Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr()); 5497 Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
5499 Node* check_offset = ConvI2X(_gvn.transform(n3)); 5498 Node* check_offset = ConvI2X(_gvn.transform(n3));
5500 Node* check_value = dest_elem_klass; 5499 Node* check_value = dest_elem_klass;
5501 5500
5502 Node* src_start = array_element_address(src, src_offset, T_OBJECT); 5501 Node* src_start = array_element_address(src, src_offset, T_OBJECT);
5503 Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT); 5502 Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
5511 src_start, dest_start, 5510 src_start, dest_start,
5512 copy_length XTOP, 5511 copy_length XTOP,
5513 check_offset XTOP, 5512 check_offset XTOP,
5514 check_value); 5513 check_value);
5515 5514
5516 return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms)); 5515 return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5517 } 5516 }
5518 5517
5519 5518
5520 // Helper function; generates code for cases requiring runtime checks. 5519 // Helper function; generates code for cases requiring runtime checks.
5521 Node* 5520 Node*
5533 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, 5532 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5534 OptoRuntime::generic_arraycopy_Type(), 5533 OptoRuntime::generic_arraycopy_Type(),
5535 copyfunc_addr, "generic_arraycopy", adr_type, 5534 copyfunc_addr, "generic_arraycopy", adr_type,
5536 src, src_offset, dest, dest_offset, copy_length); 5535 src, src_offset, dest, dest_offset, copy_length);
5537 5536
5538 return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms)); 5537 return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5539 } 5538 }
5540 5539
5541 // Helper function; generates the fast out-of-line call to an arraycopy stub. 5540 // Helper function; generates the fast out-of-line call to an arraycopy stub.
5542 void 5541 void
5543 LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type, 5542 LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,