src/share/vm/opto/library_call.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge

author      Gilles Duboscq <duboscq@ssw.jku.at>
date        Tue, 01 Apr 2014 13:57:07 +0200
parents     cd5d10655495
children    52b4284cb496

comparison: 14908:8db6e76cb658 (left column, lines marked "-") vs. 14909:4ca6dc0799b6 (right column, lines marked "+")
@@ -201,13 +201,11 @@
   Node* round_double_node(Node* n);
   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
   bool inline_math(vmIntrinsics::ID id);
-  template <typename OverflowOp>
-  bool inline_math_overflow(Node* arg1, Node* arg2);
-  void inline_math_mathExact(Node* math, Node* test);
+  void inline_math_mathExact(Node* math);
   bool inline_math_addExactI(bool is_increment);
   bool inline_math_addExactL(bool is_increment);
   bool inline_math_multiplyExactI();
   bool inline_math_multiplyExactL();
   bool inline_math_negateExactI();
@@ -304,11 +302,10 @@
   bool inline_reference_get();
   bool inline_aescrypt_Block(vmIntrinsics::ID id);
   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
-  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
   bool inline_encodeISOArray();
   bool inline_updateCRC32();
   bool inline_updateBytesCRC32();
   bool inline_updateByteBufferCRC32();
 };
@@ -517,35 +514,35 @@
     if (!UseCRC32Intrinsics) return NULL;
     break;

   case vmIntrinsics::_incrementExactI:
   case vmIntrinsics::_addExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_incrementExactL:
   case vmIntrinsics::_addExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactI:
   case vmIntrinsics::_subtractExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactL:
   case vmIntrinsics::_subtractExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
     break;

   default:
     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
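
Note on the left column's gating: after the jdk9 merge there is no dedicated negate-overflow opcode, so the negateExact intrinsics test the same Op_OverflowSubI/Op_OverflowSubL rules as subtractExact, negation being expressed as 0 - x (see inline_math_negateExactI further down). For reference, a minimal sketch of the overflow conditions those nodes detect, assuming the GCC/Clang checked-arithmetic builtins:

    #include <cstdint>

    // Sketch only: the checks embodied by Op_OverflowAddI / Op_OverflowSubI.
    static bool add_overflows(int32_t a, int32_t b) {
      int32_t r;
      return __builtin_add_overflow(a, b, &r);   // true => Math.addExact would throw
    }

    static bool neg_overflows(int32_t x) {
      int32_t r;                                 // negate is checked as 0 - x;
      return __builtin_sub_overflow(0, x, &r);   // overflows only for INT32_MIN
    }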
@@ -1058,11 +1055,11 @@
 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
   ciKlass* thread_klass = env()->Thread_klass();
   const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
   Node* thread = _gvn.transform(new (C) ThreadLocalNode());
   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
-  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
+  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
   tls_output = thread;
   return threadObj;
 }

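generate_current_thread() materializes the raw TLS pointer (ThreadLocalNode) and loads the Java-level Thread oop stored at JavaThread::threadObj_offset(). A rough sketch of the address arithmetic, with an illustrative struct standing in for the real JavaThread layout:

    #include <cstddef>

    struct JavaThreadLike {
      void* vm_state[32];   // placeholder fields before _threadObj (illustrative)
      void* _threadObj;     // the java.lang.Thread oop
    };

    // basic_plus_adr(thread, threadObj_offset) followed by the T_OBJECT load:
    void* current_thread_oop(JavaThreadLike* tls) {
      char* base = reinterpret_cast<char*>(tls);
      return *reinterpret_cast<void**>(base + offsetof(JavaThreadLike, _threadObj));
    }
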
@@ -1937,11 +1934,11 @@
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
   case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");

   // These intrinsics are supported on all hardware
-  case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
+  case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_math(id) : false;
   case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;

   case vmIntrinsics::_dexp: return Matcher::has_match_rule(Op_ExpD) ? inline_exp() :
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
   case vmIntrinsics::_dpow: return Matcher::has_match_rule(Op_PowD) ? inline_pow() :
@@ -1970,12 +1967,22 @@
 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
   set_result(generate_min_max(id, argument(0), argument(1)));
   return true;
 }

-void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
-  Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
+void LibraryCallKit::inline_math_mathExact(Node* math) {
+  // If we didn't get the expected opcode it means we have optimized
+  // the node to something else and don't need the exception edge.
+  if (!math->is_MathExact()) {
+    set_result(math);
+    return;
+  }
+
+  Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
+  Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
+
+  Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
   IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
   Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
   Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );

   {
@@ -1989,54 +1996,112 @@
     uncommon_trap(Deoptimization::Reason_intrinsic,
                   Deoptimization::Action_none);
   }

   set_control(fast_path);
-  set_result(math);
+  set_result(result);
 }

-template <typename OverflowOp>
-bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
-  typedef typename OverflowOp::MathOp MathOp;
-
-  MathOp* mathOp = new(C) MathOp(arg1, arg2);
-  Node* operation = _gvn.transform( mathOp );
-  Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
-  inline_math_mathExact(operation, ofcheck);
-  return true;
-}
-
 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
-  return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
+  inline_math_mathExact(add);
+  return true;
 }

 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
-  return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(add);
+  return true;
 }

 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
-  return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
 }

 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
-  return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
 }

 bool LibraryCallKit::inline_math_negateExactI() {
-  return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
+  Node* arg1 = argument(0);
+
+  Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
 }

 bool LibraryCallKit::inline_math_negateExactL() {
-  return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+
+  Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
 }

 bool LibraryCallKit::inline_math_multiplyExactI() {
-  return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
+  Node* arg1 = argument(0);
+  Node* arg2 = argument(1);
+
+  Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
+  return true;
 }

 bool LibraryCallKit::inline_math_multiplyExactL() {
-  return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+  Node* arg2 = argument(2);
+  // argument(3) == TOP
+
+  Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
+  return true;
 }

 Node*
 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
   // These are the candidate return value:
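
Both variants lower Math.xxxExact to an arithmetic node plus an overflow test whose unlikely branch ends in an uncommon trap; only the node shapes differ (right: a MathExact node split into result/flags projections, left: a plain MathOp paired with a separate OverflowOp feeding BoolTest::overflow). A sketch of the control flow being built, assuming the GCC/Clang builtin and with deoptimize() as a hypothetical stand-in for the Reason_intrinsic trap:

    #include <cstdint>

    void deoptimize();  // hypothetical stand-in for uncommon_trap(Reason_intrinsic, Action_none)

    int32_t add_exact_like(int32_t a, int32_t b) {
      int32_t result;
      if (__builtin_add_overflow(a, b, &result)) {  // the BoolTest::overflow check
        deoptimize();  // slow path: re-execute in the interpreter, where
                       // Math.addExact itself throws ArithmeticException
      }
      return result;   // fast path
    }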
@@ -2560,17 +2625,12 @@
     // volatile membars (for stores; compare Parse::do_put_xxx), which
     // we cannot do effectively here because we probably only have a
     // rough approximation of type.
     need_mem_bar = true;
     // For Stores, place a memory ordering barrier now.
-    if (is_store) {
+    if (is_store)
       insert_mem_bar(Op_MemBarRelease);
-    } else {
-      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
-      }
-    }
   }

   // Memory barrier to prevent normal and 'unsafe' accesses from
   // bypassing each other. Happens after null checks, so the
   // exception paths do not take memory state from the memory barrier,
@@ -2578,11 +2638,11 @@
   // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

   if (!is_store) {
-    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
+    Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
     case T_CHAR:
     case T_BYTE:
@@ -2622,18 +2682,17 @@
       val = ConvL2X(val);
       val = _gvn.transform(new (C) CastX2PNode(val));
       break;
     }

-    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
     if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
+      (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
         // oop to Java heap.
-        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
       } else {
         // We can't tell at compile time if we are storing in the Java heap or outside
         // of it. So we need to emit code to conditionally do the proper type of
         // store.

@@ -2641,31 +2700,28 @@
 #define __ ideal.
         // QQQ who knows what probability is here??
         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
           // Sync IdealKit and graphKit.
           sync_kit(ideal);
-          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
 #undef __
       }
     }
   }

   if (is_volatile) {
-    if (!is_store) {
+    if (!is_store)
       insert_mem_bar(Op_MemBarAcquire);
-    } else {
-      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
-      }
-    }
+    else
+      insert_mem_bar(Op_MemBarVolatile);
   }

   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

   return true;
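
The right column restores the classic barrier scheme for volatile Unsafe accesses: MemBarRelease before a volatile store, MemBarVolatile after it, MemBarAcquire after a volatile load. The left column additionally handled IRIW-sensitive CPUs (support_IRIW_for_not_multiple_copy_atomic_cpu) by moving the trailing volatile barrier to the load side. A rough C++11-fence analogy for the restored scheme (the real graph uses MemBar* nodes, not library fences):

    #include <atomic>

    std::atomic<int> field;

    int volatile_load_like() {
      int v = field.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // MemBarAcquire
      return v;
    }

    void volatile_store_like(int v) {
      std::atomic_thread_fence(std::memory_order_release);  // MemBarRelease
      field.store(v, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // MemBarVolatile
    }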
@@ -2921,16 +2977,16 @@
 #ifdef _LP64
   if (adr->bottom_type()->is_ptr_to_narrowoop()) {
     Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
     if (kind == LS_xchg) {
       load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
                                                          newval_enc, adr_type, value_type->make_narrowoop()));
     } else {
       assert(kind == LS_cmpxchg, "wrong LoadStore operation");
       Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
       load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
                                                               newval_enc, oldval_enc));
     }
   } else
 #endif
   {
     if (kind == LS_xchg) {
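
In the narrow-oop branch above, both the expected and the new reference are compressed with EncodePNode so the atomic operation works on a 32-bit cell. The idea, sketched with encode() as a hypothetical stand-in for the (oop - heap_base) >> shift compression:

    #include <atomic>
    #include <cstdint>

    typedef uint32_t narrowOop;   // 32-bit compressed reference
    narrowOop encode(void* oop);  // hypothetical compression helper

    bool cas_oop_field(std::atomic<narrowOop>* field, void* expected, void* newval) {
      narrowOop e = encode(expected);                            // EncodePNode(oldval)
      return field->compare_exchange_strong(e, encode(newval));  // CompareAndSwapNNode
    }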
@@ -3032,13 +3088,13 @@
   insert_mem_bar(Op_MemBarCPUOrder);
   // Ensure that the store is atomic for longs:
   const bool require_atomic_access = true;
   Node* store;
   if (type == T_OBJECT) // reference stores need a store barrier.
-    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
+    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
   else {
-    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
+    store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
   }
   insert_mem_bar(Op_MemBarCPUOrder);
   return true;
 }

@@ -3046,14 +3102,14 @@
   // Regardless of form, don't allow previous ld/st to move down,
   // then issue acquire, release, or volatile mem_bar.
   insert_mem_bar(Op_MemBarCPUOrder);
   switch(id) {
     case vmIntrinsics::_loadFence:
-      insert_mem_bar(Op_LoadFence);
+      insert_mem_bar(Op_MemBarAcquire);
       return true;
     case vmIntrinsics::_storeFence:
-      insert_mem_bar(Op_StoreFence);
+      insert_mem_bar(Op_MemBarRelease);
       return true;
     case vmIntrinsics::_fullFence:
       insert_mem_bar(Op_MemBarVolatile);
       return true;
     default:
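
So the backout maps Unsafe.loadFence/storeFence back onto the generic acquire/release membars rather than the dedicated Op_LoadFence/Op_StoreFence opcodes the merge had introduced. Roughly, in C++11 terms (an analogy only, not the VM's implementation):

    #include <atomic>

    void load_fence_like()  { std::atomic_thread_fence(std::memory_order_acquire); }  // _loadFence
    void store_fence_like() { std::atomic_thread_fence(std::memory_order_release); }  // _storeFence
    void full_fence_like()  { std::atomic_thread_fence(std::memory_order_seq_cst); }  // _fullFence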
@@ -3094,11 +3150,11 @@
   // Serializable.class or Object[].class. The runtime will handle it.
   // But we must make an explicit check for initialization.
   Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
   // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
   // can generate code to load it as unsigned byte.
-  Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
+  Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
   Node* bits = intcon(InstanceKlass::fully_initialized);
   test = _gvn.transform(new (C) SubINode(inst, bits));
   // The 'test' is non-zero if we need to take a slow path.
 }

@@ -3118,35 +3174,35 @@
   Node* cls = null_check(argument(1), T_OBJECT);
   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
   kls = null_check(kls, T_OBJECT);
   ByteSize offset = TRACE_ID_OFFSET;
   Node* insp = basic_plus_adr(kls, in_bytes(offset));
-  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
+  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
   Node* bits = longcon(~0x03l); // ignore bit 0 & 1
   Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
   Node* clsused = longcon(0x01l); // set the class bit
   Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));

   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
-  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
+  store_to_memory(control(), insp, orl, T_LONG, adr_type);
   set_result(andl);
   return true;
 }

 bool LibraryCallKit::inline_native_threadID() {
   Node* tls_ptr = NULL;
   Node* cur_thr = generate_current_thread(tls_ptr);
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));

   Node* threadid = NULL;
   size_t thread_id_size = OSThread::thread_id_size();
   if (thread_id_size == (size_t) BytesPerLong) {
-    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
+    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
   } else if (thread_id_size == (size_t) BytesPerInt) {
-    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
+    threadid = make_load(control(), p, TypeInt::INT, T_INT);
   } else {
     ShouldNotReachHere();
   }
   set_result(threadid);
   return true;
@@ -3217,15 +3273,15 @@

   generate_slow_guard(bol_thr, slow_region);

   // (b) Interrupt bit on TLS must be false.
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));

   // Set the control input on the field _interrupted read to prevent it floating up.
-  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
+  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
   Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
   Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));

   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);

@@ -3289,11 +3345,11 @@

 //---------------------------load_mirror_from_klass----------------------------
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
+  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
 }

 //-----------------------load_klass_from_mirror_common-------------------------
 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
@@ -3326,11 +3382,11 @@
 // Fall through if (mods & mask) == bits, take the guard otherwise.
 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
   // Branch around if the given klass has the given modifier bit set.
   // Like generate_guard, adds a new path onto the region.
   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
+  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
   Node* mask = intcon(modifier_mask);
   Node* bits = intcon(modifier_bits);
   Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
   Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
   Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
@@ -3443,11 +3499,11 @@
     query_value = gen_instanceof(obj, kls, safe_for_replace);
     break;

   case vmIntrinsics::_getModifiers:
     p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
     break;

   case vmIntrinsics::_isInterface:
     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
     if (generate_interface_guard(kls, region) != NULL)
@@ -3501,19 +3557,19 @@
   case vmIntrinsics::_getComponentType:
     if (generate_array_guard(kls, region) != NULL) {
       // Be sure to pin the oop load to the guard edge just created:
       Node* is_array_ctrl = region->in(region->req()-1);
       Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
-      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
+      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
       phi->add_req(cmo);
     }
     query_value = null(); // non-array case is null
     break;

   case vmIntrinsics::_getClassAccessFlags:
     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
     break;

   default:
     fatal_unexpected_iid(id);
     break;
@@ -3875,11 +3931,11 @@
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset = (InstanceKlass::vtable_start_offset() +
                       vtable_index*vtableEntry::size()) * wordSize +
                      vtableEntry::method_offset_in_bytes();
   Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
-  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS);

   // Compare the target method with the expected method (e.g., Object.hashCode).
   const TypePtr* native_call_addr = TypeMetadataPtr::make(method);

   Node* native_call = makecon(native_call_addr);
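
The vtable probe computes a byte offset into the receiver's klass and loads the Method* stored there. The same arithmetic in plain C++, with illustrative parameters in place of the real InstanceKlass/vtableEntry accessors:

    #include <cstddef>

    typedef void* MethodPtr;

    MethodPtr vtable_entry(const char* klass_base, int vtable_start_offset,
                           int vtable_index, int entry_size_in_words,
                           int method_offset_in_bytes) {
      size_t entry_offset =
          (size_t)(vtable_start_offset + vtable_index * entry_size_in_words)
              * sizeof(void*)               // wordSize
          + method_offset_in_bytes;
      return *reinterpret_cast<MethodPtr const*>(klass_base + entry_offset);  // the make_load
    }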
@@ -4001,11 +4057,11 @@
     generate_virtual_guard(obj_klass, slow_region);
   }

   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());

   // Test the header to see if it is unlocked.
   Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
   Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
   Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
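
The fast hashCode path proceeds only when the mark word's low bits say the object is neither locked nor biased. A sketch of the test being assembled (bit values illustrative; the real constants live in markOopDesc):

    #include <cstdint>

    bool header_is_unlocked(uintptr_t mark) {
      const uintptr_t biased_lock_mask_in_place = 0x7;  // low bits: lock state + biased bit
      const uintptr_t unlocked_value            = 0x1;
      return (mark & biased_lock_mask_in_place) == unlocked_value;
    }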
@@ -5422,11 +5478,11 @@
     start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
     if (bump_bit != 0) {
       // Store a zero to the immediately preceding jint:
       Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
       Node* p1 = basic_plus_adr(dest, x1);
-      mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
+      mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
       mem = _gvn.transform(mem);
     }
   }
   Node* end = dest_size; // pre-rounded
   mem = ClearArrayNode::clear_memory(control(), mem, dest,
@@ -5472,12 +5528,12 @@
   // This is a common case, since abase can be odd mod 8.
   if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
       ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
     Node* sptr = basic_plus_adr(src, src_off);
     Node* dptr = basic_plus_adr(dest, dest_off);
-    Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
-    store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
+    Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
+    store_to_memory(control(), dptr, sval, T_INT, adr_type);
     src_off += BytesPerInt;
     dest_off += BytesPerInt;
   } else {
     return false;
   }
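
The guard accepts the case where both offsets sit at 4 mod 8 and are congruent mod 8, so copying a single jint first leaves both sides 8-byte aligned for the long-at-a-time copy. A worked instance of the three conditions:

    // With BytesPerLong == 8 and BytesPerInt == 4, take src_off = 12, dest_off = 20:
    static_assert(((12 | 20) & (8 - 1)) == 4, "both offsets are odd mod 8 by one int");
    static_assert(((12 ^ 20) & (8 - 1)) == 0, "offsets are congruent mod 8");
    static_assert((12 + 4) % 8 == 0 && (20 + 4) % 8 == 0, "one int copy realigns both");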
@@ -5538,11 +5594,11 @@
   // for the target array. This is an optimistic check. It will
   // look in each non-null element's class, at the desired klass's
   // super_check_offset, for the desired klass.
   int sco_offset = in_bytes(Klass::super_check_offset_offset());
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
+  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
   Node* check_offset = ConvI2X(_gvn.transform(n3));
   Node* check_value = dest_elem_klass;

   Node* src_start = array_element_address(src, src_offset, T_OBJECT);
   Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
@@ -5679,11 +5735,11 @@
   result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));

   Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
   Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
   Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
-  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
+  result = make_load(control(), adr, TypeInt::INT, T_INT);

   crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
   result = _gvn.transform(new (C) XorINode(crc, result));
   result = _gvn.transform(new (C) XorINode(result, M1));
   set_result(result);
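
This is the tail of a table-driven, byte-at-a-time CRC32 step: index the 256-entry table with the low byte of crc ^ b (scaled by 4 via the left shift), fold in crc >>> 8, and XOR with M1 (-1) for the usual CRC inversion. The whole update, sketched in plain C++ with crc_table standing in for StubRoutines::crc_table_addr():

    #include <cstdint>

    extern const uint32_t crc_table[256];  // assumed: the standard CRC-32 lookup table

    uint32_t crc32_update_byte(uint32_t crc, uint8_t b) {
      crc = ~crc;                                      // pre-inversion (XOR with -1)
      crc = crc_table[(crc ^ b) & 0xFF] ^ (crc >> 8);  // table lookup + fold
      return ~crc;                                     // post-inversion
    }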
@@ -5780,11 +5836,11 @@

   ciInstanceKlass* klass = env()->Object_klass();
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);

   Node* no_ctrl = NULL;
-  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
+  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);

   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,
               control(),
               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
@@ -5827,11 +5883,11 @@

   // Build the resultant type of the load
   const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());

   // Build the load.
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, is_vol);
   return loadedField;
 }


 //------------------------------inline_aescrypt_Block-----------------------
@@ -5878,26 +5934,14 @@
   // now need to get the start of its expanded key array
   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
   if (k_start == NULL) return false;

-  if (Matcher::pass_original_key_for_aes()) {
-    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
-    // compatibility issues between Java key expansion and SPARC crypto instructions
-    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
-    if (original_k_start == NULL) return false;
-
-    // Call the stub.
-    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
-                      stubAddr, stubName, TypePtr::BOTTOM,
-                      src_start, dest_start, k_start, original_k_start);
-  } else {
-    // Call the stub.
-    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
-                      stubAddr, stubName, TypePtr::BOTTOM,
-                      src_start, dest_start, k_start);
-  }
+  // Call the stub.
+  make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
+                    stubAddr, stubName, TypePtr::BOTTOM,
+                    src_start, dest_start, k_start);

   return true;
 }

 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
@@ -5971,33 +6015,18 @@
   // similarly, get the start address of the r vector
   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
   if (objRvec == NULL) return false;
   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);

-  Node* cbcCrypt;
-  if (Matcher::pass_original_key_for_aes()) {
-    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
-    // compatibility issues between Java key expansion and SPARC crypto instructions
-    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
-    if (original_k_start == NULL) return false;
-
-    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
-    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
-                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
-                                 stubAddr, stubName, TypePtr::BOTTOM,
-                                 src_start, dest_start, k_start, r_start, len, original_k_start);
-  } else {
-    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
-    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
-                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
-                                 stubAddr, stubName, TypePtr::BOTTOM,
-                                 src_start, dest_start, k_start, r_start, len);
-  }
-
-  // return cipher length (int)
-  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
-  set_result(retvalue);
+  // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
+  make_runtime_call(RC_LEAF|RC_NO_FP,
+                    OptoRuntime::cipherBlockChaining_aescrypt_Type(),
+                    stubAddr, stubName, TypePtr::BOTTOM,
+                    src_start, dest_start, k_start, r_start, len);
+
+  // return is void so no result needs to be pushed
+
   return true;
 }

 //------------------------------get_key_start_from_aescrypt_object-----------------------
 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
@@ -6006,21 +6035,10 @@
   if (objAESCryptKey == NULL) return (Node *) NULL;

   // now have the array, need to get the start address of the K array
   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
   return k_start;
-}
-
-//------------------------------get_original_key_start_from_aescrypt_object-----------------------
-Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
-  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
-  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
-  if (objAESCryptKey == NULL) return (Node *) NULL;
-
-  // now have the array, need to get the start address of the lastKey array
-  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
-  return original_k_start;
 }

 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
 // Return node representing slow path of predicate check.
 // the pseudo code we want to emulate with this predicate is: