comparison src/share/vm/opto/library_call.cpp @ 14422:2b8e28fdf503

Merge
author kvn
date Tue, 05 Nov 2013 17:38:04 -0800
parents a57a165b8296
children 00f5eff62d18 2113136690bc
comparing 14421:3068270ba476 with 14422:2b8e28fdf503
@@ -30 +30 @@
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callGenerator.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/idealKit.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "prims/nativeLookup.hpp"
@@ -44 +45 @@
   // Extend the set of intrinsics known to the runtime:
  public:
  private:
   bool             _is_virtual;
   bool             _is_predicted;
+  bool             _does_virtual_dispatch;
   vmIntrinsics::ID _intrinsic_id;
 
  public:
-  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, vmIntrinsics::ID id)
+  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
     : InlineCallGenerator(m),
       _is_virtual(is_virtual),
       _is_predicted(is_predicted),
+      _does_virtual_dispatch(does_virtual_dispatch),
       _intrinsic_id(id)
   {
   }
   virtual bool is_intrinsic() const { return true; }
   virtual bool is_virtual()   const { return _is_virtual; }
   virtual bool is_predicted() const { return _is_predicted; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   virtual Node* generate_predicate(JVMState* jvms);
   vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
 };
 
 
@@ -197 +201 @@
   Node* round_double_node(Node* n);
   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
   bool inline_math(vmIntrinsics::ID id);
+  void inline_math_mathExact(Node* math);
+  bool inline_math_addExactI(bool is_increment);
+  bool inline_math_addExactL(bool is_increment);
+  bool inline_math_multiplyExactI();
+  bool inline_math_multiplyExactL();
+  bool inline_math_negateExactI();
+  bool inline_math_negateExactL();
+  bool inline_math_subtractExactI(bool is_decrement);
+  bool inline_math_subtractExactL(bool is_decrement);
   bool inline_exp();
   bool inline_pow();
   void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_min_max(vmIntrinsics::ID id);
   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
@@ -350 +363 @@
       return NULL;
     }
   }
 
   bool is_predicted = false;
+  bool does_virtual_dispatch = false;
 
   switch (id) {
   case vmIntrinsics::_compareTo:
     if (!SpecialStringCompareTo)  return NULL;
     if (!Matcher::match_rule_supported(Op_StrComp))  return NULL;
@@ -376 +390 @@
     if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
     if (!InlineArrayCopy)  return NULL;
     break;
   case vmIntrinsics::_hashCode:
     if (!InlineObjectHash)  return NULL;
+    does_virtual_dispatch = true;
     break;
   case vmIntrinsics::_clone:
+    does_virtual_dispatch = true;
   case vmIntrinsics::_copyOf:
   case vmIntrinsics::_copyOfRange:
     if (!InlineObjectCopy)  return NULL;
     // These also use the arraycopy intrinsic mechanism:
     if (!InlineArrayCopy)  return NULL;
@@ -494 +510 @@
 
   case vmIntrinsics::_updateCRC32:
   case vmIntrinsics::_updateBytesCRC32:
   case vmIntrinsics::_updateByteBufferCRC32:
     if (!UseCRC32Intrinsics) return NULL;
+    break;
+
+  case vmIntrinsics::_incrementExactI:
+  case vmIntrinsics::_addExactI:
+    if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_incrementExactL:
+  case vmIntrinsics::_addExactL:
+    if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_decrementExactI:
+  case vmIntrinsics::_subtractExactI:
+    if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_decrementExactL:
+  case vmIntrinsics::_subtractExactL:
+    if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_negateExactI:
+    if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_negateExactL:
+    if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_multiplyExactI:
+    if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_multiplyExactL:
+    if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
     break;
 
   default:
     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
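
The cases added above register the overflow-checked math intrinsics only when the matcher supports the corresponding ideal node and -XX:+UseMathExactIntrinsics is on. As a rough, standalone illustration of the Java-level behavior these intrinsics must preserve (Math.addExact throws ArithmeticException on int overflow), here is a minimal sketch in plain C++; the helper name add_exact_i is hypothetical and nothing below is HotSpot code:

  #include <cstdint>
  #include <stdexcept>

  // Hypothetical sketch, not HotSpot code: mirrors Math.addExact(int, int).
  static int32_t add_exact_i(int32_t a, int32_t b) {
    // Do the add in 64 bits, then check that the result still fits in 32 bits.
    int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
    if (wide != static_cast<int32_t>(wide)) {
      throw std::overflow_error("integer overflow"); // Java would throw ArithmeticException
    }
    return static_cast<int32_t>(wide);
  }

  int main() { return add_exact_i(1, 2) == 3 ? 0 : 1; }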
@@ -527 +572 @@
   // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
   if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
     if (!InlineUnsafeOps)  return NULL;
   }
 
-  return new LibraryIntrinsic(m, is_virtual, is_predicted, (vmIntrinsics::ID) id);
+  return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
 }
 
@@ -535 +580 @@
 //----------------------register_library_intrinsics-----------------------
 // Initialize this file's data structures, for each Compile instance.
 void Compile::register_library_intrinsics() {
   // Nothing to do here.
 }
 
-JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
+JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Intrinsic %s", str);
   }
 #endif
   ciMethod* callee = kit.callee();
   const int bci    = kit.bci();
 
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
       C->log()->elem("intrinsic id='%s'%s nodes='%d'",
@@ -568 +613 @@
     kit.push_result();
     return kit.transfer_exceptions_into_jvms();
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
       C->print_inlining(callee, jvms->depth() - 1, bci, msg);
     } else {
@@ -590 +635 @@
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
   assert(is_predicted(), "sanity");
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Predicate for intrinsic %s", str);
   }
 #endif
   ciMethod* callee = kit.callee();
   const int bci    = kit.bci();
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
       C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
@@ -615 +660 @@
     }
     return slow_ctl; // Could be NULL if the check folds.
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
       C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
     } else {
@@ -665 +710 @@
   case vmIntrinsics::_dlog10:
   case vmIntrinsics::_dpow:   return inline_math_native(intrinsic_id());
 
   case vmIntrinsics::_min:
   case vmIntrinsics::_max:    return inline_min_max(intrinsic_id());
+
+  case vmIntrinsics::_addExactI:        return inline_math_addExactI(false /* add */);
+  case vmIntrinsics::_addExactL:        return inline_math_addExactL(false /* add */);
+  case vmIntrinsics::_decrementExactI:  return inline_math_subtractExactI(true /* decrement */);
+  case vmIntrinsics::_decrementExactL:  return inline_math_subtractExactL(true /* decrement */);
+  case vmIntrinsics::_incrementExactI:  return inline_math_addExactI(true /* increment */);
+  case vmIntrinsics::_incrementExactL:  return inline_math_addExactL(true /* increment */);
+  case vmIntrinsics::_multiplyExactI:   return inline_math_multiplyExactI();
+  case vmIntrinsics::_multiplyExactL:   return inline_math_multiplyExactL();
+  case vmIntrinsics::_negateExactI:     return inline_math_negateExactI();
+  case vmIntrinsics::_negateExactL:     return inline_math_negateExactL();
+  case vmIntrinsics::_subtractExactI:   return inline_math_subtractExactI(false /* subtract */);
+  case vmIntrinsics::_subtractExactL:   return inline_math_subtractExactL(false /* subtract */);
 
   case vmIntrinsics::_arraycopy:        return inline_arraycopy();
 
   case vmIntrinsics::_compareTo:        return inline_string_compareTo();
   case vmIntrinsics::_indexOf:          return inline_string_indexOf();
@@ -1278 +1336 @@
   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
   jint target_length = target_array->length();
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
 
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    target = cast_array_to_stable(target, target_type);
+  }
+
   IdealKit kit(this, false, true);
 #define __ kit.
   Node* zero       = __ ConI(0);
   Node* one        = __ ConI(1);
   Node* cache      = __ ConI(cache_i);
@@ -1901 +1964 @@
 }
 
 //----------------------------inline_min_max-----------------------------------
 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
   set_result(generate_min_max(id, argument(0), argument(1)));
+  return true;
+}
+
+void LibraryCallKit::inline_math_mathExact(Node* math) {
+  // If we didn't get the expected opcode it means we have optimized
+  // the node to something else and don't need the exception edge.
+  if (!math->is_MathExact()) {
+    set_result(math);
+    return;
+  }
+
+  Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
+  Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
+
+  Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
+  IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
+  Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
+  Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
+
+  {
+    PreserveJVMState pjvms(this);
+    PreserveReexecuteState preexecs(this);
+    jvms()->set_should_reexecute(true);
+
+    set_control(slow_path);
+    set_i_o(i_o());
+
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_none);
+  }
+
+  set_control(fast_path);
+  set_result(result);
+}
+
+bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
+  inline_math_mathExact(add);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(add);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_negateExactI() {
+  Node* arg1 = argument(0);
+
+  Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_negateExactL() {
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+
+  Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_multiplyExactI() {
+  Node* arg1 = argument(0);
+  Node* arg2 = argument(1);
+
+  Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_multiplyExactL() {
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+  Node* arg2 = argument(2);
+  // argument(3) == TOP
+
+  Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
   return true;
 }
 
 Node*
 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
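
In inline_math_mathExact above, the MathExact node produces both a result projection and an overflow flag; the flag drives a Bool/If pair whose unlikely branch reaches an uncommon trap that re-executes the bytecode, so the ArithmeticException comes from rerunning the original code rather than from the compiled fast path. A loose standalone analogy of that fast-path/slow-path split in plain C++ (not HotSpot code; __builtin_add_overflow is a GCC/Clang builtin used here purely for illustration):

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t a = 2147483647, b = 1, r;
    if (__builtin_add_overflow(a, b, &r)) {
      // Analogue of the unlikely slow path: C2 deoptimizes here and the
      // interpreter re-executes the call, which then throws.
      std::puts("overflow");
    } else {
      // Analogue of the fast path: the plain add result is used directly.
      std::printf("%d\n", r);
    }
    return 0;
  }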
@@ -2292 +2488 @@
   // contraint in place.
   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
 
 #ifndef PRODUCT
-    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       tty->print("  from base type: ");  adr_type->dump();
       tty->print("  sharpened value: ");  tjp->dump();
     }
 #endif
     // Sharpen the value type.
@@ -2754 +2950 @@
     // Execute transformation here to avoid barrier generation in such case.
     if (_gvn.type(newval) == TypePtr::NULL_PTR)
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    pre_barrier(true /* do_load*/,
-                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                NULL /* pre_val*/,
-                T_OBJECT);
+    if (kind == LS_xchg) {
+      // If pre-barrier must execute before the oop store, old value will require do_load here.
+      if (!can_move_pre_barrier()) {
+        pre_barrier(true /* do_load*/,
+                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                    NULL /* pre_val*/,
+                    T_OBJECT);
+      } // Else move pre_barrier to use load_store value, see below.
+    } else if (kind == LS_cmpxchg) {
+      // Same as for newval above:
+      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+      }
+      // The only known value which might get overwritten is oldval.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  oldval /* pre_val */,
+                  T_OBJECT);
+    } else {
+      ShouldNotReachHere();
+    }
+
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
       if (kind == LS_xchg) {
         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
@@ -2793 +3007 @@
   // main role is to prevent LoadStore nodes from being optimized away
   // when their results aren't used.
   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
+  if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    if (can_move_pre_barrier()) {
+      // Don't need to load pre_val. The old value is returned by load_store.
+      // The pre_barrier can execute after the xchg as long as no safepoint
+      // gets inserted between them.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  load_store /* pre_val */,
+                  T_OBJECT);
+    }
+  }
+
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
   insert_mem_bar(Op_MemBarAcquire);
-
-#ifdef _LP64
-  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
-    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
-  }
-#endif
 
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
 }
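
The hunk above distinguishes the two LoadStore kinds when deciding how to emit the pre-barrier (used by G1's SATB marking): LS_xchg is an unconditional swap whose returned old value can itself serve as pre_val, which is why the barrier may be moved after the operation, while LS_cmpxchg can only ever overwrite oldval, so oldval is passed as pre_val up front. A loose standalone analogy with std::atomic, for illustration only and not HotSpot code:

  #include <atomic>
  #include <cstdio>

  int main() {
    std::atomic<int> cell{1};

    // LS_xchg analogue: unconditional swap; the previous value comes back.
    int old = cell.exchange(2);

    // LS_cmpxchg analogue: store 3 only if the cell still holds the expected 2.
    int expected = 2;
    bool swapped = cell.compare_exchange_strong(expected, 3);

    std::printf("old=%d swapped=%d now=%d\n", old, swapped ? 1 : 0, cell.load());
    return 0;
  }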
@@ -3224 +3449 @@
 
   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
       k->print_name();
       tty->cr();
@@ -3258 +3483 @@
   // Side-effects region with the control path if the klass is null.
   Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
   // If kls is null, we have a primitive mirror.
   phi->init_req(_prim_path, prim_return_value);
   if (stopped()) { set_result(region, phi); return true; }
+  bool safe_for_replace = (region->in(_prim_path) == top());
 
   Node* p;  // handy temp
   Node* null_ctl;
 
   // Now that we have the non-null klass, we can perform the real query.
   // For constant classes, the query will constant-fold in LoadNode::Value.
   Node* query_value = top();
   switch (id) {
   case vmIntrinsics::_isInstance:
     // nothing is an instance of a primitive type
-    query_value = gen_instanceof(obj, kls);
+    query_value = gen_instanceof(obj, kls, safe_for_replace);
     break;
 
   case vmIntrinsics::_getModifiers:
     p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
@@ -3698 +3924 @@
 // Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
                                              RegionNode* slow_region) {
   ciMethod* method = callee();
   int vtable_index = method->vtable_index();
+  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+         err_msg_res("bad index %d", vtable_index));
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset  = (InstanceKlass::vtable_start_offset() +
                      vtable_index*vtableEntry::size()) * wordSize +
                      vtableEntry::method_offset_in_bytes();
   Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
@@ -3748 +3976 @@
     } else {
       // hashCode and clone are not a miranda methods,
       // so the vtable index is fixed.
       // No need to use the linkResolver to get it.
       vtable_index = method->vtable_index();
+      assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+             err_msg_res("bad index %d", vtable_index));
     }
     slow_call = new(C) CallDynamicJavaNode(tf,
                           SharedRuntime::get_resolve_virtual_call_stub(),
                           method, vtable_index, bci());
   } else {  // neither virtual nor static:  opt_virtual
@@ -3912 +4142 @@
 // NOTE: This code must perform the same logic as JVM_GetCallerClass
 // in that it must skip particular security frames and checks for
 // caller sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
     return false;
   }
@@ -3943 +4173 @@
       break;
     case 1:
       // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
       if (!m->caller_sensitive()) {
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
         }
 #endif
         return false;  // bail-out; let JVM_GetCallerClass do the work
       }
@@ -3959 +4189 @@
       ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
       ciInstance* caller_mirror = caller_klass->java_mirror();
       set_result(makecon(TypeInstPtr::make(caller_mirror)));
 
 #ifndef PRODUCT
-      if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+      if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
         tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
         tty->print_cr("  JVM state at this point:");
         for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
           ciMethod* m = jvms()->of_depth(i)->method();
           tty->print_cr("  %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
@@ -3975 +4205 @@
       break;
     }
   }
 
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
     tty->print_cr("  JVM state at this point:");
     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
       ciMethod* m = jvms()->of_depth(i)->method();
       tty->print_cr("  %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
@@ -4168 +4398 @@
   // base_off:
   // 8  - 32-bit VM
   // 12 - 64-bit VM, compressed klass
   // 16 - 64-bit VM, normal klass
   if (base_off % BytesPerLong != 0) {
-    assert(UseCompressedKlassPointers, "");
+    assert(UseCompressedClassPointers, "");
     if (is_array) {
       // Exclude length to copy by 8 bytes words.
       base_off += sizeof(int);
     } else {
       // Include klass to copy by 8 bytes words.
@@ -4454 +4684 @@
   // (1) src and dest are arrays.
   const Type* src_type  = src->Value(&_gvn);
   const Type* dest_type = dest->Value(&_gvn);
   const TypeAryPtr* top_src  = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
-  if (top_src  == NULL || top_src->klass()  == NULL ||
-      top_dest == NULL || top_dest->klass() == NULL) {
+
+  // Do we have the type of src?
+  bool has_src = (top_src != NULL && top_src->klass() != NULL);
+  // Do we have the type of dest?
+  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
+  // Is the type for src from speculation?
+  bool src_spec = false;
+  // Is the type for dest from speculation?
+  bool dest_spec = false;
+
+  if (!has_src || !has_dest) {
+    // We don't have sufficient type information, let's see if
+    // speculative types can help. We need to have types for both src
+    // and dest so that it pays off.
+
+    // Do we already have or could we have type information for src
+    bool could_have_src = has_src;
+    // Do we already have or could we have type information for dest
+    bool could_have_dest = has_dest;
+
+    ciKlass* src_k = NULL;
+    if (!has_src) {
+      src_k = src_type->speculative_type();
+      if (src_k != NULL && src_k->is_array_klass()) {
+        could_have_src = true;
+      }
+    }
+
+    ciKlass* dest_k = NULL;
+    if (!has_dest) {
+      dest_k = dest_type->speculative_type();
+      if (dest_k != NULL && dest_k->is_array_klass()) {
+        could_have_dest = true;
+      }
+    }
+
+    if (could_have_src && could_have_dest) {
+      // This is going to pay off so emit the required guards
+      if (!has_src) {
+        src = maybe_cast_profiled_obj(src, src_k);
+        src_type  = _gvn.type(src);
+        top_src  = src_type->isa_aryptr();
+        has_src = (top_src != NULL && top_src->klass() != NULL);
+        src_spec = true;
+      }
+      if (!has_dest) {
+        dest = maybe_cast_profiled_obj(dest, dest_k);
+        dest_type  = _gvn.type(dest);
+        top_dest  = dest_type->isa_aryptr();
+        has_dest = (top_dest != NULL && top_dest->klass() != NULL);
+        dest_spec = true;
+      }
+    }
+  }
+
+  if (!has_src || !has_dest) {
     // Conservatively insert a memory barrier on all memory slices.
     // Do not let writes into the source float below the arraycopy.
     insert_mem_bar(Op_MemBarCPUOrder);
 
     // Call StubRoutines::generic_arraycopy stub.
@@ -4488 +4772 @@
     // (But, avoid the native method wrapper to JVM_ArrayCopy.)
     generate_slow_arraycopy(TypePtr::BOTTOM,
                             src, src_offset, dest, dest_offset, length,
                             /*dest_uninitialized*/false);
     return true;
+  }
+
+  if (src_elem == T_OBJECT) {
+    // If both arrays are object arrays then having the exact types
+    // for both will remove the need for a subtype check at runtime
+    // before the call and may make it possible to pick a faster copy
+    // routine (without a subtype check on every element)
+    // Do we have the exact type of src?
+    bool could_have_src = src_spec;
+    // Do we have the exact type of dest?
+    bool could_have_dest = dest_spec;
+    ciKlass* src_k = top_src->klass();
+    ciKlass* dest_k = top_dest->klass();
+    if (!src_spec) {
+      src_k = src_type->speculative_type();
+      if (src_k != NULL && src_k->is_array_klass()) {
+        could_have_src = true;
+      }
+    }
+    if (!dest_spec) {
+      dest_k = dest_type->speculative_type();
+      if (dest_k != NULL && dest_k->is_array_klass()) {
+        could_have_dest = true;
+      }
+    }
+    if (could_have_src && could_have_dest) {
+      // If we can have both exact types, emit the missing guards
+      if (could_have_src && !src_spec) {
+        src = maybe_cast_profiled_obj(src, src_k);
+      }
+      if (could_have_dest && !dest_spec) {
+        dest = maybe_cast_profiled_obj(dest, dest_k);
+      }
+    }
   }
 
 //---------------------------------------------------------------------------
 // We will make a fast path for this call to arraycopy.
 